Diffstat (limited to 'vendor/github.com/minio/minio-go')
94 files changed, 34611 insertions, 0 deletions
diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore
new file mode 100644
index 0000000..8ae0384
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.gitignore
@@ -0,0 +1,6 @@
*~
*.test
validator
golangci-lint
functional_tests
.idea
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml
new file mode 100644
index 0000000..875b949
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml
@@ -0,0 +1,27 @@
linters-settings:
  misspell:
    locale: US

linters:
  disable-all: true
  enable:
    - typecheck
    - goimports
    - misspell
    - revive
    - govet
    - ineffassign
    - gosimple
    - unused
    - gocritic

issues:
  exclude-use-default: false
  exclude:
    # todo fix these when we get enough time.
    - "singleCaseSwitch: should rewrite switch statement to if statement"
    - "unlambda: replace"
    - "captLocal:"
    - "ifElseChain:"
    - "elseif:"
    - "should have a package comment"
diff --git a/vendor/github.com/minio/minio-go/v7/CNAME b/vendor/github.com/minio/minio-go/v7/CNAME
new file mode 100644
index 0000000..d365a7b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CNAME
@@ -0,0 +1 @@
minio-go.min.io
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
new file mode 100644
index 0000000..24522ef
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
@@ -0,0 +1,22 @@
### Developer Guidelines

``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:

* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
    - Fork it
    - Create your feature branch (git checkout -b my-new-feature)
    - Commit your changes (git commit -am 'Add some feature')
    - Push to the branch (git push origin my-new-feature)
    - Create a new Pull Request

* When you're ready to create a pull request, be sure to:
    - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
    - Run `go fmt`
    - Squash your commits into a single commit. `git rebase -i`. It's okay to force-update your pull request.
    - Make sure `go test -race ./...` and `go build` complete.
      NOTE: `go test` runs functional tests and requires an AWS S3 account. Set the credentials as the environment variables
      ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests, please use ``go test -short -race ./...``.

* Read the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
    - The `minio-go` project strictly conforms to Golang style
    - If you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/v7/LICENSE b/vendor/github.com/minio/minio-go/v7/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/LICENSE
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
new file mode 100644
index 0000000..f640dfb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
@@ -0,0 +1,35 @@
# For maintainers only

## Responsibilities

Please go through this link: [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)

### Making new releases
Tag and sign your release commit. Additionally, this step requires access to MinIO's trusted private key.
```sh
$ export GNUPGHOME=/media/${USER}/minio/trusted
$ git tag -s 4.0.0
$ git push
$ git push --tags
```

### Update version
Once the release has been made, update the `libraryVersion` constant in `api.go` to the next version to be released.

```sh
$ grep libraryVersion api.go
libraryVersion = "4.0.1"
```

Commit your changes:
```sh
$ git commit -a -m "Update version for next release" --author "MinIO Trusted <[email protected]>"
```

### Announce
Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `[email protected]` account. Release notes require two sections, `highlights` and `changelog`. Highlights are a bulleted list of salient features in this release, and the changelog lists all commits since the last release.

To generate the `changelog`:
```sh
$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
```
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
new file mode 100644
index 0000000..68444aa
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -0,0 +1,38 @@
GOPATH := $(shell go env GOPATH)
TMPDIR := $(shell mktemp -d)

all: checks

.PHONY: examples docs

checks: lint vet test examples functional-test

lint:
	@mkdir -p ${GOPATH}/bin
	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml

vet:
	@GO111MODULE=on go vet ./...
	@echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
	${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"

test:
	@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...

examples:
	@echo "Building s3 examples"
	@cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
	@echo "Building minio examples"
	@cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)

functional-test:
	@GO111MODULE=on go build -race functional_tests.go
	@SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests

clean:
	@echo "Cleaning up all the generated files"
	@find . -name '*.test' | xargs rm -fv
	@find . -name '*~' | xargs rm -fv
diff --git a/vendor/github.com/minio/minio-go/v7/NOTICE b/vendor/github.com/minio/minio-go/v7/NOTICE
new file mode 100644
index 0000000..1e8fd3b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/NOTICE
@@ -0,0 +1,9 @@
MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.

This product includes software developed at MinIO, Inc.
(https://min.io/).

The MinIO project contains unmodified/modified subcomponents too with
separate copyright notices and license terms. Your use of the source
code for these subcomponents is subject to the terms and conditions
of Apache License Version 2.0
diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md
new file mode 100644
index 0000000..82f70a1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/README.md
@@ -0,0 +1,312 @@
# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)

The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage.

This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader.
For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).

These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html).

## Download from GitHub
11 | |||
12 | From your project directory: | ||
13 | |||
14 | ```sh | ||
15 | go get github.com/minio/minio-go/v7 | ||
16 | ``` | ||
17 | |||
18 | ## Initialize a MinIO Client Object | ||
19 | |||
20 | The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage: | ||
21 | |||
| Parameter       | Description                                                 |
| --------------- | ----------------------------------------------------------- |
| `endpoint`      | URL to the object storage service.                          |
| `minio.Options` | All the options, such as credentials and custom transport.  |
26 | |||
27 | ```go | ||
28 | package main | ||
29 | |||
30 | import ( | ||
31 | "log" | ||
32 | |||
33 | "github.com/minio/minio-go/v7" | ||
34 | "github.com/minio/minio-go/v7/pkg/credentials" | ||
35 | ) | ||
36 | |||
37 | func main() { | ||
38 | endpoint := "play.min.io" | ||
39 | accessKeyID := "Q3AM3UQ867SPQQA43P2F" | ||
40 | secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" | ||
41 | useSSL := true | ||
42 | |||
43 | // Initialize minio client object. | ||
44 | minioClient, err := minio.New(endpoint, &minio.Options{ | ||
45 | Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), | ||
46 | Secure: useSSL, | ||
47 | }) | ||
48 | if err != nil { | ||
49 | log.Fatalln(err) | ||
50 | } | ||
51 | |||
52 | log.Printf("%#v\n", minioClient) // minioClient is now set up | ||
53 | } | ||
54 | ``` | ||
55 | |||
56 | ## Example - File Uploader | ||
57 | |||
58 | This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket. | ||
59 | It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io). | ||
60 | |||
61 | The `play` server runs the latest stable version of MinIO and may be used for testing and development. | ||
62 | The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected. | ||
63 | |||
64 | ### FileUploader.go | ||
65 | |||
66 | This example does the following: | ||
67 | |||
68 | - Connects to the MinIO `play` server using the provided credentials. | ||
69 | - Creates a bucket named `testbucket`. | ||
70 | - Uploads a file named `testdata` from `/tmp`. | ||
71 | - Verifies the file was created using `mc ls`. | ||
72 | |||
73 | ```go | ||
74 | // FileUploader.go MinIO example | ||
75 | package main | ||
76 | |||
77 | import ( | ||
78 | "context" | ||
79 | "log" | ||
80 | |||
81 | "github.com/minio/minio-go/v7" | ||
82 | "github.com/minio/minio-go/v7/pkg/credentials" | ||
83 | ) | ||
84 | |||
85 | func main() { | ||
86 | ctx := context.Background() | ||
87 | endpoint := "play.min.io" | ||
88 | accessKeyID := "Q3AM3UQ867SPQQA43P2F" | ||
89 | secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" | ||
90 | useSSL := true | ||
91 | |||
92 | // Initialize minio client object. | ||
93 | minioClient, err := minio.New(endpoint, &minio.Options{ | ||
94 | Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), | ||
95 | Secure: useSSL, | ||
96 | }) | ||
97 | if err != nil { | ||
98 | log.Fatalln(err) | ||
99 | } | ||
100 | |||
101 | // Make a new bucket called testbucket. | ||
102 | bucketName := "testbucket" | ||
103 | location := "us-east-1" | ||
104 | |||
105 | err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location}) | ||
106 | if err != nil { | ||
107 | // Check to see if we already own this bucket (which happens if you run this twice) | ||
108 | exists, errBucketExists := minioClient.BucketExists(ctx, bucketName) | ||
109 | if errBucketExists == nil && exists { | ||
110 | log.Printf("We already own %s\n", bucketName) | ||
111 | } else { | ||
112 | log.Fatalln(err) | ||
113 | } | ||
114 | } else { | ||
115 | log.Printf("Successfully created %s\n", bucketName) | ||
116 | } | ||
117 | |||
118 | // Upload the test file | ||
119 | // Change the value of filePath if the file is in another location | ||
120 | objectName := "testdata" | ||
121 | filePath := "/tmp/testdata" | ||
122 | contentType := "application/octet-stream" | ||
123 | |||
124 | // Upload the test file with FPutObject | ||
125 | info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) | ||
126 | if err != nil { | ||
127 | log.Fatalln(err) | ||
128 | } | ||
129 | |||
130 | log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size) | ||
131 | } | ||
132 | ``` | ||
133 | |||
134 | **1. Create a test file containing data:** | ||
135 | |||
136 | You can do this with `dd` on Linux or macOS systems: | ||
137 | |||
138 | ```sh | ||
139 | dd if=/dev/urandom of=/tmp/testdata bs=2048 count=10 | ||
140 | ``` | ||
141 | |||
142 | or `fsutil` on Windows: | ||
143 | |||
144 | ```sh | ||
145 | fsutil file createnew "C:\Users\<username>\Desktop\sample.txt" 20480 | ||
146 | ``` | ||
147 | |||
148 | **2. Run FileUploader with the following commands:** | ||
149 | |||
150 | ```sh | ||
151 | go mod init example/FileUploader | ||
152 | go get github.com/minio/minio-go/v7 | ||
153 | go get github.com/minio/minio-go/v7/pkg/credentials | ||
154 | go run FileUploader.go | ||
155 | ``` | ||
156 | |||
157 | The output resembles the following: | ||
158 | |||
159 | ```sh | ||
160 | 2023/11/01 14:27:55 Successfully created testbucket | ||
161 | 2023/11/01 14:27:55 Successfully uploaded testdata of size 20480 | ||
162 | ``` | ||
163 | |||
164 | **3. Verify the Uploaded File With `mc ls`:** | ||
165 | |||
166 | ```sh | ||
167 | mc ls play/testbucket | ||
[2023-11-01 14:27:55 UTC]  20KiB STANDARD testdata
```

## API Reference

The full API Reference is available here.

* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html)

### API Reference : Bucket Operations

* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket)
* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects)
* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)

### API Reference : Bucket policy Operations

* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)

### API Reference : Bucket notification Operations

* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension)
* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension)

### API Reference : File Object Operations

* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject)

### API Reference : Object Operations

* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject)
* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject)
* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject)
* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects)
* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)

### API Reference : Presigned Operations

* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject)
* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy)

### API Reference : Client custom settings

* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)

## Full Examples

### Full Examples : Bucket Operations

* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)

### Full Examples : Bucket policy Operations

* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)

### Full Examples : Bucket lifecycle Operations

* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)

### Full Examples : Bucket encryption Operations

* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)

### Full Examples : Bucket replication Operations

* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)

### Full Examples : Bucket notification Operations

* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)

### Full Examples : File Object Operations

* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)

### Full Examples : Object Operations

* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)

### Full Examples : Encrypted Object Operations

* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)

### Full Examples : Presigned Operations

* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)

## Explore Further

* [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7)
* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html)
* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html)

## Contribute

[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)

## License

This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0); see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
new file mode 100644
index 0000000..24f94e0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
@@ -0,0 +1,134 @@
/*
 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
 * Copyright 2020 MinIO, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bytes"
	"context"
	"encoding/xml"
	"net/http"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
	"github.com/minio/minio-go/v7/pkg/sse"
)

// SetBucketEncryption sets the default encryption configuration on an existing bucket.
func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}

	if config == nil {
		return errInvalidArgument("configuration cannot be empty")
	}

	buf, err := xml.Marshal(config)
	if err != nil {
		return err
	}

	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("encryption", "")

	// Content-length is mandatory to set a default encryption configuration
	reqMetadata := requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentBody:      bytes.NewReader(buf),
		contentLength:    int64(len(buf)),
		contentMD5Base64: sumMD5Base64(buf),
	}

	// Execute PUT to upload a new bucket default encryption configuration.
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return httpRespToErrorResponse(resp, bucketName, "")
	}
	return nil
}

// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts.
func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}

	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("encryption", "")

	// DELETE default encryption configuration on a bucket.
	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return httpRespToErrorResponse(resp, bucketName, "")
	}
	return nil
}

// GetBucketEncryption gets the default encryption configuration
// on an existing bucket with a context to control cancellations and timeouts.
func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return nil, err
	}

	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("encryption", "")

	// Execute GET on bucket to get the default encryption configuration.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:  bucketName,
		queryValues: urlValues,
	})

	defer closeResponse(resp)
	if err != nil {
		return nil, err
	}

	if resp.StatusCode != http.StatusOK {
		return nil, httpRespToErrorResponse(resp, bucketName, "")
	}

	encryptionConfig := &sse.Configuration{}
	if err = xmlDecoder(resp.Body, encryptionConfig); err != nil {
		return nil, err
	}

	return encryptionConfig, nil
}
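
A minimal usage sketch of the bucket-encryption calls above, assuming a reachable server; the endpoint, credentials, and `testbucket` bucket name are illustrative placeholders (here reusing the public `play.min.io` values from the README):

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/sse"
)

func main() {
	// Placeholder endpoint and credentials; substitute your own.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	ctx := context.Background()
	bucket := "testbucket" // assumed to exist already

	// Set SSE-S3 (AES-256) as the bucket's default encryption.
	if err := client.SetBucketEncryption(ctx, bucket, sse.NewConfigurationSSES3()); err != nil {
		log.Fatalln(err)
	}

	// Read the configuration back.
	cfg, err := client.GetBucketEncryption(ctx, bucket)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("default encryption: %+v\n", cfg)

	// Remove the default encryption configuration again.
	if err := client.RemoveBucketEncryption(ctx, bucket); err != nil {
		log.Fatalln(err)
	}
}
```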
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
new file mode 100644
index 0000000..fec5cec
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
@@ -0,0 +1,169 @@
/*
 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
 * Copyright 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bytes"
	"context"
	"encoding/xml"
	"io"
	"net/http"
	"net/url"
	"time"

	"github.com/minio/minio-go/v7/pkg/lifecycle"
	"github.com/minio/minio-go/v7/pkg/s3utils"
)

// SetBucketLifecycle sets the lifecycle on an existing bucket.
func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}

	// If lifecycle is empty then delete it.
	if config.Empty() {
		return c.removeBucketLifecycle(ctx, bucketName)
	}

	buf, err := xml.Marshal(config)
	if err != nil {
		return err
	}

	// Save the updated lifecycle.
	return c.putBucketLifecycle(ctx, bucketName, buf)
}

// Saves a new bucket lifecycle.
func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error {
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("lifecycle", "")

	// Content-length is mandatory for put lifecycle request
	reqMetadata := requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentBody:      bytes.NewReader(buf),
		contentLength:    int64(len(buf)),
		contentMD5Base64: sumMD5Base64(buf),
	}

	// Execute PUT to upload a new bucket lifecycle.
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	return nil
}

// Remove lifecycle from a bucket.
func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error {
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("lifecycle", "")

	// Execute DELETE on objectName.
	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	return nil
}

// GetBucketLifecycle fetches the bucket lifecycle configuration.
func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) {
	lc, _, err := c.GetBucketLifecycleWithInfo(ctx, bucketName)
	return lc, err
}

// GetBucketLifecycleWithInfo fetches the bucket lifecycle configuration along with when it was last updated.
func (c *Client) GetBucketLifecycleWithInfo(ctx context.Context, bucketName string) (*lifecycle.Configuration, time.Time, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return nil, time.Time{}, err
	}

	bucketLifecycle, updatedAt, err := c.getBucketLifecycle(ctx, bucketName)
	if err != nil {
		return nil, time.Time{}, err
	}

	config := lifecycle.NewConfiguration()
	if err = xml.Unmarshal(bucketLifecycle, config); err != nil {
		return nil, time.Time{}, err
	}
	return config, updatedAt, nil
}

// Request server for current bucket lifecycle.
func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, time.Time, error) {
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("lifecycle", "")
	urlValues.Set("withUpdatedAt", "true")

	// Execute GET on bucket to get lifecycle.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:  bucketName,
		queryValues: urlValues,
	})

	defer closeResponse(resp)
	if err != nil {
		return nil, time.Time{}, err
	}

	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return nil, time.Time{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}

	lcBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, time.Time{}, err
	}

	const minIOLifecycleCfgUpdatedAt = "X-Minio-LifecycleConfig-UpdatedAt"
	var updatedAt time.Time
	if timeStr := resp.Header.Get(minIOLifecycleCfgUpdatedAt); timeStr != "" {
		updatedAt, err = time.Parse(iso8601DateFormat, timeStr)
		if err != nil {
			return nil, time.Time{}, err
		}
	}

	return lcBytes, updatedAt, nil
}
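
A minimal usage sketch of the lifecycle calls above, assuming a reachable server; the endpoint, credentials, `testbucket` bucket, `expire-logs` rule ID, and `logs/` prefix are illustrative placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

func main() {
	// Placeholder endpoint and credentials; substitute your own.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	ctx := context.Background()
	bucket := "testbucket" // assumed to exist already

	// Expire objects under the "logs/" prefix after 90 days.
	config := lifecycle.NewConfiguration()
	config.Rules = []lifecycle.Rule{
		{
			ID:         "expire-logs",
			Status:     "Enabled",
			RuleFilter: lifecycle.Filter{Prefix: "logs/"},
			Expiration: lifecycle.Expiration{Days: 90},
		},
	}
	if err := client.SetBucketLifecycle(ctx, bucket, config); err != nil {
		log.Fatalln(err)
	}

	// Fetch it back, including when it was last updated.
	lc, updatedAt, err := client.GetBucketLifecycleWithInfo(ctx, bucket)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("lifecycle (updated %s): %+v\n", updatedAt, lc)
}
```

Note that, per the implementation above, passing an empty configuration to `SetBucketLifecycle` deletes the bucket's lifecycle instead of storing an empty one.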
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
new file mode 100644
index 0000000..8de5c01
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
@@ -0,0 +1,261 @@
/*
 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
 * Copyright 2017-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bufio"
	"bytes"
	"context"
	"encoding/xml"
	"net/http"
	"net/url"
	"time"

	jsoniter "github.com/json-iterator/go"
	"github.com/minio/minio-go/v7/pkg/notification"
	"github.com/minio/minio-go/v7/pkg/s3utils"
)

// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts.
func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}

	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("notification", "")

	notifBytes, err := xml.Marshal(&config)
	if err != nil {
		return err
	}

	notifBuffer := bytes.NewReader(notifBytes)
	reqMetadata := requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentBody:      notifBuffer,
		contentLength:    int64(len(notifBytes)),
		contentMD5Base64: sumMD5Base64(notifBytes),
		contentSHA256Hex: sum256Hex(notifBytes),
	}

	// Execute PUT to upload a new bucket notification.
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	return nil
}

// RemoveAllBucketNotification removes the bucket notification, clearing all previously specified configuration.
func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error {
	return c.SetBucketNotification(ctx, bucketName, notification.Configuration{})
}

// GetBucketNotification returns the current bucket notification configuration.
func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return notification.Configuration{}, err
	}
	return c.getBucketNotification(ctx, bucketName)
}

// Request server for notification rules.
func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) {
	urlValues := make(url.Values)
	urlValues.Set("notification", "")

	// Execute GET on bucket to fetch the notification configuration.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
	})

	defer closeResponse(resp)
	if err != nil {
		return notification.Configuration{}, err
	}
	return processBucketNotificationResponse(bucketName, resp)
}

// processes the GetNotification http response from the server.
func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) {
	if resp.StatusCode != http.StatusOK {
		errResponse := httpRespToErrorResponse(resp, bucketName, "")
		return notification.Configuration{}, errResponse
	}
	var bucketNotification notification.Configuration
	err := xmlDecoder(resp.Body, &bucketNotification)
	if err != nil {
		return notification.Configuration{}, err
	}
	return bucketNotification, nil
}

122 | // ListenNotification listen for all events, this is a MinIO specific API | ||
123 | func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { | ||
124 | return c.ListenBucketNotification(ctx, "", prefix, suffix, events) | ||
125 | } | ||
126 | |||
127 | // ListenBucketNotification listen for bucket events, this is a MinIO specific API | ||
128 | func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { | ||
129 | notificationInfoCh := make(chan notification.Info, 1) | ||
130 | const notificationCapacity = 4 * 1024 * 1024 | ||
131 | notificationEventBuffer := make([]byte, notificationCapacity) | ||
132 | // Only success, start a routine to start reading line by line. | ||
133 | go func(notificationInfoCh chan<- notification.Info) { | ||
134 | defer close(notificationInfoCh) | ||
135 | |||
136 | // Validate the bucket name. | ||
137 | if bucketName != "" { | ||
138 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
139 | select { | ||
140 | case notificationInfoCh <- notification.Info{ | ||
141 | Err: err, | ||
142 | }: | ||
143 | case <-ctx.Done(): | ||
144 | } | ||
145 | return | ||
146 | } | ||
147 | } | ||
148 | |||
149 | // Check ARN partition to verify if listening bucket is supported | ||
150 | if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { | ||
151 | select { | ||
152 | case notificationInfoCh <- notification.Info{ | ||
153 | Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), | ||
154 | }: | ||
155 | case <-ctx.Done(): | ||
156 | } | ||
157 | return | ||
158 | } | ||
159 | |||
160 | // Continuously run and listen on bucket notifications. | ||
161 | // Create a done channel to control the retry goroutine. | ||
162 | retryDoneCh := make(chan struct{}, 1) | ||
163 | |||
164 | // Indicate to our routine to exit cleanly upon return. | ||
165 | defer close(retryDoneCh) | ||
166 | |||
167 | // Prepare urlValues to pass into the request on every loop | ||
168 | urlValues := make(url.Values) | ||
169 | urlValues.Set("ping", "10") | ||
170 | urlValues.Set("prefix", prefix) | ||
171 | urlValues.Set("suffix", suffix) | ||
172 | urlValues["events"] = events | ||
173 | |||
174 | // Wait on the jitter retry loop. | ||
175 | for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { | ||
176 | // Execute GET on bucket to listen for notifications. | ||
177 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
178 | bucketName: bucketName, | ||
179 | queryValues: urlValues, | ||
180 | contentSHA256Hex: emptySHA256Hex, | ||
181 | }) | ||
182 | if err != nil { | ||
183 | select { | ||
184 | case notificationInfoCh <- notification.Info{ | ||
185 | Err: err, | ||
186 | }: | ||
187 | case <-ctx.Done(): | ||
188 | } | ||
189 | return | ||
190 | } | ||
191 | |||
192 | // Validate http response, upon error return quickly. | ||
193 | if resp.StatusCode != http.StatusOK { | ||
194 | errResponse := httpRespToErrorResponse(resp, bucketName, "") | ||
195 | select { | ||
196 | case notificationInfoCh <- notification.Info{ | ||
197 | Err: errResponse, | ||
198 | }: | ||
199 | case <-ctx.Done(): | ||
200 | } | ||
201 | return | ||
202 | } | ||
203 | |||
204 | // Initialize a new bufio scanner, to read line by line. | ||
205 | bio := bufio.NewScanner(resp.Body) | ||
206 | |||
207 | // Use a larger buffer to support unexpected | ||
208 | // caching done by proxies | ||
209 | bio.Buffer(notificationEventBuffer, notificationCapacity) | ||
210 | json := jsoniter.ConfigCompatibleWithStandardLibrary | ||
211 | |||
212 | // Unmarshal each line into a notification.Info value. | ||
213 | for bio.Scan() { | ||
214 | var notificationInfo notification.Info | ||
215 | if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { | ||
216 | // Unexpected error during json unmarshal; send | ||
217 | // the error to the caller so it can act on it. | ||
218 | select { | ||
219 | case notificationInfoCh <- notification.Info{ | ||
220 | Err: err, | ||
221 | }: | ||
222 | case <-ctx.Done(): | ||
223 | return | ||
224 | } | ||
225 | closeResponse(resp) | ||
226 | continue | ||
227 | } | ||
228 | |||
229 | // Skip empty keep-alive events pinged by the server | ||
230 | if len(notificationInfo.Records) == 0 && notificationInfo.Err == nil { | ||
231 | continue | ||
232 | } | ||
233 | |||
234 | // Send notificationInfo | ||
235 | select { | ||
236 | case notificationInfoCh <- notificationInfo: | ||
237 | case <-ctx.Done(): | ||
238 | closeResponse(resp) | ||
239 | return | ||
240 | } | ||
241 | } | ||
242 | |||
243 | if err = bio.Err(); err != nil { | ||
244 | select { | ||
245 | case notificationInfoCh <- notification.Info{ | ||
246 | Err: err, | ||
247 | }: | ||
248 | case <-ctx.Done(): | ||
249 | return | ||
250 | } | ||
251 | } | ||
252 | |||
253 | // Close current connection before looping further. | ||
254 | closeResponse(resp) | ||
255 | |||
256 | } | ||
257 | }(notificationInfoCh) | ||
258 | |||
259 | // Return the notification info channel for the caller to read from. | ||
260 | return notificationInfoCh | ||
261 | } | ||
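This file exposes two complementary calls: GetBucketNotification fetches the static configuration, while ListenBucketNotification streams live events over a long-lived HTTP response. The sketch below is not part of the vendored code; it assumes a pre-configured `*minio.Client` (here `client`) pointing at a MinIO deployment, with `mybucket`, the prefix, and the suffix as placeholders, and it assumes the usual S3 event record schema for the `record.S3.Object.Key` path.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
)

// watchBucket prints the existing notification config, then streams
// object-created events until ctx is cancelled. AWS S3 and GCS
// endpoints are rejected by ListenBucketNotification (see above).
func watchBucket(ctx context.Context, client *minio.Client) {
	cfg, err := client.GetBucketNotification(ctx, "mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("existing notification config: %+v\n", cfg)

	// The returned channel is closed when ctx is done.
	for info := range client.ListenBucketNotification(ctx, "mybucket", "photos/", ".jpg",
		[]string{"s3:ObjectCreated:*"}) {
		if info.Err != nil {
			log.Println("listen error:", info.Err)
			return
		}
		for _, record := range info.Records {
			// Assumes the standard S3 event record layout.
			fmt.Println("new object:", record.S3.Object.Key)
		}
	}
}
```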
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go new file mode 100644 index 0000000..dbb5259 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | package minio | ||
18 | |||
19 | import ( | ||
20 | "context" | ||
21 | "io" | ||
22 | "net/http" | ||
23 | "net/url" | ||
24 | "strings" | ||
25 | |||
26 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
27 | ) | ||
28 | |||
29 | // SetBucketPolicy sets the access permissions on an existing bucket. | ||
30 | func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { | ||
31 | // Input validation. | ||
32 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
33 | return err | ||
34 | } | ||
35 | |||
36 | // If policy is empty then delete the bucket policy. | ||
37 | if policy == "" { | ||
38 | return c.removeBucketPolicy(ctx, bucketName) | ||
39 | } | ||
40 | |||
41 | // Save the updated policies. | ||
42 | return c.putBucketPolicy(ctx, bucketName, policy) | ||
43 | } | ||
44 | |||
45 | // Saves a new bucket policy. | ||
46 | func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { | ||
47 | // Get resources properly escaped and lined up before | ||
48 | // using them in http request. | ||
49 | urlValues := make(url.Values) | ||
50 | urlValues.Set("policy", "") | ||
51 | |||
52 | reqMetadata := requestMetadata{ | ||
53 | bucketName: bucketName, | ||
54 | queryValues: urlValues, | ||
55 | contentBody: strings.NewReader(policy), | ||
56 | contentLength: int64(len(policy)), | ||
57 | } | ||
58 | |||
59 | // Execute PUT to upload a new bucket policy. | ||
60 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
61 | defer closeResponse(resp) | ||
62 | if err != nil { | ||
63 | return err | ||
64 | } | ||
65 | if resp != nil { | ||
66 | if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { | ||
67 | return httpRespToErrorResponse(resp, bucketName, "") | ||
68 | } | ||
69 | } | ||
70 | return nil | ||
71 | } | ||
72 | |||
73 | // Removes all policies on a bucket. | ||
74 | func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error { | ||
75 | // Get resources properly escaped and lined up before | ||
76 | // using them in http request. | ||
77 | urlValues := make(url.Values) | ||
78 | urlValues.Set("policy", "") | ||
79 | |||
80 | // Execute DELETE on objectName. | ||
81 | resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ | ||
82 | bucketName: bucketName, | ||
83 | queryValues: urlValues, | ||
84 | contentSHA256Hex: emptySHA256Hex, | ||
85 | }) | ||
86 | defer closeResponse(resp) | ||
87 | if err != nil { | ||
88 | return err | ||
89 | } | ||
90 | |||
91 | if resp.StatusCode != http.StatusNoContent { | ||
92 | return httpRespToErrorResponse(resp, bucketName, "") | ||
93 | } | ||
94 | |||
95 | return nil | ||
96 | } | ||
97 | |||
98 | // GetBucketPolicy returns the current bucket policy as a JSON string | ||
99 | func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { | ||
100 | // Input validation. | ||
101 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
102 | return "", err | ||
103 | } | ||
104 | bucketPolicy, err := c.getBucketPolicy(ctx, bucketName) | ||
105 | if err != nil { | ||
106 | errResponse := ToErrorResponse(err) | ||
107 | if errResponse.Code == "NoSuchBucketPolicy" { | ||
108 | return "", nil | ||
109 | } | ||
110 | return "", err | ||
111 | } | ||
112 | return bucketPolicy, nil | ||
113 | } | ||
114 | |||
115 | // Request server for current bucket policy. | ||
116 | func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { | ||
117 | // Get resources properly escaped and lined up before | ||
118 | // using them in http request. | ||
119 | urlValues := make(url.Values) | ||
120 | urlValues.Set("policy", "") | ||
121 | |||
122 | // Execute GET on bucket to fetch the current policy. | ||
123 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
124 | bucketName: bucketName, | ||
125 | queryValues: urlValues, | ||
126 | contentSHA256Hex: emptySHA256Hex, | ||
127 | }) | ||
128 | |||
129 | defer closeResponse(resp) | ||
130 | if err != nil { | ||
131 | return "", err | ||
132 | } | ||
133 | |||
134 | if resp != nil { | ||
135 | if resp.StatusCode != http.StatusOK { | ||
136 | return "", httpRespToErrorResponse(resp, bucketName, "") | ||
137 | } | ||
138 | } | ||
139 | |||
140 | bucketPolicyBuf, err := io.ReadAll(resp.Body) | ||
141 | if err != nil { | ||
142 | return "", err | ||
143 | } | ||
144 | |||
145 | policy := string(bucketPolicyBuf) | ||
146 | return policy, nil | ||
147 | } | ||
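A short, hedged usage sketch of the policy APIs above (not part of the vendored code): `client` is assumed to be a configured `*minio.Client`, and the bucket name and policy document are illustrative. Note that passing an empty string to SetBucketPolicy deletes the policy, and GetBucketPolicy maps "NoSuchBucketPolicy" to an empty string with a nil error.

```go
package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// applyReadOnlyPolicy grants anonymous read access to mybucket/*.
// The policy is a plain JSON document passed through unmodified.
func applyReadOnlyPolicy(ctx context.Context, client *minio.Client) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect":    "Allow",
	    "Principal": {"AWS": ["*"]},
	    "Action":    ["s3:GetObject"],
	    "Resource":  ["arn:aws:s3:::mybucket/*"]
	  }]
	}`
	if err := client.SetBucketPolicy(ctx, "mybucket", policy); err != nil {
		return err
	}

	// Read it back; "" with a nil error means no policy is set.
	got, err := client.GetBucketPolicy(ctx, "mybucket")
	if err != nil {
		return err
	}
	fmt.Println("current policy:", got)
	return nil
}
```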
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go new file mode 100644 index 0000000..b12bb13 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go | |||
@@ -0,0 +1,355 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/json" | ||
24 | "encoding/xml" | ||
25 | "io" | ||
26 | "net/http" | ||
27 | "net/url" | ||
28 | "time" | ||
29 | |||
30 | "github.com/google/uuid" | ||
31 | "github.com/minio/minio-go/v7/pkg/replication" | ||
32 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
33 | ) | ||
34 | |||
35 | // RemoveBucketReplication removes a replication config on an existing bucket. | ||
36 | func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { | ||
37 | return c.removeBucketReplication(ctx, bucketName) | ||
38 | } | ||
39 | |||
40 | // SetBucketReplication sets a replication config on an existing bucket. | ||
41 | func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { | ||
42 | // Input validation. | ||
43 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
44 | return err | ||
45 | } | ||
46 | |||
47 | // If replication is empty then delete it. | ||
48 | if cfg.Empty() { | ||
49 | return c.removeBucketReplication(ctx, bucketName) | ||
50 | } | ||
51 | // Save the updated replication. | ||
52 | return c.putBucketReplication(ctx, bucketName, cfg) | ||
53 | } | ||
54 | |||
55 | // Saves a new bucket replication. | ||
56 | func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { | ||
57 | // Get resources properly escaped and lined up before | ||
58 | // using them in http request. | ||
59 | urlValues := make(url.Values) | ||
60 | urlValues.Set("replication", "") | ||
61 | replication, err := xml.Marshal(cfg) | ||
62 | if err != nil { | ||
63 | return err | ||
64 | } | ||
65 | |||
66 | reqMetadata := requestMetadata{ | ||
67 | bucketName: bucketName, | ||
68 | queryValues: urlValues, | ||
69 | contentBody: bytes.NewReader(replication), | ||
70 | contentLength: int64(len(replication)), | ||
71 | contentMD5Base64: sumMD5Base64(replication), | ||
72 | } | ||
73 | |||
74 | // Execute PUT to upload a new bucket replication config. | ||
75 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
76 | defer closeResponse(resp) | ||
77 | if err != nil { | ||
78 | return err | ||
79 | } | ||
80 | |||
81 | if resp.StatusCode != http.StatusOK { | ||
82 | return httpRespToErrorResponse(resp, bucketName, "") | ||
83 | } | ||
84 | |||
85 | return nil | ||
86 | } | ||
87 | |||
88 | // Remove replication from a bucket. | ||
89 | func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error { | ||
90 | // Get resources properly escaped and lined up before | ||
91 | // using them in http request. | ||
92 | urlValues := make(url.Values) | ||
93 | urlValues.Set("replication", "") | ||
94 | |||
95 | // Execute DELETE on objectName. | ||
96 | resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ | ||
97 | bucketName: bucketName, | ||
98 | queryValues: urlValues, | ||
99 | contentSHA256Hex: emptySHA256Hex, | ||
100 | }) | ||
101 | defer closeResponse(resp) | ||
102 | if err != nil { | ||
103 | return err | ||
104 | } | ||
105 | if resp.StatusCode != http.StatusOK { | ||
106 | return httpRespToErrorResponse(resp, bucketName, "") | ||
107 | } | ||
108 | return nil | ||
109 | } | ||
110 | |||
111 | // GetBucketReplication fetches the bucket replication configuration. If the config is not | ||
112 | // found, it returns an empty config with a nil error. | ||
113 | func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { | ||
114 | // Input validation. | ||
115 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
116 | return cfg, err | ||
117 | } | ||
118 | bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName) | ||
119 | if err != nil { | ||
120 | errResponse := ToErrorResponse(err) | ||
121 | if errResponse.Code == "ReplicationConfigurationNotFoundError" { | ||
122 | return cfg, nil | ||
123 | } | ||
124 | return cfg, err | ||
125 | } | ||
126 | return bucketReplicationCfg, nil | ||
127 | } | ||
128 | |||
129 | // Request server for current bucket replication config. | ||
130 | func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { | ||
131 | // Get resources properly escaped and lined up before | ||
132 | // using them in http request. | ||
133 | urlValues := make(url.Values) | ||
134 | urlValues.Set("replication", "") | ||
135 | |||
136 | // Execute GET on bucket to get replication config. | ||
137 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
138 | bucketName: bucketName, | ||
139 | queryValues: urlValues, | ||
140 | }) | ||
141 | |||
142 | defer closeResponse(resp) | ||
143 | if err != nil { | ||
144 | return cfg, err | ||
145 | } | ||
146 | |||
147 | if resp.StatusCode != http.StatusOK { | ||
148 | return cfg, httpRespToErrorResponse(resp, bucketName, "") | ||
149 | } | ||
150 | |||
151 | if err = xmlDecoder(resp.Body, &cfg); err != nil { | ||
152 | return cfg, err | ||
153 | } | ||
154 | |||
155 | return cfg, nil | ||
156 | } | ||
157 | |||
158 | // GetBucketReplicationMetrics fetches bucket replication status metrics | ||
159 | func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) { | ||
160 | // Input validation. | ||
161 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
162 | return s, err | ||
163 | } | ||
164 | // Get resources properly escaped and lined up before | ||
165 | // using them in http request. | ||
166 | urlValues := make(url.Values) | ||
167 | urlValues.Set("replication-metrics", "") | ||
168 | |||
169 | // Execute GET on bucket to get replication config. | ||
170 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
171 | bucketName: bucketName, | ||
172 | queryValues: urlValues, | ||
173 | }) | ||
174 | |||
175 | defer closeResponse(resp) | ||
176 | if err != nil { | ||
177 | return s, err | ||
178 | } | ||
179 | |||
180 | if resp.StatusCode != http.StatusOK { | ||
181 | return s, httpRespToErrorResponse(resp, bucketName, "") | ||
182 | } | ||
183 | respBytes, err := io.ReadAll(resp.Body) | ||
184 | if err != nil { | ||
185 | return s, err | ||
186 | } | ||
187 | |||
188 | if err := json.Unmarshal(respBytes, &s); err != nil { | ||
189 | return s, err | ||
190 | } | ||
191 | return s, nil | ||
192 | } | ||
193 | |||
194 | // mustGetUUID - returns a random UUID string, or an empty string if generation fails. | ||
195 | func mustGetUUID() string { | ||
196 | u, err := uuid.NewRandom() | ||
197 | if err != nil { | ||
198 | return "" | ||
199 | } | ||
200 | return u.String() | ||
201 | } | ||
202 | |||
203 | // ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication | ||
204 | // is enabled in the replication config | ||
205 | func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) { | ||
206 | rID = mustGetUUID() | ||
207 | _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID) | ||
208 | if err != nil { | ||
209 | return rID, err | ||
210 | } | ||
211 | return rID, nil | ||
212 | } | ||
213 | |||
214 | // ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if | ||
215 | // ExistingObjectReplication is enabled in the replication config | ||
216 | func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) { | ||
217 | return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID()) | ||
218 | } | ||
219 | |||
220 | // resetBucketReplicationOnTarget kicks off replication of previously replicated objects if ExistingObjectReplication | ||
221 | // is enabled in the replication config | ||
222 | func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn, resetID string) (rinfo replication.ResyncTargetsInfo, err error) { | ||
223 | // Input validation. | ||
224 | if err = s3utils.CheckValidBucketName(bucketName); err != nil { | ||
225 | return | ||
226 | } | ||
227 | // Get resources properly escaped and lined up before | ||
228 | // using them in http request. | ||
229 | urlValues := make(url.Values) | ||
230 | urlValues.Set("replication-reset", "") | ||
231 | if olderThan > 0 { | ||
232 | urlValues.Set("older-than", olderThan.String()) | ||
233 | } | ||
234 | if tgtArn != "" { | ||
235 | urlValues.Set("arn", tgtArn) | ||
236 | } | ||
237 | urlValues.Set("reset-id", resetID) | ||
238 | // Execute PUT on bucket to reset replication for the target. | ||
239 | resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ | ||
240 | bucketName: bucketName, | ||
241 | queryValues: urlValues, | ||
242 | }) | ||
243 | |||
244 | defer closeResponse(resp) | ||
245 | if err != nil { | ||
246 | return rinfo, err | ||
247 | } | ||
248 | |||
249 | if resp.StatusCode != http.StatusOK { | ||
250 | return rinfo, httpRespToErrorResponse(resp, bucketName, "") | ||
251 | } | ||
252 | |||
253 | if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil { | ||
254 | return rinfo, err | ||
255 | } | ||
256 | return rinfo, nil | ||
257 | } | ||
258 | |||
259 | // GetBucketReplicationResyncStatus gets the status of replication resync | ||
260 | func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) { | ||
261 | // Input validation. | ||
262 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
263 | return rinfo, err | ||
264 | } | ||
265 | // Get resources properly escaped and lined up before | ||
266 | // using them in http request. | ||
267 | urlValues := make(url.Values) | ||
268 | urlValues.Set("replication-reset-status", "") | ||
269 | if arn != "" { | ||
270 | urlValues.Set("arn", arn) | ||
271 | } | ||
272 | // Execute GET on bucket to get the replication resync status. | ||
273 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
274 | bucketName: bucketName, | ||
275 | queryValues: urlValues, | ||
276 | }) | ||
277 | |||
278 | defer closeResponse(resp) | ||
279 | if err != nil { | ||
280 | return rinfo, err | ||
281 | } | ||
282 | |||
283 | if resp.StatusCode != http.StatusOK { | ||
284 | return rinfo, httpRespToErrorResponse(resp, bucketName, "") | ||
285 | } | ||
286 | |||
287 | if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil { | ||
288 | return rinfo, err | ||
289 | } | ||
290 | return rinfo, nil | ||
291 | } | ||
292 | |||
293 | // GetBucketReplicationMetricsV2 fetches bucket replication status metrics | ||
294 | func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) { | ||
295 | // Input validation. | ||
296 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
297 | return s, err | ||
298 | } | ||
299 | // Get resources properly escaped and lined up before | ||
300 | // using them in http request. | ||
301 | urlValues := make(url.Values) | ||
302 | urlValues.Set("replication-metrics", "2") | ||
303 | |||
304 | // Execute GET on bucket to get replication metrics. | ||
305 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
306 | bucketName: bucketName, | ||
307 | queryValues: urlValues, | ||
308 | }) | ||
309 | |||
310 | defer closeResponse(resp) | ||
311 | if err != nil { | ||
312 | return s, err | ||
313 | } | ||
314 | |||
315 | if resp.StatusCode != http.StatusOK { | ||
316 | return s, httpRespToErrorResponse(resp, bucketName, "") | ||
317 | } | ||
318 | respBytes, err := io.ReadAll(resp.Body) | ||
319 | if err != nil { | ||
320 | return s, err | ||
321 | } | ||
322 | |||
323 | if err := json.Unmarshal(respBytes, &s); err != nil { | ||
324 | return s, err | ||
325 | } | ||
326 | return s, nil | ||
327 | } | ||
328 | |||
329 | // CheckBucketReplication validates if replication is set up properly for a bucket | ||
330 | func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) { | ||
331 | // Input validation. | ||
332 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
333 | return err | ||
334 | } | ||
335 | // Get resources properly escaped and lined up before | ||
336 | // using them in http request. | ||
337 | urlValues := make(url.Values) | ||
338 | urlValues.Set("replication-check", "") | ||
339 | |||
340 | // Execute GET on bucket to run the replication check. | ||
341 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
342 | bucketName: bucketName, | ||
343 | queryValues: urlValues, | ||
344 | }) | ||
345 | |||
346 | defer closeResponse(resp) | ||
347 | if err != nil { | ||
348 | return err | ||
349 | } | ||
350 | |||
351 | if resp.StatusCode != http.StatusOK { | ||
352 | return httpRespToErrorResponse(resp, bucketName, "") | ||
353 | } | ||
354 | return nil | ||
355 | } | ||
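Because building a `replication.Config` requires remote replication targets provisioned out of band (for example via the MinIO admin APIs), the hedged sketch below only reads replication state using the calls defined above. It assumes a configured `*minio.Client` against a MinIO deployment with replication already set up; the bucket name is a placeholder.

```go
package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// inspectReplication reports whether replication is configured on the
// bucket and, if so, validates the setup and prints the v2 metrics.
func inspectReplication(ctx context.Context, client *minio.Client) error {
	cfg, err := client.GetBucketReplication(ctx, "mybucket")
	if err != nil {
		return err
	}
	if cfg.Empty() {
		fmt.Println("no replication configured")
		return nil
	}

	// Server-side validation of the replication setup.
	if err := client.CheckBucketReplication(ctx, "mybucket"); err != nil {
		return err
	}
	metrics, err := client.GetBucketReplicationMetricsV2(ctx, "mybucket")
	if err != nil {
		return err
	}
	fmt.Printf("replication metrics: %+v\n", metrics)
	return nil
}
```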
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go new file mode 100644 index 0000000..86d7429 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | package minio | ||
18 | |||
19 | import ( | ||
20 | "bytes" | ||
21 | "context" | ||
22 | "encoding/xml" | ||
23 | "errors" | ||
24 | "io" | ||
25 | "net/http" | ||
26 | "net/url" | ||
27 | |||
28 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
29 | "github.com/minio/minio-go/v7/pkg/tags" | ||
30 | ) | ||
31 | |||
32 | // GetBucketTagging fetches the tagging configuration for a bucket with a | ||
33 | // context to control cancellations and timeouts. | ||
34 | func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { | ||
35 | // Input validation. | ||
36 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
37 | return nil, err | ||
38 | } | ||
39 | |||
40 | // Get resources properly escaped and lined up before | ||
41 | // using them in http request. | ||
42 | urlValues := make(url.Values) | ||
43 | urlValues.Set("tagging", "") | ||
44 | |||
45 | // Execute GET on bucket to get tagging configuration. | ||
46 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
47 | bucketName: bucketName, | ||
48 | queryValues: urlValues, | ||
49 | }) | ||
50 | |||
51 | defer closeResponse(resp) | ||
52 | if err != nil { | ||
53 | return nil, err | ||
54 | } | ||
55 | |||
56 | if resp.StatusCode != http.StatusOK { | ||
57 | return nil, httpRespToErrorResponse(resp, bucketName, "") | ||
58 | } | ||
59 | |||
60 | defer io.Copy(io.Discard, resp.Body) | ||
61 | return tags.ParseBucketXML(resp.Body) | ||
62 | } | ||
63 | |||
64 | // SetBucketTagging sets tagging configuration for a bucket | ||
65 | // with a context to control cancellations and timeouts. | ||
66 | func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { | ||
67 | // Input validation. | ||
68 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
69 | return err | ||
70 | } | ||
71 | |||
72 | if tags == nil { | ||
73 | return errors.New("nil tags passed") | ||
74 | } | ||
75 | |||
76 | buf, err := xml.Marshal(tags) | ||
77 | if err != nil { | ||
78 | return err | ||
79 | } | ||
80 | |||
81 | // Get resources properly escaped and lined up before | ||
82 | // using them in http request. | ||
83 | urlValues := make(url.Values) | ||
84 | urlValues.Set("tagging", "") | ||
85 | |||
86 | // Content-length is mandatory to set a bucket tagging configuration | ||
87 | reqMetadata := requestMetadata{ | ||
88 | bucketName: bucketName, | ||
89 | queryValues: urlValues, | ||
90 | contentBody: bytes.NewReader(buf), | ||
91 | contentLength: int64(len(buf)), | ||
92 | contentMD5Base64: sumMD5Base64(buf), | ||
93 | } | ||
94 | |||
95 | // Execute PUT on bucket to put tagging configuration. | ||
96 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
97 | defer closeResponse(resp) | ||
98 | if err != nil { | ||
99 | return err | ||
100 | } | ||
101 | if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { | ||
102 | return httpRespToErrorResponse(resp, bucketName, "") | ||
103 | } | ||
104 | return nil | ||
105 | } | ||
106 | |||
107 | // RemoveBucketTagging removes tagging configuration for a | ||
108 | // bucket with a context to control cancellations and timeouts. | ||
109 | func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { | ||
110 | // Input validation. | ||
111 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
112 | return err | ||
113 | } | ||
114 | |||
115 | // Get resources properly escaped and lined up before | ||
116 | // using them in http request. | ||
117 | urlValues := make(url.Values) | ||
118 | urlValues.Set("tagging", "") | ||
119 | |||
120 | // Execute DELETE on bucket to remove tagging configuration. | ||
121 | resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ | ||
122 | bucketName: bucketName, | ||
123 | queryValues: urlValues, | ||
124 | contentSHA256Hex: emptySHA256Hex, | ||
125 | }) | ||
126 | defer closeResponse(resp) | ||
127 | if err != nil { | ||
128 | return err | ||
129 | } | ||
130 | if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { | ||
131 | return httpRespToErrorResponse(resp, bucketName, "") | ||
132 | } | ||
133 | return nil | ||
134 | } | ||
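A hedged end-to-end sketch for the tagging APIs above (not part of the vendored code): `tags.NewTags` from `pkg/tags` (already imported by this file) validates the map, with `false` marking these as bucket rather than object tags. The client, tag values, and bucket name are placeholders.

```go
package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/tags"
)

// tagBucket sets bucket tags, reads them back, and finally removes them.
func tagBucket(ctx context.Context, client *minio.Client) error {
	t, err := tags.NewTags(map[string]string{"team": "storage", "env": "dev"}, false)
	if err != nil {
		return err
	}
	if err := client.SetBucketTagging(ctx, "mybucket", t); err != nil {
		return err
	}

	got, err := client.GetBucketTagging(ctx, "mybucket")
	if err != nil {
		return err
	}
	fmt.Println("bucket tags:", got.ToMap())

	return client.RemoveBucketTagging(ctx, "mybucket")
}
```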
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go new file mode 100644 index 0000000..8c84e4f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go | |||
@@ -0,0 +1,146 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | package minio | ||
18 | |||
19 | import ( | ||
20 | "bytes" | ||
21 | "context" | ||
22 | "encoding/xml" | ||
23 | "net/http" | ||
24 | "net/url" | ||
25 | |||
26 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
27 | ) | ||
28 | |||
29 | // SetBucketVersioning sets a bucket versioning configuration | ||
30 | func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { | ||
31 | // Input validation. | ||
32 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
33 | return err | ||
34 | } | ||
35 | |||
36 | buf, err := xml.Marshal(config) | ||
37 | if err != nil { | ||
38 | return err | ||
39 | } | ||
40 | |||
41 | // Get resources properly escaped and lined up before | ||
42 | // using them in http request. | ||
43 | urlValues := make(url.Values) | ||
44 | urlValues.Set("versioning", "") | ||
45 | |||
46 | reqMetadata := requestMetadata{ | ||
47 | bucketName: bucketName, | ||
48 | queryValues: urlValues, | ||
49 | contentBody: bytes.NewReader(buf), | ||
50 | contentLength: int64(len(buf)), | ||
51 | contentMD5Base64: sumMD5Base64(buf), | ||
52 | contentSHA256Hex: sum256Hex(buf), | ||
53 | } | ||
54 | |||
55 | // Execute PUT to set a bucket versioning. | ||
56 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
57 | defer closeResponse(resp) | ||
58 | if err != nil { | ||
59 | return err | ||
60 | } | ||
61 | if resp != nil { | ||
62 | if resp.StatusCode != http.StatusOK { | ||
63 | return httpRespToErrorResponse(resp, bucketName, "") | ||
64 | } | ||
65 | } | ||
66 | return nil | ||
67 | } | ||
68 | |||
69 | // EnableVersioning - enable object versioning in given bucket. | ||
70 | func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error { | ||
71 | return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) | ||
72 | } | ||
73 | |||
74 | // SuspendVersioning - suspend object versioning in given bucket. | ||
75 | func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error { | ||
76 | return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) | ||
77 | } | ||
78 | |||
79 | // ExcludedPrefix - holds individual prefixes excluded from being versioned. | ||
80 | type ExcludedPrefix struct { | ||
81 | Prefix string | ||
82 | } | ||
83 | |||
84 | // BucketVersioningConfiguration is the versioning configuration structure | ||
85 | type BucketVersioningConfiguration struct { | ||
86 | XMLName xml.Name `xml:"VersioningConfiguration"` | ||
87 | Status string `xml:"Status"` | ||
88 | MFADelete string `xml:"MfaDelete,omitempty"` | ||
89 | // MinIO extension - allows selective, prefix-level versioning exclusion. | ||
90 | // Requires versioning to be enabled | ||
91 | ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"` | ||
92 | ExcludeFolders bool `xml:",omitempty"` | ||
93 | } | ||
94 | |||
95 | // Various supported states | ||
96 | const ( | ||
97 | Enabled = "Enabled" | ||
98 | // Disabled State = "Disabled" only used by MFA Delete not supported yet. | ||
99 | Suspended = "Suspended" | ||
100 | ) | ||
101 | |||
102 | // Enabled returns true if bucket versioning is enabled | ||
103 | func (b BucketVersioningConfiguration) Enabled() bool { | ||
104 | return b.Status == Enabled | ||
105 | } | ||
106 | |||
107 | // Suspended returns true if bucket versioning is suspended | ||
108 | func (b BucketVersioningConfiguration) Suspended() bool { | ||
109 | return b.Status == Suspended | ||
110 | } | ||
111 | |||
112 | // GetBucketVersioning gets the versioning configuration on | ||
113 | // an existing bucket with a context to control cancellations and timeouts. | ||
114 | func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { | ||
115 | // Input validation. | ||
116 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
117 | return BucketVersioningConfiguration{}, err | ||
118 | } | ||
119 | |||
120 | // Get resources properly escaped and lined up before | ||
121 | // using them in http request. | ||
122 | urlValues := make(url.Values) | ||
123 | urlValues.Set("versioning", "") | ||
124 | |||
125 | // Execute GET on bucket to get the versioning configuration. | ||
126 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
127 | bucketName: bucketName, | ||
128 | queryValues: urlValues, | ||
129 | }) | ||
130 | |||
131 | defer closeResponse(resp) | ||
132 | if err != nil { | ||
133 | return BucketVersioningConfiguration{}, err | ||
134 | } | ||
135 | |||
136 | if resp.StatusCode != http.StatusOK { | ||
137 | return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "") | ||
138 | } | ||
139 | |||
140 | versioningConfig := BucketVersioningConfiguration{} | ||
141 | if err = xmlDecoder(resp.Body, &versioningConfig); err != nil { | ||
142 | return versioningConfig, err | ||
143 | } | ||
144 | |||
145 | return versioningConfig, nil | ||
146 | } | ||
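A minimal sketch tying together the versioning helpers above; it is not part of the vendored code, and `client` and the bucket name are placeholders. EnableVersioning and SuspendVersioning are thin wrappers over SetBucketVersioning with the Status field set accordingly.

```go
package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// toggleVersioning enables versioning, confirms the state, and then
// suspends it again.
func toggleVersioning(ctx context.Context, client *minio.Client) error {
	if err := client.EnableVersioning(ctx, "mybucket"); err != nil {
		return err
	}

	vc, err := client.GetBucketVersioning(ctx, "mybucket")
	if err != nil {
		return err
	}
	fmt.Println("versioning enabled:", vc.Enabled())

	return client.SuspendVersioning(ctx, "mybucket")
}
```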
diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go new file mode 100644 index 0000000..e64a244 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go | |||
@@ -0,0 +1,594 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017, 2018 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "fmt" | ||
23 | "io" | ||
24 | "net/http" | ||
25 | "net/url" | ||
26 | "strconv" | ||
27 | "strings" | ||
28 | "time" | ||
29 | |||
30 | "github.com/google/uuid" | ||
31 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
32 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
33 | ) | ||
34 | |||
35 | // CopyDestOptions represents options specified by the user for the CopyObject/ComposeObject APIs | ||
36 | type CopyDestOptions struct { | ||
37 | Bucket string // points to destination bucket | ||
38 | Object string // points to destination object | ||
39 | |||
40 | // `Encryption` is the key info for server-side-encryption with customer | ||
41 | // provided key. If it is nil, no encryption is performed. | ||
42 | Encryption encrypt.ServerSide | ||
43 | |||
44 | // `UserMetadata` is the user-metadata key-value pairs to be set on the | ||
45 | // destination. The keys are automatically prefixed with `x-amz-meta-` | ||
46 | // if needed. If nil is passed, and if only a single source (of any | ||
47 | // size) is provided in the ComposeObject call, then metadata from the | ||
48 | // source is copied to the destination. | ||
49 | // If no user-metadata is provided, it is copied from the source | ||
50 | // (when there is only one source object in the compose | ||
51 | // request). | ||
52 | UserMetadata map[string]string | ||
53 | // UserMetadata is only set on the destination if ReplaceMetadata is true; | ||
54 | // otherwise UserMetadata is ignored and src.UserMetadata is preserved. | ||
55 | // NOTE: if you set this value to true and no metadata is present | ||
56 | // in UserMetadata, your destination object will not have any metadata | ||
57 | // set. | ||
58 | ReplaceMetadata bool | ||
59 | |||
60 | // `UserTags` are the user-defined object tags to be set on the destination. | ||
61 | // They are applied only if the `ReplaceTags` field is set to true; | ||
62 | // otherwise this field is ignored. | ||
63 | UserTags map[string]string | ||
64 | ReplaceTags bool | ||
65 | |||
66 | // Specifies whether you want to apply a Legal Hold to the copied object. | ||
67 | LegalHold LegalHoldStatus | ||
68 | |||
69 | // Object Retention related fields | ||
70 | Mode RetentionMode | ||
71 | RetainUntilDate time.Time | ||
72 | |||
73 | Size int64 // Needs to be specified if progress bar is specified. | ||
74 | // Progress of the entire copy operation will be sent here. | ||
75 | Progress io.Reader | ||
76 | } | ||
77 | |||
78 | // Process custom metadata to remove an `x-amz-meta-` prefix if | ||
79 | // present and drop duplicate keys (after this prefix removal), | ||
80 | // keeping the first occurrence. | ||
81 | func filterCustomMeta(userMeta map[string]string) map[string]string { | ||
82 | m := make(map[string]string) | ||
83 | for k, v := range userMeta { | ||
84 | if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { | ||
85 | k = k[len("x-amz-meta-"):] | ||
86 | } | ||
87 | if _, ok := m[k]; ok { | ||
88 | continue | ||
89 | } | ||
90 | m[k] = v | ||
91 | } | ||
92 | return m | ||
93 | } | ||
94 | |||
95 | // Marshal converts all the CopyDestOptions into their | ||
96 | // equivalent HTTP header representation | ||
97 | func (opts CopyDestOptions) Marshal(header http.Header) { | ||
98 | const replaceDirective = "REPLACE" | ||
99 | if opts.ReplaceTags { | ||
100 | header.Set(amzTaggingHeaderDirective, replaceDirective) | ||
101 | if tags := s3utils.TagEncode(opts.UserTags); tags != "" { | ||
102 | header.Set(amzTaggingHeader, tags) | ||
103 | } | ||
104 | } | ||
105 | |||
106 | if opts.LegalHold != LegalHoldStatus("") { | ||
107 | header.Set(amzLegalHoldHeader, opts.LegalHold.String()) | ||
108 | } | ||
109 | |||
110 | if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() { | ||
111 | header.Set(amzLockMode, opts.Mode.String()) | ||
112 | header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339)) | ||
113 | } | ||
114 | |||
115 | if opts.Encryption != nil { | ||
116 | opts.Encryption.Marshal(header) | ||
117 | } | ||
118 | |||
119 | if opts.ReplaceMetadata { | ||
120 | header.Set("x-amz-metadata-directive", replaceDirective) | ||
121 | for k, v := range filterCustomMeta(opts.UserMetadata) { | ||
122 | if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { | ||
123 | header.Set(k, v) | ||
124 | } else { | ||
125 | header.Set("x-amz-meta-"+k, v) | ||
126 | } | ||
127 | } | ||
128 | } | ||
129 | } | ||
130 | |||
131 | // validate checks that the CopyDestOptions fields are well-formed. | ||
132 | func (opts CopyDestOptions) validate() (err error) { | ||
133 | // Input validation. | ||
134 | if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { | ||
135 | return err | ||
136 | } | ||
137 | if err = s3utils.CheckValidObjectName(opts.Object); err != nil { | ||
138 | return err | ||
139 | } | ||
140 | if opts.Progress != nil && opts.Size < 0 { | ||
141 | return errInvalidArgument("For progress bar effective size needs to be specified") | ||
142 | } | ||
143 | return nil | ||
144 | } | ||
145 | |||
146 | // CopySrcOptions represents a source object to be copied, using | ||
147 | // server-side copying APIs. | ||
148 | type CopySrcOptions struct { | ||
149 | Bucket, Object string | ||
150 | VersionID string | ||
151 | MatchETag string | ||
152 | NoMatchETag string | ||
153 | MatchModifiedSince time.Time | ||
154 | MatchUnmodifiedSince time.Time | ||
155 | MatchRange bool | ||
156 | Start, End int64 | ||
157 | Encryption encrypt.ServerSide | ||
158 | } | ||
159 | |||
160 | // Marshal converts all the CopySrcOptions into their | ||
161 | // equivalent HTTP header representation | ||
162 | func (opts CopySrcOptions) Marshal(header http.Header) { | ||
163 | // Set the source header | ||
164 | header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)) | ||
165 | if opts.VersionID != "" { | ||
166 | header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID) | ||
167 | } | ||
168 | |||
169 | if opts.MatchETag != "" { | ||
170 | header.Set("x-amz-copy-source-if-match", opts.MatchETag) | ||
171 | } | ||
172 | if opts.NoMatchETag != "" { | ||
173 | header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag) | ||
174 | } | ||
175 | |||
176 | if !opts.MatchModifiedSince.IsZero() { | ||
177 | header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat)) | ||
178 | } | ||
179 | if !opts.MatchUnmodifiedSince.IsZero() { | ||
180 | header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat)) | ||
181 | } | ||
182 | |||
183 | if opts.Encryption != nil { | ||
184 | encrypt.SSECopy(opts.Encryption).Marshal(header) | ||
185 | } | ||
186 | } | ||
187 | |||
188 | func (opts CopySrcOptions) validate() (err error) { | ||
189 | // Input validation. | ||
190 | if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { | ||
191 | return err | ||
192 | } | ||
193 | if err = s3utils.CheckValidObjectName(opts.Object); err != nil { | ||
194 | return err | ||
195 | } | ||
196 | if opts.Start > opts.End || opts.Start < 0 { | ||
197 | return errInvalidArgument("start must be non-negative, and start must be at most end.") | ||
198 | } | ||
199 | return nil | ||
200 | } | ||
201 | |||
202 | // Low-level implementation of the CopyObject API; supports only up to 5GiB worth of copy. | ||
203 | func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, | ||
204 | metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions, | ||
205 | ) (ObjectInfo, error) { | ||
206 | // Build headers. | ||
207 | headers := make(http.Header) | ||
208 | |||
209 | // Set all the metadata headers. | ||
210 | for k, v := range metadata { | ||
211 | headers.Set(k, v) | ||
212 | } | ||
213 | if !dstOpts.Internal.ReplicationStatus.Empty() { | ||
214 | headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus)) | ||
215 | } | ||
216 | if !dstOpts.Internal.SourceMTime.IsZero() { | ||
217 | headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano)) | ||
218 | } | ||
219 | if dstOpts.Internal.SourceETag != "" { | ||
220 | headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) | ||
221 | } | ||
222 | if dstOpts.Internal.ReplicationRequest { | ||
223 | headers.Set(minIOBucketReplicationRequest, "true") | ||
224 | } | ||
225 | if dstOpts.Internal.ReplicationValidityCheck { | ||
226 | headers.Set(minIOBucketReplicationCheck, "true") | ||
227 | } | ||
228 | if !dstOpts.Internal.LegalholdTimestamp.IsZero() { | ||
229 | headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) | ||
230 | } | ||
231 | if !dstOpts.Internal.RetentionTimestamp.IsZero() { | ||
232 | headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) | ||
233 | } | ||
234 | if !dstOpts.Internal.TaggingTimestamp.IsZero() { | ||
235 | headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano)) | ||
236 | } | ||
237 | |||
238 | if len(dstOpts.UserTags) != 0 { | ||
239 | headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags)) | ||
240 | } | ||
241 | |||
242 | reqMetadata := requestMetadata{ | ||
243 | bucketName: destBucket, | ||
244 | objectName: destObject, | ||
245 | customHeader: headers, | ||
246 | } | ||
247 | if dstOpts.Internal.SourceVersionID != "" { | ||
248 | if dstOpts.Internal.SourceVersionID != nullVersionID { | ||
249 | if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil { | ||
250 | return ObjectInfo{}, errInvalidArgument(err.Error()) | ||
251 | } | ||
252 | } | ||
253 | urlValues := make(url.Values) | ||
254 | urlValues.Set("versionId", dstOpts.Internal.SourceVersionID) | ||
255 | reqMetadata.queryValues = urlValues | ||
256 | } | ||
257 | |||
258 | // Set the source header | ||
259 | headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) | ||
260 | if srcOpts.VersionID != "" { | ||
261 | headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID) | ||
262 | } | ||
263 | // Send copy-object request | ||
264 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
265 | defer closeResponse(resp) | ||
266 | if err != nil { | ||
267 | return ObjectInfo{}, err | ||
268 | } | ||
269 | |||
270 | // Check if we got an error response. | ||
271 | if resp.StatusCode != http.StatusOK { | ||
272 | return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) | ||
273 | } | ||
274 | |||
275 | cpObjRes := copyObjectResult{} | ||
276 | err = xmlDecoder(resp.Body, &cpObjRes) | ||
277 | if err != nil { | ||
278 | return ObjectInfo{}, err | ||
279 | } | ||
280 | |||
281 | objInfo := ObjectInfo{ | ||
282 | Key: destObject, | ||
283 | ETag: strings.Trim(cpObjRes.ETag, "\""), | ||
284 | LastModified: cpObjRes.LastModified, | ||
285 | } | ||
286 | return objInfo, nil | ||
287 | } | ||
288 | |||
289 | func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, | ||
290 | partID int, startOffset, length int64, metadata map[string]string, | ||
291 | ) (p CompletePart, err error) { | ||
292 | headers := make(http.Header) | ||
293 | |||
294 | // Set source | ||
295 | headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) | ||
296 | |||
297 | if startOffset < 0 { | ||
298 | return p, errInvalidArgument("startOffset must be non-negative") | ||
299 | } | ||
300 | |||
301 | if length >= 0 { | ||
302 | headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) | ||
303 | } | ||
304 | |||
305 | for k, v := range metadata { | ||
306 | headers.Set(k, v) | ||
307 | } | ||
308 | |||
309 | queryValues := make(url.Values) | ||
310 | queryValues.Set("partNumber", strconv.Itoa(partID)) | ||
311 | queryValues.Set("uploadId", uploadID) | ||
312 | |||
313 | resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ | ||
314 | bucketName: destBucket, | ||
315 | objectName: destObject, | ||
316 | customHeader: headers, | ||
317 | queryValues: queryValues, | ||
318 | }) | ||
319 | defer closeResponse(resp) | ||
320 | if err != nil { | ||
321 | return | ||
322 | } | ||
323 | |||
324 | // Check if we got an error response. | ||
325 | if resp.StatusCode != http.StatusOK { | ||
326 | return p, httpRespToErrorResponse(resp, destBucket, destObject) | ||
327 | } | ||
328 | |||
329 | // Decode copy-part response on success. | ||
330 | cpObjRes := copyObjectResult{} | ||
331 | err = xmlDecoder(resp.Body, &cpObjRes) | ||
332 | if err != nil { | ||
333 | return p, err | ||
334 | } | ||
335 | p.PartNumber, p.ETag = partID, cpObjRes.ETag | ||
336 | return p, nil | ||
337 | } | ||
338 | |||
339 | // uploadPartCopy - helper function to create a part in a multipart | ||
340 | // upload via an upload-part-copy request | ||
341 | // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html | ||
342 | func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, | ||
343 | headers http.Header, | ||
344 | ) (p CompletePart, err error) { | ||
345 | // Build query parameters | ||
346 | urlValues := make(url.Values) | ||
347 | urlValues.Set("partNumber", strconv.Itoa(partNumber)) | ||
348 | urlValues.Set("uploadId", uploadID) | ||
349 | |||
350 | // Send upload-part-copy request | ||
351 | resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ | ||
352 | bucketName: bucket, | ||
353 | objectName: object, | ||
354 | customHeader: headers, | ||
355 | queryValues: urlValues, | ||
356 | }) | ||
357 | defer closeResponse(resp) | ||
358 | if err != nil { | ||
359 | return p, err | ||
360 | } | ||
361 | |||
362 | // Check if we got an error response. | ||
363 | if resp.StatusCode != http.StatusOK { | ||
364 | return p, httpRespToErrorResponse(resp, bucket, object) | ||
365 | } | ||
366 | |||
367 | // Decode copy-part response on success. | ||
368 | cpObjRes := copyObjectResult{} | ||
369 | err = xmlDecoder(resp.Body, &cpObjRes) | ||
370 | if err != nil { | ||
371 | return p, err | ||
372 | } | ||
373 | p.PartNumber, p.ETag = partNumber, cpObjRes.ETag | ||
374 | return p, nil | ||
375 | } | ||
376 | |||
377 | // ComposeObject - creates an object using server-side copying | ||
378 | // of existing objects. It takes a list of source objects (with optional offsets) | ||
379 | // and concatenates them into a new object using only server-side copying | ||
380 | // operations. Optionally takes progress reader hook for applications to | ||
381 | // look at current progress. | ||
382 | func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) { | ||
383 | if len(srcs) < 1 || len(srcs) > maxPartsCount { | ||
384 | return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.") | ||
385 | } | ||
386 | |||
387 | for _, src := range srcs { | ||
388 | if err := src.validate(); err != nil { | ||
389 | return UploadInfo{}, err | ||
390 | } | ||
391 | } | ||
392 | |||
393 | if err := dst.validate(); err != nil { | ||
394 | return UploadInfo{}, err | ||
395 | } | ||
396 | |||
397 | srcObjectInfos := make([]ObjectInfo, len(srcs)) | ||
398 | srcObjectSizes := make([]int64, len(srcs)) | ||
399 | var totalSize, totalParts int64 | ||
400 | var err error | ||
401 | for i, src := range srcs { | ||
402 | opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID} | ||
403 | srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts) | ||
404 | if err != nil { | ||
405 | return UploadInfo{}, err | ||
406 | } | ||
407 | |||
408 | srcCopySize := srcObjectInfos[i].Size | ||
409 | // Check if a segment is specified, and if so, is the | ||
410 | // segment within object bounds? | ||
411 | if src.MatchRange { | ||
412 | // Since range is specified, | ||
413 | // 0 <= src.start <= src.end | ||
414 | // so only invalid case to check is: | ||
415 | if src.End >= srcCopySize || src.Start < 0 { | ||
416 | return UploadInfo{}, errInvalidArgument( | ||
417 | fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)", | ||
418 | i, src.Start, src.End, srcCopySize)) | ||
419 | } | ||
420 | srcCopySize = src.End - src.Start + 1 | ||
421 | } | ||
422 | |||
423 | // Only the last source may be less than `absMinPartSize` | ||
424 | if srcCopySize < absMinPartSize && i < len(srcs)-1 { | ||
425 | return UploadInfo{}, errInvalidArgument( | ||
426 | fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize)) | ||
427 | } | ||
428 | |||
429 | // Is data to copy too large? | ||
430 | totalSize += srcCopySize | ||
431 | if totalSize > maxMultipartPutObjectSize { | ||
432 | return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) | ||
433 | } | ||
434 | |||
435 | // record source size | ||
436 | srcObjectSizes[i] = srcCopySize | ||
437 | |||
438 | // calculate parts needed for current source | ||
439 | totalParts += partsRequired(srcCopySize) | ||
440 | // Do we need more parts than we are allowed? | ||
441 | if totalParts > maxPartsCount { | ||
442 | return UploadInfo{}, errInvalidArgument(fmt.Sprintf( | ||
443 | "Your proposed compose object requires more than %d parts", maxPartsCount)) | ||
444 | } | ||
445 | } | ||
446 | |||
447 | // Single source object case (i.e. when only one source is | ||
448 | // involved, it is being copied wholly and is at most 5GiB in | ||
449 | // size; empty files are also supported). | ||
450 | if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { | ||
451 | return c.CopyObject(ctx, dst, srcs[0]) | ||
452 | } | ||
453 | |||
454 | // Now, handle multipart-copy cases. | ||
455 | |||
456 | // 1. Ensure that the object has not been changed while | ||
457 | // we are copying data. | ||
458 | for i := range srcs { | ||
459 | srcs[i].MatchETag = srcObjectInfos[i].ETag | ||
460 | } | ||
461 | |||
462 | // 2. Initiate a new multipart upload. | ||
463 | |||
464 | // Set user-metadata on the destination object. If no | ||
465 | // user-metadata is specified, metadata from the first source | ||
466 | // is copied to the destination. | ||
467 | var userMeta map[string]string | ||
468 | if dst.ReplaceMetadata { | ||
469 | userMeta = dst.UserMetadata | ||
470 | } else { | ||
471 | userMeta = srcObjectInfos[0].UserMetadata | ||
472 | } | ||
473 | |||
474 | var userTags map[string]string | ||
475 | if dst.ReplaceTags { | ||
476 | userTags = dst.UserTags | ||
477 | } else { | ||
478 | userTags = srcObjectInfos[0].UserTags | ||
479 | } | ||
480 | |||
481 | uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{ | ||
482 | ServerSideEncryption: dst.Encryption, | ||
483 | UserMetadata: userMeta, | ||
484 | UserTags: userTags, | ||
485 | Mode: dst.Mode, | ||
486 | RetainUntilDate: dst.RetainUntilDate, | ||
487 | LegalHold: dst.LegalHold, | ||
488 | }) | ||
489 | if err != nil { | ||
490 | return UploadInfo{}, err | ||
491 | } | ||
492 | |||
493 | // 3. Perform copy part uploads | ||
494 | objParts := []CompletePart{} | ||
495 | partIndex := 1 | ||
496 | for i, src := range srcs { | ||
497 | h := make(http.Header) | ||
498 | src.Marshal(h) | ||
499 | if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC { | ||
500 | dst.Encryption.Marshal(h) | ||
501 | } | ||
502 | |||
503 | // calculate start/end indices of parts after | ||
504 | // splitting. | ||
505 | startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src) | ||
506 | for j, start := range startIdx { | ||
507 | end := endIdx[j] | ||
508 | |||
509 | // Add (or reset) source range header for | ||
510 | // upload part copy request. | ||
511 | h.Set("x-amz-copy-source-range", | ||
512 | fmt.Sprintf("bytes=%d-%d", start, end)) | ||
513 | |||
514 | // make upload-part-copy request | ||
515 | complPart, err := c.uploadPartCopy(ctx, dst.Bucket, | ||
516 | dst.Object, uploadID, partIndex, h) | ||
517 | if err != nil { | ||
518 | return UploadInfo{}, err | ||
519 | } | ||
520 | if dst.Progress != nil { | ||
521 | io.CopyN(io.Discard, dst.Progress, end-start+1) | ||
522 | } | ||
523 | objParts = append(objParts, complPart) | ||
524 | partIndex++ | ||
525 | } | ||
526 | } | ||
527 | |||
528 | // 4. Make final complete-multipart request. | ||
529 | uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, | ||
530 | completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption}) | ||
531 | if err != nil { | ||
532 | return UploadInfo{}, err | ||
533 | } | ||
534 | |||
535 | uploadInfo.Size = totalSize | ||
536 | return uploadInfo, nil | ||
537 | } | ||
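Taken together, steps 1-4 above are what a caller drives through ComposeObject. A minimal usage sketch follows; the endpoint, credentials, and bucket/object names are placeholders, not values from this library:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	// Concatenate two source objects into one destination object.
	// Each source is split into copy-parts by calculateEvenSplits.
	srcs := []minio.CopySrcOptions{
		{Bucket: "srcbucket", Object: "part-one"},
		{Bucket: "srcbucket", Object: "part-two"},
	}
	dst := minio.CopyDestOptions{Bucket: "dstbucket", Object: "combined"}

	info, err := client.ComposeObject(context.Background(), dst, srcs...)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("composed size:", info.Size)
}
```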
538 | |||
539 | // partsRequired computes the number of parts needed to copy `size` bytes, | ||
540 | // given a part size of maxMultipartPutObjectSize / (maxPartsCount - 1). | ||
541 | func partsRequired(size int64) int64 { | ||
542 | maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1) | ||
543 | r := size / int64(maxPartSize) | ||
544 | if size%int64(maxPartSize) > 0 { | ||
545 | r++ | ||
546 | } | ||
547 | return r | ||
548 | } | ||
549 | |||
550 | // calculateEvenSplits - computes splits for a source and returns | ||
551 | // start and end index slices. Splits happen evenly to be sure that no | ||
552 | // part is less than 5MiB, as that could fail the multipart request if | ||
553 | // it is not the last part. | ||
554 | func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) { | ||
555 | if size == 0 { | ||
556 | return | ||
557 | } | ||
558 | |||
559 | reqParts := partsRequired(size) | ||
560 | startIndex = make([]int64, reqParts) | ||
561 | endIndex = make([]int64, reqParts) | ||
562 | // Compute number of required parts `k`, as: | ||
563 | // | ||
564 | // k = partsRequired(size) = ceiling(size / partSize) | ||
565 | // | ||
566 | // Now, distribute the `size` bytes in the source into | ||
567 | // k parts as evenly as possible: | ||
568 | // | ||
569 | // r parts sized (q+1) bytes, and | ||
570 | // (k - r) parts sized q bytes, where | ||
571 | // | ||
572 | // size = q * k + r (by simple division of size by k, | ||
573 | // so that 0 <= r < k) | ||
574 | // | ||
575 | start := src.Start | ||
576 | if start == -1 { | ||
577 | start = 0 | ||
578 | } | ||
579 | quot, rem := size/reqParts, size%reqParts | ||
580 | nextStart := start | ||
581 | for j := int64(0); j < reqParts; j++ { | ||
582 | curPartSize := quot | ||
583 | if j < rem { | ||
584 | curPartSize++ | ||
585 | } | ||
586 | |||
587 | cStart := nextStart | ||
588 | cEnd := cStart + curPartSize - 1 | ||
589 | nextStart = cEnd + 1 | ||
590 | |||
591 | startIndex[j], endIndex[j] = cStart, cEnd | ||
592 | } | ||
593 | return | ||
594 | } | ||
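To make the arithmetic concrete, here is a standalone sketch (not part of the library) that mirrors the loop above for a source starting at offset 0, followed by a worked case: for size = 10 and k = 3 parts, q = 3 and r = 1, giving ranges 0-3, 4-6, and 7-9.

```go
// evenSplits mirrors calculateEvenSplits for illustration only.
func evenSplits(size, k int64) (starts, ends []int64) {
	q, r := size/k, size%k // size = q*k + r, with 0 <= r < k
	next := int64(0)
	for j := int64(0); j < k; j++ {
		cur := q
		if j < r {
			cur++ // the first r parts each carry one extra byte
		}
		starts = append(starts, next)
		ends = append(ends, next+cur-1)
		next += cur
	}
	return starts, ends
}

// evenSplits(10, 3) returns starts=[0 4 7], ends=[3 6 9].
```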
diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go new file mode 100644 index 0000000..0c95d91 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017, 2018 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "io" | ||
23 | "net/http" | ||
24 | ) | ||
25 | |||
26 | // CopyObject - copy a source object into a new object | ||
27 | func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { | ||
28 | if err := src.validate(); err != nil { | ||
29 | return UploadInfo{}, err | ||
30 | } | ||
31 | |||
32 | if err := dst.validate(); err != nil { | ||
33 | return UploadInfo{}, err | ||
34 | } | ||
35 | |||
36 | header := make(http.Header) | ||
37 | dst.Marshal(header) | ||
38 | src.Marshal(header) | ||
39 | |||
40 | resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ | ||
41 | bucketName: dst.Bucket, | ||
42 | objectName: dst.Object, | ||
43 | customHeader: header, | ||
44 | }) | ||
45 | if err != nil { | ||
46 | return UploadInfo{}, err | ||
47 | } | ||
48 | defer closeResponse(resp) | ||
49 | |||
50 | if resp.StatusCode != http.StatusOK { | ||
51 | return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object) | ||
52 | } | ||
53 | |||
54 | // Update the progress properly after successful copy. | ||
55 | if dst.Progress != nil { | ||
56 | io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size)) | ||
57 | } | ||
58 | |||
59 | cpObjRes := copyObjectResult{} | ||
60 | if err = xmlDecoder(resp.Body, &cpObjRes); err != nil { | ||
61 | return UploadInfo{}, err | ||
62 | } | ||
63 | |||
64 | // extract lifecycle expiry date and rule ID | ||
65 | expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) | ||
66 | |||
67 | return UploadInfo{ | ||
68 | Bucket: dst.Bucket, | ||
69 | Key: dst.Object, | ||
70 | LastModified: cpObjRes.LastModified, | ||
71 | ETag: trimEtag(resp.Header.Get("ETag")), | ||
72 | VersionID: resp.Header.Get(amzVersionID), | ||
73 | Expiration: expTime, | ||
74 | ExpirationRuleID: ruleID, | ||
75 | }, nil | ||
76 | } | ||
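A short usage sketch of the server-side copy above; it assumes a *minio.Client named client has already been constructed, and the bucket and object names are placeholders:

```go
func copySketch(ctx context.Context, client *minio.Client) error {
	src := minio.CopySrcOptions{Bucket: "srcbucket", Object: "my-object"}
	dst := minio.CopyDestOptions{Bucket: "dstbucket", Object: "my-copy"}

	// No object data flows through the client; the server copies in place.
	info, err := client.CopyObject(ctx, dst, src)
	if err != nil {
		return err
	}
	log.Println("copied, new etag:", info.ETag)
	return nil
}
```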
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go new file mode 100644 index 0000000..97a6f80 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go | |||
@@ -0,0 +1,254 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "encoding/xml" | ||
22 | "io" | ||
23 | "net/http" | ||
24 | "net/url" | ||
25 | "strings" | ||
26 | "time" | ||
27 | ) | ||
28 | |||
29 | // BucketInfo container for bucket metadata. | ||
30 | type BucketInfo struct { | ||
31 | // The name of the bucket. | ||
32 | Name string `json:"name"` | ||
33 | // Date the bucket was created. | ||
34 | CreationDate time.Time `json:"creationDate"` | ||
35 | } | ||
36 | |||
37 | // StringMap represents a map with a custom UnmarshalXML | ||
38 | type StringMap map[string]string | ||
39 | |||
40 | // UnmarshalXML unmarshals the XML into a map of string to strings, | ||
41 | // creating a key in the map for each tag and setting its value to the | ||
42 | // tag's contents. | ||
43 | // | ||
44 | // The fact this method is on the pointer of StringMap is important, so | ||
45 | // that if m is nil it can be initialized, which is often the case if m | ||
46 | // is nested in another XML structure. This is also why the first thing | ||
47 | // done on the first line is to initialize it. | ||
48 | func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error { | ||
49 | *m = StringMap{} | ||
50 | for { | ||
51 | // Format is <key>value</key> | ||
52 | var e struct { | ||
53 | XMLName xml.Name | ||
54 | Value string `xml:",chardata"` | ||
55 | } | ||
56 | err := d.Decode(&e) | ||
57 | if err == io.EOF { | ||
58 | break | ||
59 | } | ||
60 | if err != nil { | ||
61 | return err | ||
62 | } | ||
63 | (*m)[e.XMLName.Local] = e.Value | ||
64 | } | ||
65 | return nil | ||
66 | } | ||
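A small sketch of what the decoder above accepts; the element names in the payload are arbitrary examples:

```go
// decodeMetaSketch decodes <key>value</key> pairs into a StringMap.
func decodeMetaSketch() (StringMap, error) {
	const payload = `<UserMetadata><color>blue</color><owner>alice</owner></UserMetadata>`
	var m StringMap
	if err := xml.Unmarshal([]byte(payload), &m); err != nil {
		return nil, err
	}
	// m == StringMap{"color": "blue", "owner": "alice"}
	return m, nil
}
```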
67 | |||
68 | // URLMap represents a map with a custom UnmarshalXML | ||
69 | type URLMap map[string]string | ||
70 | |||
71 | // UnmarshalXML unmarshals the XML into a map of string to strings, | ||
72 | // creating a key in the map for each tag and setting its value to the | ||
73 | // tag's contents. | ||
74 | // | ||
75 | // The fact this method is on the pointer of URLMap is important, so | ||
76 | // that if m is nil it can be initialized, which is often the case if m | ||
77 | // is nested in another XML structure. This is also why the first thing | ||
78 | // done on the first line is to initialize it. | ||
79 | func (m *URLMap) UnmarshalXML(d *xml.Decoder, se xml.StartElement) error { | ||
80 | *m = URLMap{} | ||
81 | var tgs string | ||
82 | if err := d.DecodeElement(&tgs, &se); err != nil { | ||
83 | if err == io.EOF { | ||
84 | return nil | ||
85 | } | ||
86 | return err | ||
87 | } | ||
88 | for tgs != "" { | ||
89 | var key string | ||
90 | key, tgs, _ = stringsCut(tgs, "&") | ||
91 | if key == "" { | ||
92 | continue | ||
93 | } | ||
94 | key, value, _ := stringsCut(key, "=") | ||
95 | key, err := url.QueryUnescape(key) | ||
96 | if err != nil { | ||
97 | return err | ||
98 | } | ||
99 | |||
100 | value, err = url.QueryUnescape(value) | ||
101 | if err != nil { | ||
102 | return err | ||
103 | } | ||
104 | (*m)[key] = value | ||
105 | } | ||
106 | return nil | ||
107 | } | ||
108 | |||
109 | // stringsCut slices s around the first instance of sep, | ||
110 | // returning the text before and after sep. | ||
111 | // The found result reports whether sep appears in s. | ||
112 | // If sep does not appear in s, cut returns s, "", false. | ||
113 | func stringsCut(s, sep string) (before, after string, found bool) { | ||
114 | if i := strings.Index(s, sep); i >= 0 { | ||
115 | return s[:i], s[i+len(sep):], true | ||
116 | } | ||
117 | return s, "", false | ||
118 | } | ||
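stringsCut mirrors strings.Cut from the standard library (added in Go 1.18), presumably vendored here to keep the minimum supported Go version lower. How the URLMap decoder above uses it on a query-encoded tag string:

```go
before, after, found := stringsCut("dept=eng&team=storage", "&")
// before == "dept=eng", after == "team=storage", found == true

key, value, _ := stringsCut(before, "=")
// key == "dept", value == "eng"
```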
119 | |||
120 | // Owner name. | ||
121 | type Owner struct { | ||
122 | XMLName xml.Name `xml:"Owner" json:"owner"` | ||
123 | DisplayName string `xml:"DisplayName" json:"name"` | ||
124 | ID string `xml:"ID" json:"id"` | ||
125 | } | ||
126 | |||
127 | // UploadInfo contains information about the | ||
128 | // newly uploaded or copied object. | ||
129 | type UploadInfo struct { | ||
130 | Bucket string | ||
131 | Key string | ||
132 | ETag string | ||
133 | Size int64 | ||
134 | LastModified time.Time | ||
135 | Location string | ||
136 | VersionID string | ||
137 | |||
138 | // Lifecycle expiry-date and ruleID associated with the expiry | ||
139 | // not to be confused with `Expires` HTTP header. | ||
140 | Expiration time.Time | ||
141 | ExpirationRuleID string | ||
142 | |||
143 | // Verified checksum values, if any. | ||
144 | // Values are base64 (standard) encoded. | ||
145 | // For multipart objects this is a checksum of the checksum of each part. | ||
146 | ChecksumCRC32 string | ||
147 | ChecksumCRC32C string | ||
148 | ChecksumSHA1 string | ||
149 | ChecksumSHA256 string | ||
150 | } | ||
151 | |||
152 | // RestoreInfo contains information of the restore operation of an archived object | ||
153 | type RestoreInfo struct { | ||
154 | // Whether the restore operation is still ongoing | ||
155 | OngoingRestore bool | ||
156 | // When the restored copy of the archived object will be removed | ||
157 | ExpiryTime time.Time | ||
158 | } | ||
159 | |||
160 | // ObjectInfo container for object metadata. | ||
161 | type ObjectInfo struct { | ||
162 | // An ETag is optionally set to md5sum of an object. In case of multipart objects, | ||
163 | // ETag is of the form MD5SUM-N where MD5SUM is the md5sum of the individual | ||
164 | // md5sums of each part concatenated into one string. | ||
165 | ETag string `json:"etag"` | ||
166 | |||
167 | Key string `json:"name"` // Name of the object | ||
168 | LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. | ||
169 | Size int64 `json:"size"` // Size in bytes of the object. | ||
170 | ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. | ||
171 | Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached. | ||
172 | |||
173 | // Collection of additional metadata on the object. | ||
174 | // eg: x-amz-meta-*, content-encoding etc. | ||
175 | Metadata http.Header `json:"metadata" xml:"-"` | ||
176 | |||
177 | // x-amz-meta-* headers with the "x-amz-meta-" prefix stripped, keeping the first value of each. | ||
178 | // Only returned by MinIO servers. | ||
179 | UserMetadata StringMap `json:"userMetadata,omitempty"` | ||
180 | |||
181 | // x-amz-tagging values as key/value pairs. | ||
182 | // Only returned by MinIO servers. | ||
183 | UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"` | ||
184 | |||
185 | // x-amz-tagging-count value | ||
186 | UserTagCount int | ||
187 | |||
188 | // Owner name. | ||
189 | Owner Owner | ||
190 | |||
191 | // ACL grant. | ||
192 | Grant []Grant | ||
193 | |||
194 | // The class of storage used to store the object. | ||
195 | StorageClass string `json:"storageClass"` | ||
196 | |||
197 | // Versioning related information | ||
198 | IsLatest bool | ||
199 | IsDeleteMarker bool | ||
200 | VersionID string `xml:"VersionId"` | ||
201 | |||
202 | // x-amz-replication-status value is either in one of the following states | ||
203 | // - COMPLETED | ||
204 | // - PENDING | ||
205 | // - FAILED | ||
206 | // - REPLICA (on the destination) | ||
207 | ReplicationStatus string `xml:"ReplicationStatus"` | ||
208 | // set to true if delete marker has backing object version on target, and eligible to replicate | ||
209 | ReplicationReady bool | ||
210 | // Lifecycle expiry-date and ruleID associated with the expiry | ||
211 | // not to be confused with `Expires` HTTP header. | ||
212 | Expiration time.Time | ||
213 | ExpirationRuleID string | ||
214 | |||
215 | Restore *RestoreInfo | ||
216 | |||
217 | // Checksum values | ||
218 | ChecksumCRC32 string | ||
219 | ChecksumCRC32C string | ||
220 | ChecksumSHA1 string | ||
221 | ChecksumSHA256 string | ||
222 | |||
223 | Internal *struct { | ||
224 | K int // Data blocks | ||
225 | M int // Parity blocks | ||
226 | } `xml:"Internal"` | ||
227 | |||
228 | // Error | ||
229 | Err error `json:"-"` | ||
230 | } | ||
231 | |||
232 | // ObjectMultipartInfo container for multipart object metadata. | ||
233 | type ObjectMultipartInfo struct { | ||
234 | // Date and time at which the multipart upload was initiated. | ||
235 | Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` | ||
236 | |||
237 | Initiator initiator | ||
238 | Owner owner | ||
239 | |||
240 | // The type of storage to use for the object. Defaults to 'STANDARD'. | ||
241 | StorageClass string | ||
242 | |||
243 | // Key of the object for which the multipart upload was initiated. | ||
244 | Key string | ||
245 | |||
246 | // Size in bytes of the object. | ||
247 | Size int64 | ||
248 | |||
249 | // Upload ID that identifies the multipart upload. | ||
250 | UploadID string `xml:"UploadId"` | ||
251 | |||
252 | // Error | ||
253 | Err error | ||
254 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go new file mode 100644 index 0000000..7df211f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "encoding/xml" | ||
23 | "fmt" | ||
24 | "io" | ||
25 | "net/http" | ||
26 | "strings" | ||
27 | ) | ||
28 | |||
29 | /* **** SAMPLE ERROR RESPONSE **** | ||
30 | <?xml version="1.0" encoding="UTF-8"?> | ||
31 | <Error> | ||
32 | <Code>AccessDenied</Code> | ||
33 | <Message>Access Denied</Message> | ||
34 | <BucketName>bucketName</BucketName> | ||
35 | <Key>objectName</Key> | ||
36 | <RequestId>F19772218238A85A</RequestId> | ||
37 | <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId> | ||
38 | </Error> | ||
39 | */ | ||
40 | |||
41 | // ErrorResponse - Is the typed error returned by all API operations. | ||
42 | // ErrorResponse struct should be comparable since it is compared inside | ||
43 | // golang http API (https://github.com/golang/go/issues/29768) | ||
44 | type ErrorResponse struct { | ||
45 | XMLName xml.Name `xml:"Error" json:"-"` | ||
46 | Code string | ||
47 | Message string | ||
48 | BucketName string | ||
49 | Key string | ||
50 | Resource string | ||
51 | RequestID string `xml:"RequestId"` | ||
52 | HostID string `xml:"HostId"` | ||
53 | |||
54 | // Region where the bucket is located. This header is returned | ||
55 | // only in HEAD bucket and ListObjects response. | ||
56 | Region string | ||
57 | |||
58 | // Captures the server string returned in response header. | ||
59 | Server string | ||
60 | |||
61 | // Underlying HTTP status code for the returned error | ||
62 | StatusCode int `xml:"-" json:"-"` | ||
63 | } | ||
64 | |||
65 | // ToErrorResponse - Returns parsed ErrorResponse struct from body and | ||
66 | // http headers. | ||
67 | // | ||
68 | // For example: | ||
69 | // | ||
70 | // import s3 "github.com/minio/minio-go/v7" | ||
71 | // ... | ||
72 | // ... | ||
73 | // reader, stat, err := s3.GetObject(...) | ||
74 | // if err != nil { | ||
75 | // resp := s3.ToErrorResponse(err) | ||
76 | // } | ||
77 | // ... | ||
78 | func ToErrorResponse(err error) ErrorResponse { | ||
79 | switch err := err.(type) { | ||
80 | case ErrorResponse: | ||
81 | return err | ||
82 | default: | ||
83 | return ErrorResponse{} | ||
84 | } | ||
85 | } | ||
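A sketch of the intended error-handling pattern, extending the doc comment above; it assumes an already-constructed client, and the bucket/object names are placeholders:

```go
func statSketch(ctx context.Context, client *minio.Client) {
	_, err := client.StatObject(ctx, "mybucket", "missing-key", minio.StatObjectOptions{})
	if err != nil {
		resp := minio.ToErrorResponse(err)
		switch resp.Code {
		case "NoSuchKey":
			log.Println("object does not exist")
		case "AccessDenied":
			log.Println("no permission to stat the object")
		default:
			log.Println("unexpected error:", resp.StatusCode, resp.Message)
		}
	}
}
```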
86 | |||
87 | // Error - Returns S3 error string. | ||
88 | func (e ErrorResponse) Error() string { | ||
89 | if e.Message == "" { | ||
90 | msg, ok := s3ErrorResponseMap[e.Code] | ||
91 | if !ok { | ||
92 | msg = fmt.Sprintf("Error response code %s.", e.Code) | ||
93 | } | ||
94 | return msg | ||
95 | } | ||
96 | return e.Message | ||
97 | } | ||
98 | |||
99 | // Common string for errors to report issue location in unexpected | ||
100 | // cases. | ||
101 | const ( | ||
102 | reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." | ||
103 | ) | ||
104 | |||
105 | // xmlDecodeAndBody reads the whole body up to 1MB and | ||
106 | // tries to XML decode it into v. | ||
107 | // The body that was read and any error from reading or decoding is returned. | ||
108 | func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { | ||
109 | // read the whole body (up to 1MB) | ||
110 | const maxBodyLength = 1 << 20 | ||
111 | body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) | ||
112 | if err != nil { | ||
113 | return nil, err | ||
114 | } | ||
115 | return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) | ||
116 | } | ||
117 | |||
118 | // httpRespToErrorResponse returns a new encoded ErrorResponse | ||
119 | // structure as error. | ||
120 | func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { | ||
121 | if resp == nil { | ||
122 | msg := "Empty http response. " + reportIssue | ||
123 | return errInvalidArgument(msg) | ||
124 | } | ||
125 | |||
126 | errResp := ErrorResponse{ | ||
127 | StatusCode: resp.StatusCode, | ||
128 | Server: resp.Header.Get("Server"), | ||
129 | } | ||
130 | |||
131 | errBody, err := xmlDecodeAndBody(resp.Body, &errResp) | ||
132 | // If XML decoding failed (e.g. empty body), fall back to the HTTP status code. | ||
133 | if err != nil { | ||
134 | switch resp.StatusCode { | ||
135 | case http.StatusNotFound: | ||
136 | if objectName == "" { | ||
137 | errResp = ErrorResponse{ | ||
138 | StatusCode: resp.StatusCode, | ||
139 | Code: "NoSuchBucket", | ||
140 | Message: "The specified bucket does not exist.", | ||
141 | BucketName: bucketName, | ||
142 | } | ||
143 | } else { | ||
144 | errResp = ErrorResponse{ | ||
145 | StatusCode: resp.StatusCode, | ||
146 | Code: "NoSuchKey", | ||
147 | Message: "The specified key does not exist.", | ||
148 | BucketName: bucketName, | ||
149 | Key: objectName, | ||
150 | } | ||
151 | } | ||
152 | case http.StatusForbidden: | ||
153 | errResp = ErrorResponse{ | ||
154 | StatusCode: resp.StatusCode, | ||
155 | Code: "AccessDenied", | ||
156 | Message: "Access Denied.", | ||
157 | BucketName: bucketName, | ||
158 | Key: objectName, | ||
159 | } | ||
160 | case http.StatusConflict: | ||
161 | errResp = ErrorResponse{ | ||
162 | StatusCode: resp.StatusCode, | ||
163 | Code: "Conflict", | ||
164 | Message: "Bucket not empty.", | ||
165 | BucketName: bucketName, | ||
166 | } | ||
167 | case http.StatusPreconditionFailed: | ||
168 | errResp = ErrorResponse{ | ||
169 | StatusCode: resp.StatusCode, | ||
170 | Code: "PreconditionFailed", | ||
171 | Message: s3ErrorResponseMap["PreconditionFailed"], | ||
172 | BucketName: bucketName, | ||
173 | Key: objectName, | ||
174 | } | ||
175 | default: | ||
176 | msg := resp.Status | ||
177 | if len(errBody) > 0 { | ||
178 | msg = string(errBody) | ||
179 | if len(msg) > 1024 { | ||
180 | msg = msg[:1024] + "..." | ||
181 | } | ||
182 | } | ||
183 | errResp = ErrorResponse{ | ||
184 | StatusCode: resp.StatusCode, | ||
185 | Code: resp.Status, | ||
186 | Message: msg, | ||
187 | BucketName: bucketName, | ||
188 | } | ||
189 | } | ||
190 | } | ||
191 | |||
192 | code := resp.Header.Get("x-minio-error-code") | ||
193 | if code != "" { | ||
194 | errResp.Code = code | ||
195 | } | ||
196 | desc := resp.Header.Get("x-minio-error-desc") | ||
197 | if desc != "" { | ||
198 | errResp.Message = strings.Trim(desc, `"`) | ||
199 | } | ||
200 | |||
201 | // Save hostID, requestID and region information | ||
202 | // from headers if not available through error XML. | ||
203 | if errResp.RequestID == "" { | ||
204 | errResp.RequestID = resp.Header.Get("x-amz-request-id") | ||
205 | } | ||
206 | if errResp.HostID == "" { | ||
207 | errResp.HostID = resp.Header.Get("x-amz-id-2") | ||
208 | } | ||
209 | if errResp.Region == "" { | ||
210 | errResp.Region = resp.Header.Get("x-amz-bucket-region") | ||
211 | } | ||
212 | if errResp.Code == "InvalidRegion" && errResp.Region != "" { | ||
213 | errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) | ||
214 | } | ||
215 | |||
216 | return errResp | ||
217 | } | ||
218 | |||
219 | // errTransferAccelerationBucket - bucket name is invalid for use with transfer acceleration. | ||
220 | func errTransferAccelerationBucket(bucketName string) error { | ||
221 | return ErrorResponse{ | ||
222 | StatusCode: http.StatusBadRequest, | ||
223 | Code: "InvalidArgument", | ||
224 | Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", | ||
225 | BucketName: bucketName, | ||
226 | } | ||
227 | } | ||
228 | |||
229 | // errEntityTooLarge - Input size is larger than supported maximum. | ||
230 | func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { | ||
231 | msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) | ||
232 | return ErrorResponse{ | ||
233 | StatusCode: http.StatusBadRequest, | ||
234 | Code: "EntityTooLarge", | ||
235 | Message: msg, | ||
236 | BucketName: bucketName, | ||
237 | Key: objectName, | ||
238 | } | ||
239 | } | ||
240 | |||
241 | // errEntityTooSmall - Input size is smaller than supported minimum. | ||
242 | func errEntityTooSmall(totalSize int64, bucketName, objectName string) error { | ||
243 | msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) | ||
244 | return ErrorResponse{ | ||
245 | StatusCode: http.StatusBadRequest, | ||
246 | Code: "EntityTooSmall", | ||
247 | Message: msg, | ||
248 | BucketName: bucketName, | ||
249 | Key: objectName, | ||
250 | } | ||
251 | } | ||
252 | |||
253 | // errUnexpectedEOF - Unexpected end of file reached. | ||
254 | func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { | ||
255 | msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) | ||
256 | return ErrorResponse{ | ||
257 | StatusCode: http.StatusBadRequest, | ||
258 | Code: "UnexpectedEOF", | ||
259 | Message: msg, | ||
260 | BucketName: bucketName, | ||
261 | Key: objectName, | ||
262 | } | ||
263 | } | ||
264 | |||
265 | // errInvalidArgument - Invalid argument response. | ||
266 | func errInvalidArgument(message string) error { | ||
267 | return ErrorResponse{ | ||
268 | StatusCode: http.StatusBadRequest, | ||
269 | Code: "InvalidArgument", | ||
270 | Message: message, | ||
271 | RequestID: "minio", | ||
272 | } | ||
273 | } | ||
274 | |||
275 | // errAPINotSupported - API not supported response | ||
276 | // The specified API call is not supported | ||
277 | func errAPINotSupported(message string) error { | ||
278 | return ErrorResponse{ | ||
279 | StatusCode: http.StatusNotImplemented, | ||
280 | Code: "APINotSupported", | ||
281 | Message: message, | ||
282 | RequestID: "minio", | ||
283 | } | ||
284 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go new file mode 100644 index 0000000..9041d99 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go | |||
@@ -0,0 +1,152 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2018 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "encoding/xml" | ||
23 | "net/http" | ||
24 | "net/url" | ||
25 | ) | ||
26 | |||
27 | // Grantee represents the person being granted permissions. | ||
28 | type Grantee struct { | ||
29 | XMLName xml.Name `xml:"Grantee"` | ||
30 | ID string `xml:"ID"` | ||
31 | DisplayName string `xml:"DisplayName"` | ||
32 | URI string `xml:"URI"` | ||
33 | } | ||
34 | |||
35 | // Grant holds grant information | ||
36 | type Grant struct { | ||
37 | XMLName xml.Name `xml:"Grant"` | ||
38 | Grantee Grantee | ||
39 | Permission string `xml:"Permission"` | ||
40 | } | ||
41 | |||
42 | // AccessControlList contains the set of grantees and the permissions assigned to each grantee. | ||
43 | type AccessControlList struct { | ||
44 | XMLName xml.Name `xml:"AccessControlList"` | ||
45 | Grant []Grant | ||
46 | Permission string `xml:"Permission"` | ||
47 | } | ||
48 | |||
49 | type accessControlPolicy struct { | ||
50 | XMLName xml.Name `xml:"AccessControlPolicy"` | ||
51 | Owner Owner | ||
52 | AccessControlList AccessControlList | ||
53 | } | ||
54 | |||
55 | // GetObjectACL gets the ACLs of an object | ||
56 | func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { | ||
57 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
58 | bucketName: bucketName, | ||
59 | objectName: objectName, | ||
60 | queryValues: url.Values{ | ||
61 | "acl": []string{""}, | ||
62 | }, | ||
63 | }) | ||
64 | if err != nil { | ||
65 | return nil, err | ||
66 | } | ||
67 | defer closeResponse(resp) | ||
68 | |||
69 | if resp.StatusCode != http.StatusOK { | ||
70 | return nil, httpRespToErrorResponse(resp, bucketName, objectName) | ||
71 | } | ||
72 | |||
73 | res := &accessControlPolicy{} | ||
74 | |||
75 | if err := xmlDecoder(resp.Body, res); err != nil { | ||
76 | return nil, err | ||
77 | } | ||
78 | |||
79 | objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{}) | ||
80 | if err != nil { | ||
81 | return nil, err | ||
82 | } | ||
83 | |||
84 | objInfo.Owner.DisplayName = res.Owner.DisplayName | ||
85 | objInfo.Owner.ID = res.Owner.ID | ||
86 | |||
87 | objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...) | ||
88 | |||
89 | cannedACL := getCannedACL(res) | ||
90 | if cannedACL != "" { | ||
91 | objInfo.Metadata.Add("X-Amz-Acl", cannedACL) | ||
92 | return &objInfo, nil | ||
93 | } | ||
94 | |||
95 | grantACL := getAmzGrantACL(res) | ||
96 | for k, v := range grantACL { | ||
97 | objInfo.Metadata[k] = v | ||
98 | } | ||
99 | |||
100 | return &objInfo, nil | ||
101 | } | ||
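A usage sketch for the ACL call above, assuming an existing client; it reads both outputs: the canned ACL (if one was inferred from the grants) and the raw grant list:

```go
func aclSketch(ctx context.Context, client *minio.Client) error {
	objInfo, err := client.GetObjectACL(ctx, "mybucket", "my-object")
	if err != nil {
		return err
	}
	// Canned ACL, set only when getCannedACL matched the grant pattern.
	log.Println("canned acl:", objInfo.Metadata.Get("X-Amz-Acl"))
	for _, g := range objInfo.Grant {
		log.Println("grantee:", g.Grantee.ID, "permission:", g.Permission)
	}
	return nil
}
```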
102 | |||
103 | func getCannedACL(aCPolicy *accessControlPolicy) string { | ||
104 | grants := aCPolicy.AccessControlList.Grant | ||
105 | |||
106 | switch { | ||
107 | case len(grants) == 1: | ||
108 | if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { | ||
109 | return "private" | ||
110 | } | ||
111 | case len(grants) == 2: | ||
112 | for _, g := range grants { | ||
113 | if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { | ||
114 | return "authenticated-read" | ||
115 | } | ||
116 | if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { | ||
117 | return "public-read" | ||
118 | } | ||
119 | if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { | ||
120 | return "bucket-owner-read" | ||
121 | } | ||
122 | } | ||
123 | case len(grants) == 3: | ||
124 | for _, g := range grants { | ||
125 | if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { | ||
126 | return "public-read-write" | ||
127 | } | ||
128 | } | ||
129 | } | ||
130 | return "" | ||
131 | } | ||
132 | |||
133 | func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { | ||
134 | grants := aCPolicy.AccessControlList.Grant | ||
135 | res := map[string][]string{} | ||
136 | |||
137 | for _, g := range grants { | ||
138 | switch { | ||
139 | case g.Permission == "READ": | ||
140 | res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) | ||
141 | case g.Permission == "WRITE": | ||
142 | res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) | ||
143 | case g.Permission == "READ_ACP": | ||
144 | res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) | ||
145 | case g.Permission == "WRITE_ACP": | ||
146 | res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) | ||
147 | case g.Permission == "FULL_CONTROL": | ||
148 | res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) | ||
149 | } | ||
150 | } | ||
151 | return res | ||
152 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go new file mode 100644 index 0000000..2332dbf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "io" | ||
23 | "os" | ||
24 | "path/filepath" | ||
25 | |||
26 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
27 | ) | ||
28 | |||
29 | // FGetObject - download contents of an object to a local file. | ||
30 | // The options can be used to specify the GET request further. | ||
31 | func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { | ||
32 | // Input validation. | ||
33 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
34 | return err | ||
35 | } | ||
36 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
37 | return err | ||
38 | } | ||
39 | |||
40 | // Verify if destination already exists. | ||
41 | st, err := os.Stat(filePath) | ||
42 | if err == nil { | ||
43 | // If the destination exists and is a directory. | ||
44 | if st.IsDir() { | ||
45 | return errInvalidArgument("fileName is a directory.") | ||
46 | } | ||
47 | } | ||
48 | |||
49 | // Proceed if the file does not exist; return all other errors. | ||
50 | if err != nil { | ||
51 | if !os.IsNotExist(err) { | ||
52 | return err | ||
53 | } | ||
54 | } | ||
55 | |||
56 | // Extract top level directory. | ||
57 | objectDir, _ := filepath.Split(filePath) | ||
58 | if objectDir != "" { | ||
59 | // Create any missing top level directories. | ||
60 | if err := os.MkdirAll(objectDir, 0o700); err != nil { | ||
61 | return err | ||
62 | } | ||
63 | } | ||
64 | |||
65 | // Stat the object to fetch its ETag and size. | ||
66 | objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) | ||
67 | if err != nil { | ||
68 | return err | ||
69 | } | ||
70 | |||
71 | // Write to a temporary file "<filePath>.<ETag>.part.minio" before saving. | ||
72 | filePartPath := filePath + objectStat.ETag + ".part.minio" | ||
73 | |||
74 | // If exists, open in append mode. If not create it as a part file. | ||
75 | filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600) | ||
76 | if err != nil { | ||
77 | return err | ||
78 | } | ||
79 | |||
80 | // If we return early with an error, be sure to close and delete | ||
81 | // filePart. If we have an error along the way there is a chance | ||
82 | // that filePart is somehow damaged, and we should discard it. | ||
83 | closeAndRemove := true | ||
84 | defer func() { | ||
85 | if closeAndRemove { | ||
86 | _ = filePart.Close() | ||
87 | _ = os.Remove(filePartPath) | ||
88 | } | ||
89 | }() | ||
90 | |||
91 | // Issue Stat to get the current offset. | ||
92 | st, err = filePart.Stat() | ||
93 | if err != nil { | ||
94 | return err | ||
95 | } | ||
96 | |||
97 | // Initialize get object request headers to set the | ||
98 | // appropriate range offsets to read from. | ||
99 | if st.Size() > 0 { | ||
100 | opts.SetRange(st.Size(), 0) | ||
101 | } | ||
102 | |||
103 | // Fetch the object data starting from the current offset. | ||
104 | objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts) | ||
105 | if err != nil { | ||
106 | return err | ||
107 | } | ||
108 | |||
109 | // Write to the part file. | ||
110 | if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { | ||
111 | return err | ||
112 | } | ||
113 | |||
114 | // Close the file before rename, this is specifically needed for Windows users. | ||
115 | closeAndRemove = false | ||
116 | if err = filePart.Close(); err != nil { | ||
117 | return err | ||
118 | } | ||
119 | |||
120 | // Safely completed. Now commit by renaming to actual filename. | ||
121 | if err = os.Rename(filePartPath, filePath); err != nil { | ||
122 | return err | ||
123 | } | ||
124 | |||
125 | // Return. | ||
126 | return nil | ||
127 | } | ||
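A minimal call sketch for the download-to-file helper above; the client, bucket, object, and local path are placeholders:

```go
func downloadSketch(ctx context.Context, client *minio.Client) error {
	// Downloads into "/tmp/my-object.<ETag>.part.minio" first, then
	// renames to the final path on success.
	return client.FGetObject(ctx, "mybucket", "my-object",
		"/tmp/my-object", minio.GetObjectOptions{})
}
```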
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go new file mode 100644 index 0000000..9e6b154 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go | |||
@@ -0,0 +1,683 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "errors" | ||
23 | "fmt" | ||
24 | "io" | ||
25 | "net/http" | ||
26 | "sync" | ||
27 | |||
28 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
29 | ) | ||
30 | |||
31 | // GetObject - gets a readable, seekable stream of object data; accepts a request context. | ||
32 | func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { | ||
33 | // Input validation. | ||
34 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
35 | return nil, err | ||
36 | } | ||
37 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
38 | return nil, err | ||
39 | } | ||
40 | |||
41 | gctx, cancel := context.WithCancel(ctx) | ||
42 | |||
43 | // Detect whether the server location we are talking to is "snowball". | ||
44 | var snowball bool | ||
45 | if location, ok := c.bucketLocCache.Get(bucketName); ok { | ||
46 | snowball = location == "snowball" | ||
47 | } | ||
48 | |||
49 | var ( | ||
50 | err error | ||
51 | httpReader io.ReadCloser | ||
52 | objectInfo ObjectInfo | ||
53 | totalRead int | ||
54 | ) | ||
55 | |||
56 | // Create request channel. | ||
57 | reqCh := make(chan getRequest) | ||
58 | // Create response channel. | ||
59 | resCh := make(chan getResponse) | ||
60 | |||
61 | // This routine feeds partial object data as and when the caller reads. | ||
62 | go func() { | ||
63 | defer close(resCh) | ||
64 | defer func() { | ||
65 | // Close the http response body before returning. | ||
66 | // This ends the connection with the server. | ||
67 | if httpReader != nil { | ||
68 | httpReader.Close() | ||
69 | } | ||
70 | }() | ||
71 | defer cancel() | ||
72 | |||
73 | // Used to verify if etag of object has changed since last read. | ||
74 | var etag string | ||
75 | |||
76 | for req := range reqCh { | ||
77 | // If this is the first request we may not need to do a getObject request yet. | ||
78 | if req.isFirstReq { | ||
79 | // First request is a Read/ReadAt. | ||
80 | if req.isReadOp { | ||
81 | // Differentiate between wanting the whole object and just a range. | ||
82 | if req.isReadAt { | ||
83 | // If this is a ReadAt request only get the specified range. | ||
84 | // Range is set with respect to the offset and length of the buffer requested. | ||
85 | // Do not set objectInfo from the first readAt request because it will not get | ||
86 | // the whole object. | ||
87 | opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) | ||
88 | } else if req.Offset > 0 { | ||
89 | opts.SetRange(req.Offset, 0) | ||
90 | } | ||
91 | httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) | ||
92 | if err != nil { | ||
93 | resCh <- getResponse{Error: err} | ||
94 | return | ||
95 | } | ||
96 | etag = objectInfo.ETag | ||
97 | // Read at least firstReq.Buffer bytes, if not we have | ||
98 | // reached our EOF. | ||
99 | size, err := readFull(httpReader, req.Buffer) | ||
100 | totalRead += size | ||
101 | if size > 0 && err == io.ErrUnexpectedEOF { | ||
102 | if int64(size) < objectInfo.Size { | ||
103 | // In situations when returned size | ||
104 | // is less than the expected content | ||
105 | // length set by the server, make sure | ||
106 | // we return io.ErrUnexpectedEOF | ||
107 | err = io.ErrUnexpectedEOF | ||
108 | } else { | ||
109 | // If an EOF happens after reading some but not | ||
110 | // all the bytes ReadFull returns ErrUnexpectedEOF | ||
111 | err = io.EOF | ||
112 | } | ||
113 | } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { | ||
114 | // Special cases when server writes more data | ||
115 | // than the content-length, net/http response | ||
116 | // body returns an error, instead of converting | ||
117 | // it to io.EOF - return unexpected EOF. | ||
118 | err = io.ErrUnexpectedEOF | ||
119 | } | ||
120 | // Send back the first response. | ||
121 | resCh <- getResponse{ | ||
122 | objectInfo: objectInfo, | ||
123 | Size: size, | ||
124 | Error: err, | ||
125 | didRead: true, | ||
126 | } | ||
127 | } else { | ||
128 | // First request is a Stat or Seek call. | ||
129 | // Only need to run a StatObject until an actual Read or ReadAt request comes through. | ||
130 | |||
131 | // Remove range header if already set, for stat Operations to get original file size. | ||
132 | delete(opts.headers, "Range") | ||
133 | objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) | ||
134 | if err != nil { | ||
135 | resCh <- getResponse{ | ||
136 | Error: err, | ||
137 | } | ||
138 | // Exit the go-routine. | ||
139 | return | ||
140 | } | ||
141 | etag = objectInfo.ETag | ||
142 | // Send back the first response. | ||
143 | resCh <- getResponse{ | ||
144 | objectInfo: objectInfo, | ||
145 | } | ||
146 | } | ||
147 | } else if req.settingObjectInfo { // Request is just to get objectInfo. | ||
148 | // Remove range header if already set, for stat Operations to get original file size. | ||
149 | delete(opts.headers, "Range") | ||
150 | // Check whether this is a snowball endpoint; | ||
151 | // if so, do not use the If-Match feature | ||
152 | // since it does not work there. | ||
153 | if etag != "" && !snowball { | ||
154 | opts.SetMatchETag(etag) | ||
155 | } | ||
156 | objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) | ||
157 | if err != nil { | ||
158 | resCh <- getResponse{ | ||
159 | Error: err, | ||
160 | } | ||
161 | // Exit the goroutine. | ||
162 | return | ||
163 | } | ||
164 | // Send back the objectInfo. | ||
165 | resCh <- getResponse{ | ||
166 | objectInfo: objectInfo, | ||
167 | } | ||
168 | } else { | ||
169 | // When the offset changes, fetch the object again at the new offset. | ||
170 | // Because the httpReader may not have been set by the first | ||
171 | // request (if it was a stat or seek), it must be checked | ||
172 | // whether the object has been read or not, so that new readers | ||
173 | // are only initialized when one is not already in place. | ||
174 | // All readAt requests are new requests. | ||
175 | if req.DidOffsetChange || !req.beenRead { | ||
176 | // Check whether this is a snowball endpoint; | ||
177 | // if so, do not use the If-Match feature | ||
178 | // since it does not work there. | ||
179 | if etag != "" && !snowball { | ||
180 | opts.SetMatchETag(etag) | ||
181 | } | ||
182 | if httpReader != nil { | ||
183 | // Close previously opened http reader. | ||
184 | httpReader.Close() | ||
185 | } | ||
186 | // If this request is a readAt only get the specified range. | ||
187 | if req.isReadAt { | ||
188 | // Range is set with respect to the offset and length of the buffer requested. | ||
189 | opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) | ||
190 | } else if req.Offset > 0 { // Range is set with respect to the offset. | ||
191 | opts.SetRange(req.Offset, 0) | ||
192 | } else { | ||
193 | // Remove range header if already set | ||
194 | delete(opts.headers, "Range") | ||
195 | } | ||
196 | httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) | ||
197 | if err != nil { | ||
198 | resCh <- getResponse{ | ||
199 | Error: err, | ||
200 | } | ||
201 | return | ||
202 | } | ||
203 | totalRead = 0 | ||
204 | } | ||
205 | |||
206 | // Read at least req.Buffer bytes, if not we have | ||
207 | // reached our EOF. | ||
208 | size, err := readFull(httpReader, req.Buffer) | ||
209 | totalRead += size | ||
210 | if size > 0 && err == io.ErrUnexpectedEOF { | ||
211 | if int64(totalRead) < objectInfo.Size { | ||
212 | // In situations when returned size | ||
213 | // is less than the expected content | ||
214 | // length set by the server, make sure | ||
215 | // we return io.ErrUnexpectedEOF | ||
216 | err = io.ErrUnexpectedEOF | ||
217 | } else { | ||
218 | // If an EOF happens after reading some but not | ||
219 | // all the bytes ReadFull returns ErrUnexpectedEOF | ||
220 | err = io.EOF | ||
221 | } | ||
222 | } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { | ||
223 | // Special cases when server writes more data | ||
224 | // than the content-length, net/http response | ||
225 | // body returns an error, instead of converting | ||
226 | // it to io.EOF - return unexpected EOF. | ||
227 | err = io.ErrUnexpectedEOF | ||
228 | } | ||
229 | |||
230 | // Reply back how much was read. | ||
231 | resCh <- getResponse{ | ||
232 | Size: size, | ||
233 | Error: err, | ||
234 | didRead: true, | ||
235 | objectInfo: objectInfo, | ||
236 | } | ||
237 | } | ||
238 | } | ||
239 | }() | ||
240 | |||
241 | // Create a newObject through the information sent back by reqCh. | ||
242 | return newObject(gctx, cancel, reqCh, resCh), nil | ||
243 | } | ||
244 | |||
245 | // get request message container to communicate with internal | ||
246 | // go-routine. | ||
247 | type getRequest struct { | ||
248 | Buffer []byte | ||
249 | Offset int64 // readAt offset. | ||
250 | DidOffsetChange bool // Tracks the offset changes for Seek requests. | ||
251 | beenRead bool // Determines if this is the first time an object is being read. | ||
252 | isReadAt bool // Determines if this request is a request to a specific range | ||
253 | isReadOp bool // Determines if this request is a Read or ReadAt request. | ||
254 | isFirstReq bool // Determines if this request is the first time an object is being accessed. | ||
255 | settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. | ||
256 | } | ||
257 | |||
258 | // get response message container to reply back for the request. | ||
259 | type getResponse struct { | ||
260 | Size int | ||
261 | Error error | ||
262 | didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. | ||
263 | objectInfo ObjectInfo // Used for the first request. | ||
264 | } | ||
265 | |||
266 | // Object represents an open object. It implements | ||
267 | // Reader, ReaderAt, Seeker, Closer for a HTTP stream. | ||
268 | type Object struct { | ||
269 | // Mutex. | ||
270 | mutex *sync.Mutex | ||
271 | |||
272 | // User allocated and defined. | ||
273 | reqCh chan<- getRequest | ||
274 | resCh <-chan getResponse | ||
275 | ctx context.Context | ||
276 | cancel context.CancelFunc | ||
277 | currOffset int64 | ||
278 | objectInfo ObjectInfo | ||
279 | |||
280 | // Ask lower level to initiate data fetching based on currOffset | ||
281 | seekData bool | ||
282 | |||
283 | // Keeps track of closed call. | ||
284 | isClosed bool | ||
285 | |||
286 | // Keeps track of if this is the first call. | ||
287 | isStarted bool | ||
288 | |||
289 | // Previous error saved for future calls. | ||
290 | prevErr error | ||
291 | |||
292 | // Keeps track of if this object has been read yet. | ||
293 | beenRead bool | ||
294 | |||
295 | // Keeps track of if objectInfo has been set yet. | ||
296 | objectInfoSet bool | ||
297 | } | ||
298 | |||
299 | // doGetRequest - sends a request on the object's reqCh and blocks on resCh. | ||
300 | // Returns the size of the buffer read, if anything was read, as well | ||
301 | // as any error encountered. For all first requests sent on the object | ||
302 | // it is also responsible for sending back the objectInfo. | ||
303 | func (o *Object) doGetRequest(request getRequest) (getResponse, error) { | ||
304 | select { | ||
305 | case <-o.ctx.Done(): | ||
306 | return getResponse{}, o.ctx.Err() | ||
307 | case o.reqCh <- request: | ||
308 | } | ||
309 | |||
310 | response := <-o.resCh | ||
311 | |||
312 | // Return any error to the top level. | ||
313 | if response.Error != nil { | ||
314 | return response, response.Error | ||
315 | } | ||
316 | |||
317 | // This was the first request. | ||
318 | if !o.isStarted { | ||
319 | // The object has been operated on. | ||
320 | o.isStarted = true | ||
321 | } | ||
322 | // Set the objectInfo if the request was not readAt | ||
323 | // and it hasn't been set before. | ||
324 | if !o.objectInfoSet && !request.isReadAt { | ||
325 | o.objectInfo = response.objectInfo | ||
326 | o.objectInfoSet = true | ||
327 | } | ||
328 | // Set beenRead only if it has not been set before. | ||
329 | if !o.beenRead { | ||
330 | o.beenRead = response.didRead | ||
331 | } | ||
332 | // Data is ready on the wire; no need to re-initiate the connection at the lower level | ||
333 | o.seekData = false | ||
334 | |||
335 | return response, nil | ||
336 | } | ||
337 | |||
338 | // setOffset - handles the setting of offsets for | ||
339 | // Read/ReadAt/Seek requests. | ||
340 | func (o *Object) setOffset(bytesRead int64) error { | ||
341 | // Update the currentOffset. | ||
342 | o.currOffset += bytesRead | ||
343 | |||
344 | if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { | ||
345 | return io.EOF | ||
346 | } | ||
347 | return nil | ||
348 | } | ||
349 | |||
350 | // Read reads up to len(b) bytes into b. It returns the number of | ||
351 | // bytes read (0 <= n <= len(b)) and any error encountered. Returns | ||
352 | // io.EOF upon end of file. | ||
353 | func (o *Object) Read(b []byte) (n int, err error) { | ||
354 | if o == nil { | ||
355 | return 0, errInvalidArgument("Object is nil") | ||
356 | } | ||
357 | |||
358 | // Locking. | ||
359 | o.mutex.Lock() | ||
360 | defer o.mutex.Unlock() | ||
361 | |||
362 | // prevErr is previous error saved from previous operation. | ||
363 | if o.prevErr != nil || o.isClosed { | ||
364 | return 0, o.prevErr | ||
365 | } | ||
366 | |||
367 | // Create a new request. | ||
368 | readReq := getRequest{ | ||
369 | isReadOp: true, | ||
370 | beenRead: o.beenRead, | ||
371 | Buffer: b, | ||
372 | } | ||
373 | |||
374 | // Alert that this is the first request. | ||
375 | if !o.isStarted { | ||
376 | readReq.isFirstReq = true | ||
377 | } | ||
378 | |||
379 | // Ask to establish a new data fetch routine based on seekData flag | ||
380 | readReq.DidOffsetChange = o.seekData | ||
381 | readReq.Offset = o.currOffset | ||
382 | |||
383 | // Send and receive from the first request. | ||
384 | response, err := o.doGetRequest(readReq) | ||
385 | if err != nil && err != io.EOF { | ||
386 | // Save the error for future calls. | ||
387 | o.prevErr = err | ||
388 | return response.Size, err | ||
389 | } | ||
390 | |||
391 | // Bytes read. | ||
392 | bytesRead := int64(response.Size) | ||
393 | |||
394 | // Set the new offset. | ||
395 | oerr := o.setOffset(bytesRead) | ||
396 | if oerr != nil { | ||
397 | // Save the error for future calls. | ||
398 | o.prevErr = oerr | ||
399 | return response.Size, oerr | ||
400 | } | ||
401 | |||
402 | // Return the response. | ||
403 | return response.Size, err | ||
404 | } | ||
405 | |||
406 | // Stat returns the ObjectInfo structure describing Object. | ||
407 | func (o *Object) Stat() (ObjectInfo, error) { | ||
408 | if o == nil { | ||
409 | return ObjectInfo{}, errInvalidArgument("Object is nil") | ||
410 | } | ||
411 | // Locking. | ||
412 | o.mutex.Lock() | ||
413 | defer o.mutex.Unlock() | ||
414 | |||
415 | if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { | ||
416 | return ObjectInfo{}, o.prevErr | ||
417 | } | ||
418 | |||
419 | // This is the first request. | ||
420 | if !o.isStarted || !o.objectInfoSet { | ||
421 | // Send the request and get the response. | ||
422 | _, err := o.doGetRequest(getRequest{ | ||
423 | isFirstReq: !o.isStarted, | ||
424 | settingObjectInfo: !o.objectInfoSet, | ||
425 | }) | ||
426 | if err != nil { | ||
427 | o.prevErr = err | ||
428 | return ObjectInfo{}, err | ||
429 | } | ||
430 | } | ||
431 | |||
432 | return o.objectInfo, nil | ||
433 | } | ||
434 | |||
435 | // ReadAt reads len(b) bytes from the File starting at byte offset | ||
436 | // off. It returns the number of bytes read and the error, if any. | ||
437 | // ReadAt always returns a non-nil error when n < len(b). At end of | ||
438 | // file, that error is io.EOF. | ||
439 | func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { | ||
440 | if o == nil { | ||
441 | return 0, errInvalidArgument("Object is nil") | ||
442 | } | ||
443 | |||
444 | // Locking. | ||
445 | o.mutex.Lock() | ||
446 | defer o.mutex.Unlock() | ||
447 | |||
448 | // prevErr is error which was saved in previous operation. | ||
449 | if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { | ||
450 | return 0, o.prevErr | ||
451 | } | ||
452 | |||
453 | // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method. | ||
454 | o.currOffset = offset | ||
455 | |||
456 | // Can only compare offsets to size when size has been set. | ||
457 | if o.objectInfoSet { | ||
458 | // If offset is negative then we return io.EOF. | ||
459 | // If offset is greater than or equal to object size we return io.EOF. | ||
460 | if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { | ||
461 | return 0, io.EOF | ||
462 | } | ||
463 | } | ||
464 | |||
465 | // Create the new readAt request. | ||
466 | readAtReq := getRequest{ | ||
467 | isReadOp: true, | ||
468 | isReadAt: true, | ||
469 | DidOffsetChange: true, // Offset always changes. | ||
470 | beenRead: o.beenRead, // Set if this is the first request to try and read. | ||
471 | Offset: offset, // Set the offset. | ||
472 | Buffer: b, | ||
473 | } | ||
474 | |||
475 | // Alert that this is the first request. | ||
476 | if !o.isStarted { | ||
477 | readAtReq.isFirstReq = true | ||
478 | } | ||
479 | |||
480 | // Send and receive from the first request. | ||
481 | response, err := o.doGetRequest(readAtReq) | ||
482 | if err != nil && err != io.EOF { | ||
483 | // Save the error. | ||
484 | o.prevErr = err | ||
485 | return response.Size, err | ||
486 | } | ||
487 | // Bytes read. | ||
488 | bytesRead := int64(response.Size) | ||
489 | // There is no valid objectInfo yet | ||
490 | // to compare against for EOF. | ||
491 | if !o.objectInfoSet { | ||
492 | // Update the currentOffset. | ||
493 | o.currOffset += bytesRead | ||
494 | } else { | ||
495 | // If this was not the first request update | ||
496 | // the offsets and compare against objectInfo | ||
497 | // for EOF. | ||
498 | oerr := o.setOffset(bytesRead) | ||
499 | if oerr != nil { | ||
500 | o.prevErr = oerr | ||
501 | return response.Size, oerr | ||
502 | } | ||
503 | } | ||
504 | return response.Size, err | ||
505 | } | ||
506 | |||
507 | // Seek sets the offset for the next Read or Write to offset, | ||
508 | // interpreted according to whence: 0 means relative to the | ||
509 | // origin of the file, 1 means relative to the current offset, | ||
510 | // and 2 means relative to the end. | ||
511 | // Seek returns the new offset and an error, if any. | ||
512 | // | ||
513 | // Seeking to a negative offset is an error. Seeking to any positive | ||
514 | // offset is legal; subsequent io operations succeed as long as the | ||
515 | // underlying object is not closed. | ||
516 | func (o *Object) Seek(offset int64, whence int) (n int64, err error) { | ||
517 | if o == nil { | ||
518 | return 0, errInvalidArgument("Object is nil") | ||
519 | } | ||
520 | |||
521 | // Locking. | ||
522 | o.mutex.Lock() | ||
523 | defer o.mutex.Unlock() | ||
524 | |||
525 | // Seeking at EOF is legal; allow only io.EOF here and return any other saved error. | ||
526 | if o.prevErr != nil && o.prevErr != io.EOF { | ||
527 | return 0, o.prevErr | ||
528 | } | ||
529 | |||
530 | // Negative offset is valid for whence of '2'. | ||
531 | if offset < 0 && whence != 2 { | ||
532 | return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) | ||
533 | } | ||
534 | |||
535 | // This is the first request. So before anything else | ||
536 | // get the ObjectInfo. | ||
537 | if !o.isStarted || !o.objectInfoSet { | ||
538 | // Create the new Seek request. | ||
539 | seekReq := getRequest{ | ||
540 | isReadOp: false, | ||
541 | Offset: offset, | ||
542 | isFirstReq: true, | ||
543 | } | ||
544 | // Send and receive from the seek request. | ||
545 | _, err := o.doGetRequest(seekReq) | ||
546 | if err != nil { | ||
547 | // Save the error. | ||
548 | o.prevErr = err | ||
549 | return 0, err | ||
550 | } | ||
551 | } | ||
552 | |||
553 | newOffset := o.currOffset | ||
554 | |||
555 | // Switch through whence. | ||
556 | switch whence { | ||
557 | default: | ||
558 | return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) | ||
559 | case 0: | ||
560 | if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { | ||
561 | return 0, io.EOF | ||
562 | } | ||
563 | newOffset = offset | ||
564 | case 1: | ||
565 | if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { | ||
566 | return 0, io.EOF | ||
567 | } | ||
568 | newOffset += offset | ||
569 | case 2: | ||
570 | // If we don't know the object size, return an error for io.SeekEnd | ||
571 | if o.objectInfo.Size < 0 { | ||
572 | return 0, errInvalidArgument("Whence END is not supported when the object size is unknown") | ||
573 | } | ||
574 | // Seeking to positive offset is valid for whence '2', but | ||
575 | // since we are backing a Reader we have reached 'EOF' if | ||
576 | // offset is positive. | ||
577 | if offset > 0 { | ||
578 | return 0, io.EOF | ||
579 | } | ||
580 | // Seeking to a negative position is not allowed for this whence. | ||
581 | if o.objectInfo.Size+offset < 0 { | ||
582 | return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) | ||
583 | } | ||
584 | newOffset = o.objectInfo.Size + offset | ||
585 | } | ||
586 | // Reset the saved error since we successfully seeked; let the Read | ||
587 | // and ReadAt decide. | ||
588 | if o.prevErr == io.EOF { | ||
589 | o.prevErr = nil | ||
590 | } | ||
591 | |||
592 | // Ask lower level to fetch again from source when necessary | ||
593 | o.seekData = (newOffset != o.currOffset) || o.seekData | ||
594 | o.currOffset = newOffset | ||
595 | |||
596 | // Return the effective offset. | ||
597 | return o.currOffset, nil | ||
598 | } | ||
599 | |||
600 | // Close - closes the underlying object. Calling Close more than | ||
601 | // once returns an error for every call after the first. | ||
602 | func (o *Object) Close() (err error) { | ||
603 | if o == nil { | ||
604 | return errInvalidArgument("Object is nil") | ||
605 | } | ||
606 | |||
607 | // Locking. | ||
608 | o.mutex.Lock() | ||
609 | defer o.mutex.Unlock() | ||
610 | |||
611 | // If already closed, return the saved error. | ||
612 | if o.isClosed { | ||
613 | return o.prevErr | ||
614 | } | ||
615 | |||
616 | // Close successfully. | ||
617 | o.cancel() | ||
618 | |||
619 | // Close the request channel to signal the internal goroutine to exit. | ||
620 | close(o.reqCh) | ||
621 | |||
622 | // Save for future operations. | ||
623 | errMsg := "Object is already closed. Bad file descriptor." | ||
624 | o.prevErr = errors.New(errMsg) | ||
625 | // Record here that we closed the done channel successfully. | ||
626 | o.isClosed = true | ||
627 | return nil | ||
628 | } | ||
629 | |||
630 | // newObject instantiates a new *minio.Object* | ||
631 | // ObjectInfo will be set by setObjectInfo | ||
632 | func newObject(ctx context.Context, cancel context.CancelFunc, reqCh chan<- getRequest, resCh <-chan getResponse) *Object { | ||
633 | return &Object{ | ||
634 | ctx: ctx, | ||
635 | cancel: cancel, | ||
636 | mutex: &sync.Mutex{}, | ||
637 | reqCh: reqCh, | ||
638 | resCh: resCh, | ||
639 | } | ||
640 | } | ||
641 | |||
642 | // getObject - retrieve object from Object Storage. | ||
643 | // | ||
644 | // This function additionally takes range arguments to download the specified | ||
645 | // byte range of an object. Setting offset and length to 0 will download the full object. | ||
646 | // | ||
647 | // For more information about the HTTP Range header, see | ||
648 | // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. | ||
649 | func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { | ||
650 | // Validate input arguments. | ||
651 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
652 | return nil, ObjectInfo{}, nil, err | ||
653 | } | ||
654 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
655 | return nil, ObjectInfo{}, nil, err | ||
656 | } | ||
657 | |||
658 | // Execute GET on objectName. | ||
659 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
660 | bucketName: bucketName, | ||
661 | objectName: objectName, | ||
662 | queryValues: opts.toQueryValues(), | ||
663 | customHeader: opts.Header(), | ||
664 | contentSHA256Hex: emptySHA256Hex, | ||
665 | }) | ||
666 | if err != nil { | ||
667 | return nil, ObjectInfo{}, nil, err | ||
668 | } | ||
669 | if resp != nil { | ||
670 | if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { | ||
671 | return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName) | ||
672 | } | ||
673 | } | ||
674 | |||
675 | objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header) | ||
676 | if err != nil { | ||
677 | closeResponse(resp) | ||
678 | return nil, ObjectInfo{}, nil, err | ||
679 | } | ||
680 | |||
681 | // do not close body here, caller will close | ||
682 | return resp.Body, objectStat, resp.Header, nil | ||
683 | } | ||
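Taken together, ReadAt, Seek, and Close give the streaming Object full io.ReadSeeker/io.ReaderAt semantics on top of getObject. A minimal sketch of how a caller would exercise them follows; the endpoint, credentials, bucket, and key are placeholders, not values from this diff:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials for illustration only.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	obj, err := client.GetObject(context.Background(), "mybucket", "myobject", minio.GetObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	// A second Close would return an error, per Close above.
	defer obj.Close()

	// Seek relative to the end (whence 2 == io.SeekEnd; offset must be <= 0).
	if _, err := obj.Seek(-32, io.SeekEnd); err != nil {
		log.Fatal(err)
	}

	// ReadAt reads at an absolute offset and moves the current offset.
	buf := make([]byte, 16)
	n, err := obj.ReadAt(buf, 0)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n])
}
```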
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go new file mode 100644 index 0000000..a0216e2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "fmt" | ||
22 | "net/http" | ||
23 | "net/url" | ||
24 | "strconv" | ||
25 | "time" | ||
26 | |||
27 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
28 | ) | ||
29 | |||
30 | // AdvancedGetOptions for internal use by MinIO server - not intended for client use. | ||
31 | type AdvancedGetOptions struct { | ||
32 | ReplicationDeleteMarker bool | ||
33 | IsReplicationReadyForDeleteMarker bool | ||
34 | ReplicationProxyRequest string | ||
35 | } | ||
36 | |||
37 | // GetObjectOptions are used to specify additional headers or options | ||
38 | // during GET requests. | ||
39 | type GetObjectOptions struct { | ||
40 | headers map[string]string | ||
41 | reqParams url.Values | ||
42 | ServerSideEncryption encrypt.ServerSide | ||
43 | VersionID string | ||
44 | PartNumber int | ||
45 | |||
46 | // Include any checksums, if object was uploaded with checksum. | ||
47 | // For multipart objects this is a checksum of part checksums. | ||
48 | // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html | ||
49 | Checksum bool | ||
50 | |||
51 | // Not to be used by external applications. | ||
52 | Internal AdvancedGetOptions | ||
53 | } | ||
54 | |||
55 | // StatObjectOptions are used to specify additional headers or options | ||
56 | // during GET info/stat requests. | ||
57 | type StatObjectOptions = GetObjectOptions | ||
58 | |||
59 | // Header returns the http.Header representation of the GET options. | ||
60 | func (o GetObjectOptions) Header() http.Header { | ||
61 | headers := make(http.Header, len(o.headers)) | ||
62 | for k, v := range o.headers { | ||
63 | headers.Set(k, v) | ||
64 | } | ||
65 | if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { | ||
66 | o.ServerSideEncryption.Marshal(headers) | ||
67 | } | ||
68 | // This header is set for the active-active replication scenario where a GET/HEAD | ||
69 | // to site A is proxied to site B if the object/version is missing on site A. | ||
70 | if o.Internal.ReplicationProxyRequest != "" { | ||
71 | headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest) | ||
72 | } | ||
73 | if o.Checksum { | ||
74 | headers.Set("x-amz-checksum-mode", "ENABLED") | ||
75 | } | ||
76 | return headers | ||
77 | } | ||
78 | |||
79 | // Set adds a key-value pair to the options. The | ||
80 | // key-value pair will be part of the HTTP GET request | ||
81 | // headers. | ||
82 | func (o *GetObjectOptions) Set(key, value string) { | ||
83 | if o.headers == nil { | ||
84 | o.headers = make(map[string]string) | ||
85 | } | ||
86 | o.headers[http.CanonicalHeaderKey(key)] = value | ||
87 | } | ||
88 | |||
89 | // SetReqParam - set a request query string parameter. | ||
90 | // Supported keys: see supportedQueryValues and allowedCustomQueryPrefix. | ||
91 | // If an unsupported key is passed in, it is ignored and nothing is done. | ||
92 | func (o *GetObjectOptions) SetReqParam(key, value string) { | ||
93 | if !isCustomQueryValue(key) && !isStandardQueryValue(key) { | ||
94 | // do nothing | ||
95 | return | ||
96 | } | ||
97 | if o.reqParams == nil { | ||
98 | o.reqParams = make(url.Values) | ||
99 | } | ||
100 | o.reqParams.Set(key, value) | ||
101 | } | ||
102 | |||
103 | // AddReqParam - add a request query string parameter. | ||
104 | // Supported keys: see supportedQueryValues and allowedCustomQueryPrefix. | ||
105 | // If an unsupported key is passed in, it is ignored and nothing is done. | ||
106 | func (o *GetObjectOptions) AddReqParam(key, value string) { | ||
107 | if !isCustomQueryValue(key) && !isStandardQueryValue(key) { | ||
108 | // do nothing | ||
109 | return | ||
110 | } | ||
111 | if o.reqParams == nil { | ||
112 | o.reqParams = make(url.Values) | ||
113 | } | ||
114 | o.reqParams.Add(key, value) | ||
115 | } | ||
116 | |||
117 | // SetMatchETag - set match etag. | ||
118 | func (o *GetObjectOptions) SetMatchETag(etag string) error { | ||
119 | if etag == "" { | ||
120 | return errInvalidArgument("ETag cannot be empty.") | ||
121 | } | ||
122 | o.Set("If-Match", "\""+etag+"\"") | ||
123 | return nil | ||
124 | } | ||
125 | |||
126 | // SetMatchETagExcept - set match etag except. | ||
127 | func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { | ||
128 | if etag == "" { | ||
129 | return errInvalidArgument("ETag cannot be empty.") | ||
130 | } | ||
131 | o.Set("If-None-Match", "\""+etag+"\"") | ||
132 | return nil | ||
133 | } | ||
134 | |||
135 | // SetUnmodified - set the time the object must be unmodified since. | ||
136 | func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { | ||
137 | if modTime.IsZero() { | ||
138 | return errInvalidArgument("Modified since cannot be empty.") | ||
139 | } | ||
140 | o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) | ||
141 | return nil | ||
142 | } | ||
143 | |||
144 | // SetModified - set the time the object must have been modified since. | ||
145 | func (o *GetObjectOptions) SetModified(modTime time.Time) error { | ||
146 | if modTime.IsZero() { | ||
147 | return errInvalidArgument("Modified since cannot be empty.") | ||
148 | } | ||
149 | o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) | ||
150 | return nil | ||
151 | } | ||
152 | |||
153 | // SetRange - set the start and end offset of the object to be read. | ||
154 | // See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. | ||
155 | func (o *GetObjectOptions) SetRange(start, end int64) error { | ||
156 | switch { | ||
157 | case start == 0 && end < 0: | ||
158 | // Read last '-end' bytes. `bytes=-N`. | ||
159 | o.Set("Range", fmt.Sprintf("bytes=%d", end)) | ||
160 | case 0 < start && end == 0: | ||
161 | // Read everything starting from offset | ||
162 | // 'start'. `bytes=N-`. | ||
163 | o.Set("Range", fmt.Sprintf("bytes=%d-", start)) | ||
164 | case 0 <= start && start <= end: | ||
165 | // Read everything starting at 'start' till the | ||
166 | // 'end'. `bytes=N-M` | ||
167 | o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) | ||
168 | default: | ||
169 | // All other cases such as | ||
170 | // bytes=-3- | ||
171 | // bytes=5-3 | ||
172 | // bytes=-2-4 | ||
173 | // bytes=-3-0 | ||
174 | // bytes=-3--2 | ||
175 | // are invalid. | ||
176 | return errInvalidArgument( | ||
177 | fmt.Sprintf( | ||
178 | "Invalid range specified: start=%d end=%d", | ||
179 | start, end)) | ||
180 | } | ||
181 | return nil | ||
182 | } | ||
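To make the mapping concrete, here is a hedged sketch of how these setters translate into request headers. The ETag and date are arbitrary placeholders; note that successive Set calls overwrite any previous value for the same header, so only the last SetRange wins:

```go
package main

import (
	"fmt"
	"time"

	"github.com/minio/minio-go/v7"
)

func main() {
	opts := minio.GetObjectOptions{}

	// Each valid form of SetRange maps onto one RFC 7233 byte-range spec.
	_ = opts.SetRange(0, 99)  // Range: bytes=0-99   (first 100 bytes)
	_ = opts.SetRange(100, 0) // Range: bytes=100-   (everything from offset 100)
	_ = opts.SetRange(0, -50) // Range: bytes=-50    (the last 50 bytes)

	// Conditional request headers; the ETag here is an arbitrary placeholder.
	_ = opts.SetMatchETag("d41d8cd98f00b204e9800998ecf8427e")
	_ = opts.SetModified(time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC))

	fmt.Println(opts.Header()) // inspect the assembled http.Header
}
```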
183 | |||
184 | // toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters. | ||
185 | func (o *GetObjectOptions) toQueryValues() url.Values { | ||
186 | urlValues := make(url.Values) | ||
187 | if o.VersionID != "" { | ||
188 | urlValues.Set("versionId", o.VersionID) | ||
189 | } | ||
190 | if o.PartNumber > 0 { | ||
191 | urlValues.Set("partNumber", strconv.Itoa(o.PartNumber)) | ||
192 | } | ||
193 | |||
194 | if o.reqParams != nil { | ||
195 | for key, values := range o.reqParams { | ||
196 | for _, value := range values { | ||
197 | urlValues.Add(key, value) | ||
198 | } | ||
199 | } | ||
200 | } | ||
201 | |||
202 | return urlValues | ||
203 | } | ||
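For the query-string side, a hedged package-internal sketch of what toQueryValues assembles for a versioned, part-scoped GET. It is written as an example-style test since toQueryValues is unexported; the version id is made up, and it assumes response-content-type is among the supported query values mentioned in the SetReqParam comment:

```go
package minio

import "fmt"

// Would live in a package-internal test file of the minio package.
func ExampleGetObjectOptions_toQueryValues() {
	opts := GetObjectOptions{
		VersionID:  "3d6ed34f-0000-0000-0000-000000000000", // placeholder
		PartNumber: 3,
	}
	opts.SetReqParam("response-content-type", "application/octet-stream")

	// url.Values.Encode sorts keys alphabetically.
	fmt.Println(opts.toQueryValues().Encode())
	// Output: partNumber=3&response-content-type=application%2Foctet-stream&versionId=3d6ed34f-0000-0000-0000-000000000000
}
```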
diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go new file mode 100644 index 0000000..31b6edf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-list.go | |||
@@ -0,0 +1,1057 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "fmt" | ||
23 | "net/http" | ||
24 | "net/url" | ||
25 | "time" | ||
26 | |||
27 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
28 | ) | ||
29 | |||
30 | // ListBuckets lists all buckets owned by this authenticated user. | ||
31 | // | ||
32 | // This call requires explicit authentication; no anonymous requests are | ||
33 | // allowed for listing buckets. | ||
34 | // | ||
35 | // api := client.New(....) | ||
36 | // buckets, err := api.ListBuckets(context.Background()) | ||
37 | // if err != nil { return err } | ||
38 | // for _, bucket := range buckets { fmt.Println(bucket) } | ||
39 | func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { | ||
40 | // Execute GET on service. | ||
41 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) | ||
42 | defer closeResponse(resp) | ||
43 | if err != nil { | ||
44 | return nil, err | ||
45 | } | ||
46 | if resp != nil { | ||
47 | if resp.StatusCode != http.StatusOK { | ||
48 | return nil, httpRespToErrorResponse(resp, "", "") | ||
49 | } | ||
50 | } | ||
51 | listAllMyBucketsResult := listAllMyBucketsResult{} | ||
52 | err = xmlDecoder(resp.Body, &listAllMyBucketsResult) | ||
53 | if err != nil { | ||
54 | return nil, err | ||
55 | } | ||
56 | return listAllMyBucketsResult.Buckets.Bucket, nil | ||
57 | } | ||
58 | |||
59 | // Bucket List Operations. | ||
60 | func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { | ||
61 | // Allocate new list objects channel. | ||
62 | objectStatCh := make(chan ObjectInfo, 1) | ||
63 | // Default listing is delimited at "/" | ||
64 | delimiter := "/" | ||
65 | if opts.Recursive { | ||
66 | // If recursive we do not delimit. | ||
67 | delimiter = "" | ||
68 | } | ||
69 | |||
70 | // Return object owner information by default | ||
71 | fetchOwner := true | ||
72 | |||
73 | sendObjectInfo := func(info ObjectInfo) { | ||
74 | select { | ||
75 | case objectStatCh <- info: | ||
76 | case <-ctx.Done(): | ||
77 | } | ||
78 | } | ||
79 | |||
80 | // Validate bucket name. | ||
81 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
82 | defer close(objectStatCh) | ||
83 | sendObjectInfo(ObjectInfo{ | ||
84 | Err: err, | ||
85 | }) | ||
86 | return objectStatCh | ||
87 | } | ||
88 | |||
89 | // Validate incoming object prefix. | ||
90 | if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { | ||
91 | defer close(objectStatCh) | ||
92 | sendObjectInfo(ObjectInfo{ | ||
93 | Err: err, | ||
94 | }) | ||
95 | return objectStatCh | ||
96 | } | ||
97 | |||
98 | // Initiate list objects goroutine here. | ||
99 | go func(objectStatCh chan<- ObjectInfo) { | ||
100 | defer func() { | ||
101 | if contextCanceled(ctx) { | ||
102 | objectStatCh <- ObjectInfo{ | ||
103 | Err: ctx.Err(), | ||
104 | } | ||
105 | } | ||
106 | close(objectStatCh) | ||
107 | }() | ||
108 | |||
109 | // Save continuationToken for next request. | ||
110 | var continuationToken string | ||
111 | for { | ||
112 | // Get a list of objects, a maximum of 1000 per request. | ||
113 | result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, | ||
114 | fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) | ||
115 | if err != nil { | ||
116 | sendObjectInfo(ObjectInfo{ | ||
117 | Err: err, | ||
118 | }) | ||
119 | return | ||
120 | } | ||
121 | |||
122 | // If contents are available loop through and send over channel. | ||
123 | for _, object := range result.Contents { | ||
124 | object.ETag = trimEtag(object.ETag) | ||
125 | select { | ||
126 | // Send object content. | ||
127 | case objectStatCh <- object: | ||
128 | // If we receive done from the caller, return here. | ||
129 | case <-ctx.Done(): | ||
130 | return | ||
131 | } | ||
132 | } | ||
133 | |||
134 | // Send all common prefixes if any. | ||
135 | // NOTE: prefixes are only present if the request is delimited. | ||
136 | for _, obj := range result.CommonPrefixes { | ||
137 | select { | ||
138 | // Send object prefixes. | ||
139 | case objectStatCh <- ObjectInfo{Key: obj.Prefix}: | ||
140 | // If we receive done from the caller, return here. | ||
141 | case <-ctx.Done(): | ||
142 | return | ||
143 | } | ||
144 | } | ||
145 | |||
146 | // If continuation token present, save it for next request. | ||
147 | if result.NextContinuationToken != "" { | ||
148 | continuationToken = result.NextContinuationToken | ||
149 | } | ||
150 | |||
151 | // Listing ends when the result is not truncated, return right here. | ||
152 | if !result.IsTruncated { | ||
153 | return | ||
154 | } | ||
155 | |||
156 | // Add this to catch broken S3 API implementations. | ||
157 | if continuationToken == "" { | ||
158 | sendObjectInfo(ObjectInfo{ | ||
159 | Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL), | ||
160 | }) | ||
161 | return | ||
162 | } | ||
163 | } | ||
164 | }(objectStatCh) | ||
165 | return objectStatCh | ||
166 | } | ||
167 | |||
168 | // listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. | ||
169 | // | ||
170 | // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. | ||
171 | // request parameters :- | ||
172 | // --------- | ||
173 | // ?prefix - Limits the response to keys that begin with the specified prefix. | ||
174 | // ?continuation-token - Used to continue iterating over a set of objects | ||
175 | // ?metadata - Specifies if we want metadata for the objects as part of list operation. | ||
176 | // ?delimiter - A delimiter is a character you use to group keys. | ||
177 | // ?start-after - Sets a marker to start listing lexically at this key onwards. | ||
178 | // ?max-keys - Sets the maximum number of keys returned in the response body. | ||
179 | func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { | ||
180 | // Validate bucket name. | ||
181 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
182 | return ListBucketV2Result{}, err | ||
183 | } | ||
184 | // Validate object prefix. | ||
185 | if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { | ||
186 | return ListBucketV2Result{}, err | ||
187 | } | ||
188 | // Get resources properly escaped and lined up before | ||
189 | // using them in http request. | ||
190 | urlValues := make(url.Values) | ||
191 | |||
192 | // Always set list-type in ListObjects V2 | ||
193 | urlValues.Set("list-type", "2") | ||
194 | |||
195 | if metadata { | ||
196 | urlValues.Set("metadata", "true") | ||
197 | } | ||
198 | |||
199 | // Set this conditionally if asked | ||
200 | if startAfter != "" { | ||
201 | urlValues.Set("start-after", startAfter) | ||
202 | } | ||
203 | |||
204 | // Always set encoding-type in ListObjects V2 | ||
205 | urlValues.Set("encoding-type", "url") | ||
206 | |||
207 | // Set object prefix, prefix value to be set to empty is okay. | ||
208 | urlValues.Set("prefix", objectPrefix) | ||
209 | |||
210 | // Set delimiter, delimiter value to be set to empty is okay. | ||
211 | urlValues.Set("delimiter", delimiter) | ||
212 | |||
213 | // Set continuation token | ||
214 | if continuationToken != "" { | ||
215 | urlValues.Set("continuation-token", continuationToken) | ||
216 | } | ||
217 | |||
218 | // Fetch owner when listing | ||
219 | if fetchOwner { | ||
220 | urlValues.Set("fetch-owner", "true") | ||
221 | } | ||
222 | |||
223 | // Set max keys. | ||
224 | if maxkeys > 0 { | ||
225 | urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) | ||
226 | } | ||
227 | |||
228 | // Execute GET on bucket to list objects. | ||
229 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
230 | bucketName: bucketName, | ||
231 | queryValues: urlValues, | ||
232 | contentSHA256Hex: emptySHA256Hex, | ||
233 | customHeader: headers, | ||
234 | }) | ||
235 | defer closeResponse(resp) | ||
236 | if err != nil { | ||
237 | return ListBucketV2Result{}, err | ||
238 | } | ||
239 | if resp != nil { | ||
240 | if resp.StatusCode != http.StatusOK { | ||
241 | return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") | ||
242 | } | ||
243 | } | ||
244 | |||
245 | // Decode ListBucketV2Result XML. | ||
246 | listBucketResult := ListBucketV2Result{} | ||
247 | if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { | ||
248 | return listBucketResult, err | ||
249 | } | ||
250 | |||
251 | // This is an additional verification check to make | ||
252 | // sure proper responses are received. | ||
253 | if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { | ||
254 | return listBucketResult, ErrorResponse{ | ||
255 | Code: "NotImplemented", | ||
256 | Message: "Truncated response should have continuation token set", | ||
257 | } | ||
258 | } | ||
259 | |||
260 | for i, obj := range listBucketResult.Contents { | ||
261 | listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) | ||
262 | if err != nil { | ||
263 | return listBucketResult, err | ||
264 | } | ||
265 | listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond) | ||
266 | } | ||
267 | |||
268 | for i, obj := range listBucketResult.CommonPrefixes { | ||
269 | listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) | ||
270 | if err != nil { | ||
271 | return listBucketResult, err | ||
272 | } | ||
273 | } | ||
274 | |||
275 | // Success. | ||
276 | return listBucketResult, nil | ||
277 | } | ||
278 | |||
279 | func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { | ||
280 | // Allocate new list objects channel. | ||
281 | objectStatCh := make(chan ObjectInfo, 1) | ||
282 | // Default listing is delimited at "/" | ||
283 | delimiter := "/" | ||
284 | if opts.Recursive { | ||
285 | // If recursive we do not delimit. | ||
286 | delimiter = "" | ||
287 | } | ||
288 | |||
289 | sendObjectInfo := func(info ObjectInfo) { | ||
290 | select { | ||
291 | case objectStatCh <- info: | ||
292 | case <-ctx.Done(): | ||
293 | } | ||
294 | } | ||
295 | |||
296 | // Validate bucket name. | ||
297 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
298 | defer close(objectStatCh) | ||
299 | sendObjectInfo(ObjectInfo{ | ||
300 | Err: err, | ||
301 | }) | ||
302 | return objectStatCh | ||
303 | } | ||
304 | // Validate incoming object prefix. | ||
305 | if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { | ||
306 | defer close(objectStatCh) | ||
307 | sendObjectInfo(ObjectInfo{ | ||
308 | Err: err, | ||
309 | }) | ||
310 | return objectStatCh | ||
311 | } | ||
312 | |||
313 | // Initiate list objects goroutine here. | ||
314 | go func(objectStatCh chan<- ObjectInfo) { | ||
315 | defer func() { | ||
316 | if contextCanceled(ctx) { | ||
317 | objectStatCh <- ObjectInfo{ | ||
318 | Err: ctx.Err(), | ||
319 | } | ||
320 | } | ||
321 | close(objectStatCh) | ||
322 | }() | ||
323 | |||
324 | marker := opts.StartAfter | ||
325 | for { | ||
326 | // Get a list of objects, a maximum of 1000 per request. | ||
327 | result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) | ||
328 | if err != nil { | ||
329 | sendObjectInfo(ObjectInfo{ | ||
330 | Err: err, | ||
331 | }) | ||
332 | return | ||
333 | } | ||
334 | |||
335 | // If contents are available loop through and send over channel. | ||
336 | for _, object := range result.Contents { | ||
337 | // Save the marker. | ||
338 | marker = object.Key | ||
339 | object.ETag = trimEtag(object.ETag) | ||
340 | select { | ||
341 | // Send object content. | ||
342 | case objectStatCh <- object: | ||
343 | // If we receive done from the caller, return here. | ||
344 | case <-ctx.Done(): | ||
345 | return | ||
346 | } | ||
347 | } | ||
348 | |||
349 | // Send all common prefixes if any. | ||
350 | // NOTE: prefixes are only present if the request is delimited. | ||
351 | for _, obj := range result.CommonPrefixes { | ||
352 | select { | ||
353 | // Send object prefixes. | ||
354 | case objectStatCh <- ObjectInfo{Key: obj.Prefix}: | ||
355 | // If we receive done from the caller, return here. | ||
356 | case <-ctx.Done(): | ||
357 | return | ||
358 | } | ||
359 | } | ||
360 | |||
361 | // If next marker present, save it for next request. | ||
362 | if result.NextMarker != "" { | ||
363 | marker = result.NextMarker | ||
364 | } | ||
365 | |||
366 | // Listing ends when the result is not truncated, return right here. | ||
367 | if !result.IsTruncated { | ||
368 | return | ||
369 | } | ||
370 | } | ||
371 | }(objectStatCh) | ||
372 | return objectStatCh | ||
373 | } | ||
374 | |||
375 | func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { | ||
376 | // Allocate new list objects channel. | ||
377 | resultCh := make(chan ObjectInfo, 1) | ||
378 | // Default listing is delimited at "/" | ||
379 | delimiter := "/" | ||
380 | if opts.Recursive { | ||
381 | // If recursive we do not delimit. | ||
382 | delimiter = "" | ||
383 | } | ||
384 | |||
385 | sendObjectInfo := func(info ObjectInfo) { | ||
386 | select { | ||
387 | case resultCh <- info: | ||
388 | case <-ctx.Done(): | ||
389 | } | ||
390 | } | ||
391 | |||
392 | // Validate bucket name. | ||
393 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
394 | defer close(resultCh) | ||
395 | sendObjectInfo(ObjectInfo{ | ||
396 | Err: err, | ||
397 | }) | ||
398 | return resultCh | ||
399 | } | ||
400 | |||
401 | // Validate incoming object prefix. | ||
402 | if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { | ||
403 | defer close(resultCh) | ||
404 | sendObjectInfo(ObjectInfo{ | ||
405 | Err: err, | ||
406 | }) | ||
407 | return resultCh | ||
408 | } | ||
409 | |||
410 | // Initiate list objects goroutine here. | ||
411 | go func(resultCh chan<- ObjectInfo) { | ||
412 | defer func() { | ||
413 | if contextCanceled(ctx) { | ||
414 | resultCh <- ObjectInfo{ | ||
415 | Err: ctx.Err(), | ||
416 | } | ||
417 | } | ||
418 | close(resultCh) | ||
419 | }() | ||
420 | |||
421 | var ( | ||
422 | keyMarker = "" | ||
423 | versionIDMarker = "" | ||
424 | ) | ||
425 | |||
426 | for { | ||
427 | // Get a list of objects, a maximum of 1000 per request. | ||
428 | result, err := c.listObjectVersionsQuery(ctx, bucketName, opts, keyMarker, versionIDMarker, delimiter) | ||
429 | if err != nil { | ||
430 | sendObjectInfo(ObjectInfo{ | ||
431 | Err: err, | ||
432 | }) | ||
433 | return | ||
434 | } | ||
435 | |||
436 | // If contents are available loop through and send over channel. | ||
437 | for _, version := range result.Versions { | ||
438 | info := ObjectInfo{ | ||
439 | ETag: trimEtag(version.ETag), | ||
440 | Key: version.Key, | ||
441 | LastModified: version.LastModified.Truncate(time.Millisecond), | ||
442 | Size: version.Size, | ||
443 | Owner: version.Owner, | ||
444 | StorageClass: version.StorageClass, | ||
445 | IsLatest: version.IsLatest, | ||
446 | VersionID: version.VersionID, | ||
447 | IsDeleteMarker: version.isDeleteMarker, | ||
448 | UserTags: version.UserTags, | ||
449 | UserMetadata: version.UserMetadata, | ||
450 | Internal: version.Internal, | ||
451 | } | ||
452 | select { | ||
453 | // Send object version info. | ||
454 | case resultCh <- info: | ||
455 | // If we receive done from the caller, return here. | ||
456 | case <-ctx.Done(): | ||
457 | return | ||
458 | } | ||
459 | } | ||
460 | |||
461 | // Send all common prefixes if any. | ||
462 | // NOTE: prefixes are only present if the request is delimited. | ||
463 | for _, obj := range result.CommonPrefixes { | ||
464 | select { | ||
465 | // Send object prefixes. | ||
466 | case resultCh <- ObjectInfo{Key: obj.Prefix}: | ||
467 | // If we receive done from the caller, return here. | ||
468 | case <-ctx.Done(): | ||
469 | return | ||
470 | } | ||
471 | } | ||
472 | |||
473 | // If next key marker is present, save it for next request. | ||
474 | if result.NextKeyMarker != "" { | ||
475 | keyMarker = result.NextKeyMarker | ||
476 | } | ||
477 | |||
478 | // If next version id marker is present, save it for next request. | ||
479 | if result.NextVersionIDMarker != "" { | ||
480 | versionIDMarker = result.NextVersionIDMarker | ||
481 | } | ||
482 | |||
483 | // Listing ends when the result is not truncated, return right here. | ||
484 | if !result.IsTruncated { | ||
485 | return | ||
486 | } | ||
487 | } | ||
488 | }(resultCh) | ||
489 | return resultCh | ||
490 | } | ||
491 | |||
492 | // listObjectVersionsQuery - (List Object Versions) - List some or all (up to 1000) of the existing objects | ||
493 | // and their versions in a bucket. | ||
494 | // | ||
495 | // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. | ||
496 | // request parameters :- | ||
497 | // --------- | ||
498 | // ?key-marker - Specifies the key to start with when listing objects in a bucket. | ||
499 | // ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket. | ||
500 | // ?delimiter - A delimiter is a character you use to group keys. | ||
501 | // ?prefix - Limits the response to keys that begin with the specified prefix. | ||
502 | // ?max-keys - Sets the maximum number of keys returned in the response body. | ||
503 | func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName string, opts ListObjectsOptions, keyMarker, versionIDMarker, delimiter string) (ListVersionsResult, error) { | ||
504 | // Validate bucket name. | ||
505 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
506 | return ListVersionsResult{}, err | ||
507 | } | ||
508 | // Validate object prefix. | ||
509 | if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { | ||
510 | return ListVersionsResult{}, err | ||
511 | } | ||
512 | // Get resources properly escaped and lined up before | ||
513 | // using them in http request. | ||
514 | urlValues := make(url.Values) | ||
515 | |||
516 | // Set versions to trigger versioning API | ||
517 | urlValues.Set("versions", "") | ||
518 | |||
519 | // Set object prefix, prefix value to be set to empty is okay. | ||
520 | urlValues.Set("prefix", opts.Prefix) | ||
521 | |||
522 | // Set delimiter, delimiter value to be set to empty is okay. | ||
523 | urlValues.Set("delimiter", delimiter) | ||
524 | |||
525 | // Set object marker. | ||
526 | if keyMarker != "" { | ||
527 | urlValues.Set("key-marker", keyMarker) | ||
528 | } | ||
529 | |||
530 | // Set max keys. | ||
531 | if opts.MaxKeys > 0 { | ||
532 | urlValues.Set("max-keys", fmt.Sprintf("%d", opts.MaxKeys)) | ||
533 | } | ||
534 | |||
535 | // Set version ID marker | ||
536 | if versionIDMarker != "" { | ||
537 | urlValues.Set("version-id-marker", versionIDMarker) | ||
538 | } | ||
539 | |||
540 | if opts.WithMetadata { | ||
541 | urlValues.Set("metadata", "true") | ||
542 | } | ||
543 | |||
544 | // Always set encoding-type | ||
545 | urlValues.Set("encoding-type", "url") | ||
546 | |||
547 | // Execute GET on bucket to list objects. | ||
548 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
549 | bucketName: bucketName, | ||
550 | queryValues: urlValues, | ||
551 | contentSHA256Hex: emptySHA256Hex, | ||
552 | customHeader: opts.headers, | ||
553 | }) | ||
554 | defer closeResponse(resp) | ||
555 | if err != nil { | ||
556 | return ListVersionsResult{}, err | ||
557 | } | ||
558 | if resp != nil { | ||
559 | if resp.StatusCode != http.StatusOK { | ||
560 | return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "") | ||
561 | } | ||
562 | } | ||
563 | |||
564 | // Decode ListVersionsResult XML. | ||
565 | listObjectVersionsOutput := ListVersionsResult{} | ||
566 | err = xmlDecoder(resp.Body, &listObjectVersionsOutput) | ||
567 | if err != nil { | ||
568 | return ListVersionsResult{}, err | ||
569 | } | ||
570 | |||
571 | for i, obj := range listObjectVersionsOutput.Versions { | ||
572 | listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType) | ||
573 | if err != nil { | ||
574 | return listObjectVersionsOutput, err | ||
575 | } | ||
576 | } | ||
577 | |||
578 | for i, obj := range listObjectVersionsOutput.CommonPrefixes { | ||
579 | listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType) | ||
580 | if err != nil { | ||
581 | return listObjectVersionsOutput, err | ||
582 | } | ||
583 | } | ||
584 | |||
585 | if listObjectVersionsOutput.NextKeyMarker != "" { | ||
586 | listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType) | ||
587 | if err != nil { | ||
588 | return listObjectVersionsOutput, err | ||
589 | } | ||
590 | } | ||
591 | |||
592 | return listObjectVersionsOutput, nil | ||
593 | } | ||
594 | |||
595 | // listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. | ||
596 | // | ||
597 | // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. | ||
598 | // request parameters :- | ||
599 | // --------- | ||
600 | // ?marker - Specifies the key to start with when listing objects in a bucket. | ||
601 | // ?delimiter - A delimiter is a character you use to group keys. | ||
602 | // ?prefix - Limits the response to keys that begin with the specified prefix. | ||
603 | // ?max-keys - Sets the maximum number of keys returned in the response body. | ||
604 | func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { | ||
605 | // Validate bucket name. | ||
606 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
607 | return ListBucketResult{}, err | ||
608 | } | ||
609 | // Validate object prefix. | ||
610 | if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { | ||
611 | return ListBucketResult{}, err | ||
612 | } | ||
613 | // Get resources properly escaped and lined up before | ||
614 | // using them in http request. | ||
615 | urlValues := make(url.Values) | ||
616 | |||
617 | // Set object prefix, prefix value to be set to empty is okay. | ||
618 | urlValues.Set("prefix", objectPrefix) | ||
619 | |||
620 | // Set delimiter, delimiter value to be set to empty is okay. | ||
621 | urlValues.Set("delimiter", delimiter) | ||
622 | |||
623 | // Set object marker. | ||
624 | if objectMarker != "" { | ||
625 | urlValues.Set("marker", objectMarker) | ||
626 | } | ||
627 | |||
628 | // Set max keys. | ||
629 | if maxkeys > 0 { | ||
630 | urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) | ||
631 | } | ||
632 | |||
633 | // Always set encoding-type | ||
634 | urlValues.Set("encoding-type", "url") | ||
635 | |||
636 | // Execute GET on bucket to list objects. | ||
637 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
638 | bucketName: bucketName, | ||
639 | queryValues: urlValues, | ||
640 | contentSHA256Hex: emptySHA256Hex, | ||
641 | customHeader: headers, | ||
642 | }) | ||
643 | defer closeResponse(resp) | ||
644 | if err != nil { | ||
645 | return ListBucketResult{}, err | ||
646 | } | ||
647 | if resp != nil { | ||
648 | if resp.StatusCode != http.StatusOK { | ||
649 | return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") | ||
650 | } | ||
651 | } | ||
652 | // Decode ListBucketResult XML. | ||
653 | listBucketResult := ListBucketResult{} | ||
654 | err = xmlDecoder(resp.Body, &listBucketResult) | ||
655 | if err != nil { | ||
656 | return listBucketResult, err | ||
657 | } | ||
658 | |||
659 | for i, obj := range listBucketResult.Contents { | ||
660 | listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) | ||
661 | if err != nil { | ||
662 | return listBucketResult, err | ||
663 | } | ||
664 | listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond) | ||
665 | } | ||
666 | |||
667 | for i, obj := range listBucketResult.CommonPrefixes { | ||
668 | listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) | ||
669 | if err != nil { | ||
670 | return listBucketResult, err | ||
671 | } | ||
672 | } | ||
673 | |||
674 | if listBucketResult.NextMarker != "" { | ||
675 | listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType) | ||
676 | if err != nil { | ||
677 | return listBucketResult, err | ||
678 | } | ||
679 | } | ||
680 | |||
681 | return listBucketResult, nil | ||
682 | } | ||
683 | |||
684 | // ListObjectsOptions holds all options of a list object request | ||
685 | type ListObjectsOptions struct { | ||
686 | // Include objects versions in the listing | ||
687 | WithVersions bool | ||
688 | // Include objects metadata in the listing | ||
689 | WithMetadata bool | ||
690 | // Only list objects with the prefix | ||
691 | Prefix string | ||
692 | // Ignore '/' delimiter | ||
693 | Recursive bool | ||
694 | // The maximum number of objects requested per | ||
695 | // batch; an advanced use-case not useful for most | ||
696 | // applications | ||
697 | MaxKeys int | ||
698 | // StartAfter starts listing lexically at this | ||
699 | // object onwards; this value can also be set | ||
700 | // for Marker when `UseV1` is set to true. | ||
701 | StartAfter string | ||
702 | |||
703 | // Use the deprecated list objects V1 API | ||
704 | UseV1 bool | ||
705 | |||
706 | headers http.Header | ||
707 | } | ||
708 | |||
709 | // Set adds a key-value pair to the options. The | ||
710 | // key-value pair will be part of the HTTP GET request | ||
711 | // headers. | ||
712 | func (o *ListObjectsOptions) Set(key, value string) { | ||
713 | if o.headers == nil { | ||
714 | o.headers = make(http.Header) | ||
715 | } | ||
716 | o.headers.Set(key, value) | ||
717 | } | ||
718 | |||
719 | // ListObjects returns objects list after evaluating the passed options. | ||
720 | // | ||
721 | // api := client.New(....) | ||
722 | // for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { | ||
723 | // fmt.Println(object) | ||
724 | // } | ||
725 | // | ||
726 | // If the caller cancels the context, then the last entry on the 'chan ObjectInfo' will be the context.Error(). | ||
727 | // The caller must drain the channel entirely and wait until the channel is closed before proceeding; without | ||
728 | // waiting for the channel to be closed completely you might leak goroutines. | ||
729 | func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { | ||
730 | if opts.WithVersions { | ||
731 | return c.listObjectVersions(ctx, bucketName, opts) | ||
732 | } | ||
733 | |||
734 | // Use legacy list objects v1 API | ||
735 | if opts.UseV1 { | ||
736 | return c.listObjects(ctx, bucketName, opts) | ||
737 | } | ||
738 | |||
739 | // Check whether this is a snowball region; if so, ListObjectsV2 doesn't work, so fall back to the V1 listObjects. | ||
740 | if location, ok := c.bucketLocCache.Get(bucketName); ok { | ||
741 | if location == "snowball" { | ||
742 | return c.listObjects(ctx, bucketName, opts) | ||
743 | } | ||
744 | } | ||
745 | |||
746 | return c.listObjectsV2(ctx, bucketName, opts) | ||
747 | } | ||
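A hedged usage sketch of the channel contract described above: drain the channel fully and check Err on every entry. The bucket and prefix are placeholders, and the client is assumed to be initialized as in the earlier example; set WithVersions to route to the version listing instead:

```go
package example // sketch only

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
)

// listAll drains the listing channel fully and checks Err on every entry,
// per the warning in the ListObjects doc comment.
func listAll(client *minio.Client) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for object := range client.ListObjects(ctx, "mybucket", minio.ListObjectsOptions{
		Prefix:    "logs/",
		Recursive: true,
	}) {
		if object.Err != nil {
			// On error the listing goroutine closes the channel, ending the loop.
			log.Println("listing error:", object.Err)
			continue
		}
		fmt.Println(object.Key, object.Size)
	}
}
```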
748 | |||
749 | // ListIncompleteUploads - List incompletely uploaded multipart objects. | ||
750 | // | ||
751 | // ListIncompleteUploads lists all incomplete uploads of objects matching the | ||
752 | // objectPrefix in the specified bucket. If recursion is enabled | ||
753 | // it will list all subdirectories and all their contents. | ||
754 | // | ||
755 | // Your input parameters are just bucketName, objectPrefix and recursive. | ||
756 | // If you set recursive to 'true' this function will return all | ||
757 | // the incomplete multipart objects in the given bucket. | ||
758 | // | ||
759 | // api := client.New(....) | ||
760 | // // Recursively list all incomplete uploads in 'mytestbucket' | ||
761 | // recursive := true | ||
762 | // for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { | ||
763 | // fmt.Println(message) | ||
764 | // } | ||
765 | func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { | ||
766 | return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive) | ||
767 | } | ||
768 | |||
769 | // contextCanceled returns whether a context is canceled. | ||
770 | func contextCanceled(ctx context.Context) bool { | ||
771 | select { | ||
772 | case <-ctx.Done(): | ||
773 | return true | ||
774 | default: | ||
775 | return false | ||
776 | } | ||
777 | } | ||
778 | |||
779 | // listIncompleteUploads lists all incomplete uploads. | ||
780 | func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { | ||
781 | // Allocate channel for multipart uploads. | ||
782 | objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) | ||
783 | // Delimiter is set to "/" by default. | ||
784 | delimiter := "/" | ||
785 | if recursive { | ||
786 | // If recursive do not delimit. | ||
787 | delimiter = "" | ||
788 | } | ||
789 | // Validate bucket name. | ||
790 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
791 | defer close(objectMultipartStatCh) | ||
792 | objectMultipartStatCh <- ObjectMultipartInfo{ | ||
793 | Err: err, | ||
794 | } | ||
795 | return objectMultipartStatCh | ||
796 | } | ||
797 | // Validate incoming object prefix. | ||
798 | if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { | ||
799 | defer close(objectMultipartStatCh) | ||
800 | objectMultipartStatCh <- ObjectMultipartInfo{ | ||
801 | Err: err, | ||
802 | } | ||
803 | return objectMultipartStatCh | ||
804 | } | ||
805 | go func(objectMultipartStatCh chan<- ObjectMultipartInfo) { | ||
806 | defer func() { | ||
807 | if contextCanceled(ctx) { | ||
808 | objectMultipartStatCh <- ObjectMultipartInfo{ | ||
809 | Err: ctx.Err(), | ||
810 | } | ||
811 | } | ||
812 | close(objectMultipartStatCh) | ||
813 | }() | ||
814 | |||
815 | // object and upload ID marker for future requests. | ||
816 | var objectMarker string | ||
817 | var uploadIDMarker string | ||
818 | for { | ||
819 | // list all multipart uploads. | ||
820 | result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0) | ||
821 | if err != nil { | ||
822 | objectMultipartStatCh <- ObjectMultipartInfo{ | ||
823 | Err: err, | ||
824 | } | ||
825 | return | ||
826 | } | ||
827 | objectMarker = result.NextKeyMarker | ||
828 | uploadIDMarker = result.NextUploadIDMarker | ||
829 | |||
830 | // Send all multipart uploads. | ||
831 | for _, obj := range result.Uploads { | ||
832 | // Send each upload over the channel as it is listed. | ||
833 | select { | ||
834 | // Send individual uploads here. | ||
835 | case objectMultipartStatCh <- obj: | ||
836 | // If the context is canceled | ||
837 | case <-ctx.Done(): | ||
838 | return | ||
839 | } | ||
840 | } | ||
841 | // Send all common prefixes if any. | ||
842 | // NOTE: prefixes are only present if the request is delimited. | ||
843 | for _, obj := range result.CommonPrefixes { | ||
844 | select { | ||
845 | // Send delimited prefixes here. | ||
846 | case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}: | ||
847 | // If context is canceled. | ||
848 | case <-ctx.Done(): | ||
849 | return | ||
850 | } | ||
851 | } | ||
852 | // Listing ends when the result is not truncated, return right here. | ||
853 | if !result.IsTruncated { | ||
854 | return | ||
855 | } | ||
856 | } | ||
857 | }(objectMultipartStatCh) | ||
858 | // return. | ||
859 | return objectMultipartStatCh | ||
860 | } | ||
861 | |||
862 | // listMultipartUploadsQuery - (List Multipart Uploads). | ||
863 | // - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. | ||
864 | // | ||
865 | // You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. | ||
866 | // request parameters :- | ||
867 | // --------- | ||
868 | // ?key-marker - Specifies the multipart upload after which listing should begin. | ||
869 | // ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. | ||
870 | // ?delimiter - A delimiter is a character you use to group keys. | ||
871 | // ?prefix - Limits the response to keys that begin with the specified prefix. | ||
872 | // ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. | ||
873 | func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { | ||
874 | // Get resources properly escaped and lined up before using them in http request. | ||
875 | urlValues := make(url.Values) | ||
876 | // Set uploads. | ||
877 | urlValues.Set("uploads", "") | ||
878 | // Set object key marker. | ||
879 | if keyMarker != "" { | ||
880 | urlValues.Set("key-marker", keyMarker) | ||
881 | } | ||
882 | // Set upload id marker. | ||
883 | if uploadIDMarker != "" { | ||
884 | urlValues.Set("upload-id-marker", uploadIDMarker) | ||
885 | } | ||
886 | |||
887 | // Set object prefix, prefix value to be set to empty is okay. | ||
888 | urlValues.Set("prefix", prefix) | ||
889 | |||
890 | // Set delimiter, delimiter value to be set to empty is okay. | ||
891 | urlValues.Set("delimiter", delimiter) | ||
892 | |||
893 | // Always set encoding-type | ||
894 | urlValues.Set("encoding-type", "url") | ||
895 | |||
896 | // maxUploads should be 1000 or less. | ||
897 | if maxUploads > 0 { | ||
898 | // Set max-uploads. | ||
899 | urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) | ||
900 | } | ||
901 | |||
902 | // Execute GET on bucketName to list multipart uploads. | ||
903 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
904 | bucketName: bucketName, | ||
905 | queryValues: urlValues, | ||
906 | contentSHA256Hex: emptySHA256Hex, | ||
907 | }) | ||
908 | defer closeResponse(resp) | ||
909 | if err != nil { | ||
910 | return ListMultipartUploadsResult{}, err | ||
911 | } | ||
912 | if resp != nil { | ||
913 | if resp.StatusCode != http.StatusOK { | ||
914 | return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") | ||
915 | } | ||
916 | } | ||
917 | // Decode response body. | ||
918 | listMultipartUploadsResult := ListMultipartUploadsResult{} | ||
919 | err = xmlDecoder(resp.Body, &listMultipartUploadsResult) | ||
920 | if err != nil { | ||
921 | return listMultipartUploadsResult, err | ||
922 | } | ||
923 | |||
924 | listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType) | ||
925 | if err != nil { | ||
926 | return listMultipartUploadsResult, err | ||
927 | } | ||
928 | |||
929 | listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType) | ||
930 | if err != nil { | ||
931 | return listMultipartUploadsResult, err | ||
932 | } | ||
933 | |||
934 | for i, obj := range listMultipartUploadsResult.Uploads { | ||
935 | listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType) | ||
936 | if err != nil { | ||
937 | return listMultipartUploadsResult, err | ||
938 | } | ||
939 | } | ||
940 | |||
941 | for i, obj := range listMultipartUploadsResult.CommonPrefixes { | ||
942 | listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType) | ||
943 | if err != nil { | ||
944 | return listMultipartUploadsResult, err | ||
945 | } | ||
946 | } | ||
947 | |||
948 | return listMultipartUploadsResult, nil | ||
949 | } | ||
950 | |||
951 | // listObjectParts lists all object parts recursively. | ||
952 | // | ||
953 | //lint:ignore U1000 Keep this around | ||
954 | func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { | ||
955 | // Part number marker for the next batch of request. | ||
956 | var nextPartNumberMarker int | ||
957 | partsInfo = make(map[int]ObjectPart) | ||
958 | for { | ||
959 | // Get list of uploaded parts a maximum of 1000 per request. | ||
960 | listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000) | ||
961 | if err != nil { | ||
962 | return nil, err | ||
963 | } | ||
964 | // Append to parts info. | ||
965 | for _, part := range listObjPartsResult.ObjectParts { | ||
966 | // Trim the odd double quotes from the beginning and end of the ETag. | ||
967 | part.ETag = trimEtag(part.ETag) | ||
968 | partsInfo[part.PartNumber] = part | ||
969 | } | ||
970 | // Keep the part number marker for the next iteration. | ||
971 | nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker | ||
972 | // Listing ends when the result is not truncated, return right here. | ||
973 | if !listObjPartsResult.IsTruncated { | ||
974 | break | ||
975 | } | ||
976 | } | ||
977 | |||
978 | // Return all the parts. | ||
979 | return partsInfo, nil | ||
980 | } | ||
981 | |||
982 | // findUploadIDs lists all incomplete uploads and finds the uploadIDs matching the object name. | ||
983 | func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { | ||
984 | var uploadIDs []string | ||
985 | // Make list incomplete uploads recursive. | ||
986 | isRecursive := true | ||
987 | // List all incomplete uploads. | ||
988 | for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) { | ||
989 | if mpUpload.Err != nil { | ||
990 | return nil, mpUpload.Err | ||
991 | } | ||
992 | if objectName == mpUpload.Key { | ||
993 | uploadIDs = append(uploadIDs, mpUpload.UploadID) | ||
994 | } | ||
995 | } | ||
996 | // Return all the matching upload ids. | ||
997 | return uploadIDs, nil | ||
998 | } | ||
999 | |||
1000 | // listObjectPartsQuery (List Parts query) | ||
1001 | // - lists some or all (up to 1000) parts that have been uploaded | ||
1002 | // for a specific multipart upload | ||
1003 | // | ||
1004 | // You can use the request parameters as selection criteria to return | ||
1005 | // a subset of the uploads in a bucket, request parameters :- | ||
1006 | // --------- | ||
1007 | // ?part-number-marker - Specifies the part after which listing should | ||
1008 | // begin. | ||
1009 | // ?max-parts - Maximum parts to be listed per request. | ||
1010 | func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { | ||
1011 | // Get resources properly escaped and lined up before using them in http request. | ||
1012 | urlValues := make(url.Values) | ||
1013 | // Set part number marker. | ||
1014 | urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) | ||
1015 | // Set upload id. | ||
1016 | urlValues.Set("uploadId", uploadID) | ||
1017 | |||
1018 | // maxParts should be 1000 or less. | ||
1019 | if maxParts > 0 { | ||
1020 | // Set max parts. | ||
1021 | urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) | ||
1022 | } | ||
1023 | |||
1024 | // Execute GET on objectName to get list of parts. | ||
1025 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
1026 | bucketName: bucketName, | ||
1027 | objectName: objectName, | ||
1028 | queryValues: urlValues, | ||
1029 | contentSHA256Hex: emptySHA256Hex, | ||
1030 | }) | ||
1031 | defer closeResponse(resp) | ||
1032 | if err != nil { | ||
1033 | return ListObjectPartsResult{}, err | ||
1034 | } | ||
1035 | if resp != nil { | ||
1036 | if resp.StatusCode != http.StatusOK { | ||
1037 | return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) | ||
1038 | } | ||
1039 | } | ||
1040 | // Decode list object parts XML. | ||
1041 | listObjectPartsResult := ListObjectPartsResult{} | ||
1042 | err = xmlDecoder(resp.Body, &listObjectPartsResult) | ||
1043 | if err != nil { | ||
1044 | return listObjectPartsResult, err | ||
1045 | } | ||
1046 | return listObjectPartsResult, nil | ||
1047 | } | ||
1048 | |||
1049 | // Decode an S3 object name according to the encoding type | ||
1050 | func decodeS3Name(name, encodingType string) (string, error) { | ||
1051 | switch encodingType { | ||
1052 | case "url": | ||
1053 | return url.QueryUnescape(name) | ||
1054 | default: | ||
1055 | return name, nil | ||
1056 | } | ||
1057 | } | ||
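Since every list call above sets encoding-type=url, keys and prefixes come back percent-encoded, and decodeS3Name undoes that via url.QueryUnescape (which, as a caveat, also turns '+' into a space). A hedged package-internal illustration with a made-up key:

```go
package minio

import "fmt"

// Would live in a package-internal test, since decodeS3Name is unexported.
func ExampleDecodeS3Name() {
	// With encoding-type=url the server percent-encodes the key.
	key, err := decodeS3Name("photos%2F2023%2Fny%C3%A5r.jpg", "url")
	fmt.Println(key, err)

	// Any other encoding type returns the name unchanged.
	key, _ = decodeS3Name("plain/name.txt", "")
	fmt.Println(key)
	// Output:
	// photos/2023/nyår.jpg <nil>
	// plain/name.txt
}
```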
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go new file mode 100644 index 0000000..0c027d5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go | |||
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/xml" | ||
24 | "fmt" | ||
25 | "net/http" | ||
26 | "net/url" | ||
27 | |||
28 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
29 | ) | ||
30 | |||
31 | // objectLegalHold - object legal hold specified in | ||
32 | // https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html | ||
33 | type objectLegalHold struct { | ||
34 | XMLNS string `xml:"xmlns,attr,omitempty"` | ||
35 | XMLName xml.Name `xml:"LegalHold"` | ||
36 | Status LegalHoldStatus `xml:"Status,omitempty"` | ||
37 | } | ||
38 | |||
39 | // PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call | ||
40 | type PutObjectLegalHoldOptions struct { | ||
41 | VersionID string | ||
42 | Status *LegalHoldStatus | ||
43 | } | ||
44 | |||
45 | // GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call | ||
46 | type GetObjectLegalHoldOptions struct { | ||
47 | VersionID string | ||
48 | } | ||
49 | |||
50 | // LegalHoldStatus - object legal hold status. | ||
51 | type LegalHoldStatus string | ||
52 | |||
53 | const ( | ||
54 | // LegalHoldEnabled indicates legal hold is enabled | ||
55 | LegalHoldEnabled LegalHoldStatus = "ON" | ||
56 | |||
57 | // LegalHoldDisabled indicates legal hold is disabled | ||
58 | LegalHoldDisabled LegalHoldStatus = "OFF" | ||
59 | ) | ||
60 | |||
61 | func (r LegalHoldStatus) String() string { | ||
62 | return string(r) | ||
63 | } | ||
64 | |||
65 | // IsValid - check whether this legal hold status is valid or not. | ||
66 | func (r LegalHoldStatus) IsValid() bool { | ||
67 | return r == LegalHoldEnabled || r == LegalHoldDisabled | ||
68 | } | ||
69 | |||
70 | func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { | ||
71 | if status == nil { | ||
72 | return nil, fmt.Errorf("status not set") | ||
73 | } | ||
74 | if !status.IsValid() { | ||
75 | return nil, fmt.Errorf("invalid legal hold status `%v`", status) | ||
76 | } | ||
77 | legalHold := &objectLegalHold{ | ||
78 | Status: *status, | ||
79 | } | ||
80 | return legalHold, nil | ||
81 | } | ||
82 | |||
83 | // PutObjectLegalHold sets object legal hold for a given object and versionID. | ||
84 | func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { | ||
85 | // Input validation. | ||
86 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
87 | return err | ||
88 | } | ||
89 | |||
90 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
91 | return err | ||
92 | } | ||
93 | |||
94 | // Get resources properly escaped and lined up before | ||
95 | // using them in http request. | ||
96 | urlValues := make(url.Values) | ||
97 | urlValues.Set("legal-hold", "") | ||
98 | |||
99 | if opts.VersionID != "" { | ||
100 | urlValues.Set("versionId", opts.VersionID) | ||
101 | } | ||
102 | |||
103 | lh, err := newObjectLegalHold(opts.Status) | ||
104 | if err != nil { | ||
105 | return err | ||
106 | } | ||
107 | |||
108 | lhData, err := xml.Marshal(lh) | ||
109 | if err != nil { | ||
110 | return err | ||
111 | } | ||
112 | |||
113 | reqMetadata := requestMetadata{ | ||
114 | bucketName: bucketName, | ||
115 | objectName: objectName, | ||
116 | queryValues: urlValues, | ||
117 | contentBody: bytes.NewReader(lhData), | ||
118 | contentLength: int64(len(lhData)), | ||
119 | contentMD5Base64: sumMD5Base64(lhData), | ||
120 | contentSHA256Hex: sum256Hex(lhData), | ||
121 | } | ||
122 | |||
123 | // Execute PUT Object Legal Hold. | ||
124 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
125 | defer closeResponse(resp) | ||
126 | if err != nil { | ||
127 | return err | ||
128 | } | ||
129 | if resp != nil { | ||
130 | if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { | ||
131 | return httpRespToErrorResponse(resp, bucketName, objectName) | ||
132 | } | ||
133 | } | ||
134 | return nil | ||
135 | } | ||
136 | |||
137 | // GetObjectLegalHold gets legal-hold status of given object. | ||
138 | func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { | ||
139 | // Input validation. | ||
140 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
141 | return nil, err | ||
142 | } | ||
143 | |||
144 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
145 | return nil, err | ||
146 | } | ||
147 | urlValues := make(url.Values) | ||
148 | urlValues.Set("legal-hold", "") | ||
149 | |||
150 | if opts.VersionID != "" { | ||
151 | urlValues.Set("versionId", opts.VersionID) | ||
152 | } | ||
153 | |||
154 | // Execute GET to fetch the object's legal hold status. | ||
155 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
156 | bucketName: bucketName, | ||
157 | objectName: objectName, | ||
158 | queryValues: urlValues, | ||
159 | contentSHA256Hex: emptySHA256Hex, | ||
160 | }) | ||
161 | defer closeResponse(resp) | ||
162 | if err != nil { | ||
163 | return nil, err | ||
164 | } | ||
165 | if resp != nil { | ||
166 | if resp.StatusCode != http.StatusOK { | ||
167 | return nil, httpRespToErrorResponse(resp, bucketName, objectName) | ||
168 | } | ||
169 | } | ||
170 | lh := &objectLegalHold{} | ||
171 | if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil { | ||
172 | return nil, err | ||
173 | } | ||
174 | |||
175 | return &lh.Status, nil | ||
176 | } | ||
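
A minimal usage sketch for the two calls above, assuming a configured *minio.Client (constructed as in the earlier sketch) and placeholder bucket, object, and version ID values:

    // legalHoldExample assumes imports: context, fmt, and minio (v7).
    func legalHoldExample(ctx context.Context, client *minio.Client, versionID string) error {
    	// Enable the hold on a specific object version.
    	status := minio.LegalHoldEnabled
    	err := client.PutObjectLegalHold(ctx, "my-bucket", "my-object",
    		minio.PutObjectLegalHoldOptions{VersionID: versionID, Status: &status})
    	if err != nil {
    		return err
    	}

    	// Read it back; the returned status is "ON" or "OFF".
    	st, err := client.GetObjectLegalHold(ctx, "my-bucket", "my-object",
    		minio.GetObjectLegalHoldOptions{VersionID: versionID})
    	if err != nil {
    		return err
    	}
    	fmt.Println("legal hold:", *st)
    	return nil
    }

Legal hold requires the bucket to have been created with object locking enabled; the server rejects the call otherwise.
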
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go new file mode 100644 index 0000000..f0a4398 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go | |||
@@ -0,0 +1,241 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2019 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/xml" | ||
24 | "fmt" | ||
25 | "net/http" | ||
26 | "net/url" | ||
27 | "time" | ||
28 | |||
29 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
30 | ) | ||
31 | |||
32 | // RetentionMode - object retention mode. | ||
33 | type RetentionMode string | ||
34 | |||
35 | const ( | ||
36 | // Governance - governance mode. | ||
37 | Governance RetentionMode = "GOVERNANCE" | ||
38 | |||
39 | // Compliance - compliance mode. | ||
40 | Compliance RetentionMode = "COMPLIANCE" | ||
41 | ) | ||
42 | |||
43 | func (r RetentionMode) String() string { | ||
44 | return string(r) | ||
45 | } | ||
46 | |||
47 | // IsValid - check whether this retention mode is valid or not. | ||
48 | func (r RetentionMode) IsValid() bool { | ||
49 | return r == Governance || r == Compliance | ||
50 | } | ||
51 | |||
52 | // ValidityUnit - retention validity unit. | ||
53 | type ValidityUnit string | ||
54 | |||
55 | const ( | ||
56 | // Days - denotes no. of days. | ||
57 | Days ValidityUnit = "DAYS" | ||
58 | |||
59 | // Years - denotes no. of years. | ||
60 | Years ValidityUnit = "YEARS" | ||
61 | ) | ||
62 | |||
63 | func (unit ValidityUnit) String() string { | ||
64 | return string(unit) | ||
65 | } | ||
66 | |||
67 | // isValid - check whether this validity unit is valid or not. | ||
68 | func (unit ValidityUnit) isValid() bool { | ||
69 | return unit == Days || unit == Years | ||
70 | } | ||
71 | |||
72 | // Retention - bucket level retention configuration. | ||
73 | type Retention struct { | ||
74 | Mode RetentionMode | ||
75 | Validity time.Duration | ||
76 | } | ||
77 | |||
78 | func (r Retention) String() string { | ||
79 | return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity) | ||
80 | } | ||
81 | |||
82 | // IsEmpty - returns whether retention is empty or not. | ||
83 | func (r Retention) IsEmpty() bool { | ||
84 | return r.Mode == "" || r.Validity == 0 | ||
85 | } | ||
86 | |||
87 | // objectLockConfig - object lock configuration specified in | ||
88 | // https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html | ||
89 | type objectLockConfig struct { | ||
90 | XMLNS string `xml:"xmlns,attr,omitempty"` | ||
91 | XMLName xml.Name `xml:"ObjectLockConfiguration"` | ||
92 | ObjectLockEnabled string `xml:"ObjectLockEnabled"` | ||
93 | Rule *struct { | ||
94 | DefaultRetention struct { | ||
95 | Mode RetentionMode `xml:"Mode"` | ||
96 | Days *uint `xml:"Days"` | ||
97 | Years *uint `xml:"Years"` | ||
98 | } `xml:"DefaultRetention"` | ||
99 | } `xml:"Rule,omitempty"` | ||
100 | } | ||
101 | |||
102 | func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) { | ||
103 | config := &objectLockConfig{ | ||
104 | ObjectLockEnabled: "Enabled", | ||
105 | } | ||
106 | |||
107 | if mode != nil && validity != nil && unit != nil { | ||
108 | if !mode.IsValid() { | ||
109 | return nil, fmt.Errorf("invalid retention mode `%v`", mode) | ||
110 | } | ||
111 | |||
112 | if !unit.isValid() { | ||
113 | return nil, fmt.Errorf("invalid validity unit `%v`", unit) | ||
114 | } | ||
115 | |||
116 | config.Rule = &struct { | ||
117 | DefaultRetention struct { | ||
118 | Mode RetentionMode `xml:"Mode"` | ||
119 | Days *uint `xml:"Days"` | ||
120 | Years *uint `xml:"Years"` | ||
121 | } `xml:"DefaultRetention"` | ||
122 | }{} | ||
123 | |||
124 | config.Rule.DefaultRetention.Mode = *mode | ||
125 | if *unit == Days { | ||
126 | config.Rule.DefaultRetention.Days = validity | ||
127 | } else { | ||
128 | config.Rule.DefaultRetention.Years = validity | ||
129 | } | ||
130 | |||
131 | return config, nil | ||
132 | } | ||
133 | |||
134 | if mode == nil && validity == nil && unit == nil { | ||
135 | return config, nil | ||
136 | } | ||
137 | |||
138 | return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed") | ||
139 | } | ||
140 | |||
141 | // SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. | ||
142 | func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { | ||
143 | // Input validation. | ||
144 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
145 | return err | ||
146 | } | ||
147 | |||
148 | // Get resources properly escaped and lined up before | ||
149 | // using them in http request. | ||
150 | urlValues := make(url.Values) | ||
151 | urlValues.Set("object-lock", "") | ||
152 | |||
153 | config, err := newObjectLockConfig(mode, validity, unit) | ||
154 | if err != nil { | ||
155 | return err | ||
156 | } | ||
157 | |||
158 | configData, err := xml.Marshal(config) | ||
159 | if err != nil { | ||
160 | return err | ||
161 | } | ||
162 | |||
163 | reqMetadata := requestMetadata{ | ||
164 | bucketName: bucketName, | ||
165 | queryValues: urlValues, | ||
166 | contentBody: bytes.NewReader(configData), | ||
167 | contentLength: int64(len(configData)), | ||
168 | contentMD5Base64: sumMD5Base64(configData), | ||
169 | contentSHA256Hex: sum256Hex(configData), | ||
170 | } | ||
171 | |||
172 | // Execute PUT bucket object lock configuration. | ||
173 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
174 | defer closeResponse(resp) | ||
175 | if err != nil { | ||
176 | return err | ||
177 | } | ||
178 | if resp != nil { | ||
179 | if resp.StatusCode != http.StatusOK { | ||
180 | return httpRespToErrorResponse(resp, bucketName, "") | ||
181 | } | ||
182 | } | ||
183 | return nil | ||
184 | } | ||
185 | |||
186 | // GetObjectLockConfig gets object lock configuration of given bucket. | ||
187 | func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { | ||
188 | // Input validation. | ||
189 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
190 | return "", nil, nil, nil, err | ||
191 | } | ||
192 | |||
193 | urlValues := make(url.Values) | ||
194 | urlValues.Set("object-lock", "") | ||
195 | |||
196 | // Execute GET to fetch the bucket's object lock configuration. | ||
197 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
198 | bucketName: bucketName, | ||
199 | queryValues: urlValues, | ||
200 | contentSHA256Hex: emptySHA256Hex, | ||
201 | }) | ||
202 | defer closeResponse(resp) | ||
203 | if err != nil { | ||
204 | return "", nil, nil, nil, err | ||
205 | } | ||
206 | if resp != nil { | ||
207 | if resp.StatusCode != http.StatusOK { | ||
208 | return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "") | ||
209 | } | ||
210 | } | ||
211 | config := &objectLockConfig{} | ||
212 | if err = xml.NewDecoder(resp.Body).Decode(config); err != nil { | ||
213 | return "", nil, nil, nil, err | ||
214 | } | ||
215 | |||
216 | if config.Rule != nil { | ||
217 | mode = &config.Rule.DefaultRetention.Mode | ||
218 | if config.Rule.DefaultRetention.Days != nil { | ||
219 | validity = config.Rule.DefaultRetention.Days | ||
220 | days := Days | ||
221 | unit = &days | ||
222 | } else { | ||
223 | validity = config.Rule.DefaultRetention.Years | ||
224 | years := Years | ||
225 | unit = &years | ||
226 | } | ||
227 | return config.ObjectLockEnabled, mode, validity, unit, nil | ||
228 | } | ||
229 | return config.ObjectLockEnabled, nil, nil, nil, nil | ||
230 | } | ||
231 | |||
232 | // GetBucketObjectLockConfig gets object lock configuration of given bucket. | ||
233 | func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { | ||
234 | _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName) | ||
235 | return mode, validity, unit, err | ||
236 | } | ||
237 | |||
238 | // SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. | ||
239 | func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { | ||
240 | return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit) | ||
241 | } | ||
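
A hedged sketch of the lock-configuration round trip above; the bucket name is a placeholder and the bucket is assumed to have been created with ObjectLocking enabled:

    // objectLockExample assumes imports: context, fmt, and minio (v7).
    func objectLockExample(ctx context.Context, client *minio.Client) error {
    	// mode, validity and unit must be either all set or all nil.
    	mode := minio.Governance
    	validity := uint(30)
    	unit := minio.Days
    	err := client.SetBucketObjectLockConfig(ctx, "my-locked-bucket", &mode, &validity, &unit)
    	if err != nil {
    		return err
    	}

    	// Read the configuration back; mode/validity/unit come back nil
    	// when no default retention rule is set.
    	enabled, m, v, u, err := client.GetObjectLockConfig(ctx, "my-locked-bucket")
    	if err != nil {
    		return err
    	}
    	if m != nil {
    		fmt.Printf("%s: %s %d %s\n", enabled, *m, *v, *u)
    	}
    	return nil
    }
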
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go new file mode 100644 index 0000000..b29cb1f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go | |||
@@ -0,0 +1,165 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2019-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/xml" | ||
24 | "fmt" | ||
25 | "net/http" | ||
26 | "net/url" | ||
27 | "time" | ||
28 | |||
29 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
30 | ) | ||
31 | |||
32 | // objectRetention - object retention specified in | ||
33 | // https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html | ||
34 | type objectRetention struct { | ||
35 | XMLNS string `xml:"xmlns,attr,omitempty"` | ||
36 | XMLName xml.Name `xml:"Retention"` | ||
37 | Mode RetentionMode `xml:"Mode,omitempty"` | ||
38 | RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"` | ||
39 | } | ||
40 | |||
41 | func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) { | ||
42 | objectRetention := &objectRetention{} | ||
43 | |||
44 | if date != nil && !date.IsZero() { | ||
45 | objectRetention.RetainUntilDate = date | ||
46 | } | ||
47 | if mode != nil { | ||
48 | if !mode.IsValid() { | ||
49 | return nil, fmt.Errorf("invalid retention mode `%v`", mode) | ||
50 | } | ||
51 | objectRetention.Mode = *mode | ||
52 | } | ||
53 | |||
54 | return objectRetention, nil | ||
55 | } | ||
56 | |||
57 | // PutObjectRetentionOptions represents options specified by user for PutObjectRetention call | ||
58 | type PutObjectRetentionOptions struct { | ||
59 | GovernanceBypass bool | ||
60 | Mode *RetentionMode | ||
61 | RetainUntilDate *time.Time | ||
62 | VersionID string | ||
63 | } | ||
64 | |||
65 | // PutObjectRetention sets object retention for a given object and versionID. | ||
66 | func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { | ||
67 | // Input validation. | ||
68 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
69 | return err | ||
70 | } | ||
71 | |||
72 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
73 | return err | ||
74 | } | ||
75 | |||
76 | // Get resources properly escaped and lined up before | ||
77 | // using them in http request. | ||
78 | urlValues := make(url.Values) | ||
79 | urlValues.Set("retention", "") | ||
80 | |||
81 | if opts.VersionID != "" { | ||
82 | urlValues.Set("versionId", opts.VersionID) | ||
83 | } | ||
84 | |||
85 | retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate) | ||
86 | if err != nil { | ||
87 | return err | ||
88 | } | ||
89 | |||
90 | retentionData, err := xml.Marshal(retention) | ||
91 | if err != nil { | ||
92 | return err | ||
93 | } | ||
94 | |||
95 | // Build headers. | ||
96 | headers := make(http.Header) | ||
97 | |||
98 | if opts.GovernanceBypass { | ||
99 | // Set the bypass governance retention header | ||
100 | headers.Set(amzBypassGovernance, "true") | ||
101 | } | ||
102 | |||
103 | reqMetadata := requestMetadata{ | ||
104 | bucketName: bucketName, | ||
105 | objectName: objectName, | ||
106 | queryValues: urlValues, | ||
107 | contentBody: bytes.NewReader(retentionData), | ||
108 | contentLength: int64(len(retentionData)), | ||
109 | contentMD5Base64: sumMD5Base64(retentionData), | ||
110 | contentSHA256Hex: sum256Hex(retentionData), | ||
111 | customHeader: headers, | ||
112 | } | ||
113 | |||
114 | // Execute PUT Object Retention. | ||
115 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
116 | defer closeResponse(resp) | ||
117 | if err != nil { | ||
118 | return err | ||
119 | } | ||
120 | if resp != nil { | ||
121 | if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { | ||
122 | return httpRespToErrorResponse(resp, bucketName, objectName) | ||
123 | } | ||
124 | } | ||
125 | return nil | ||
126 | } | ||
127 | |||
128 | // GetObjectRetention gets retention of given object. | ||
129 | func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { | ||
130 | // Input validation. | ||
131 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
132 | return nil, nil, err | ||
133 | } | ||
134 | |||
135 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
136 | return nil, nil, err | ||
137 | } | ||
138 | urlValues := make(url.Values) | ||
139 | urlValues.Set("retention", "") | ||
140 | if versionID != "" { | ||
141 | urlValues.Set("versionId", versionID) | ||
142 | } | ||
143 | // Execute GET to fetch the object's retention configuration. | ||
144 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
145 | bucketName: bucketName, | ||
146 | objectName: objectName, | ||
147 | queryValues: urlValues, | ||
148 | contentSHA256Hex: emptySHA256Hex, | ||
149 | }) | ||
150 | defer closeResponse(resp) | ||
151 | if err != nil { | ||
152 | return nil, nil, err | ||
153 | } | ||
154 | if resp != nil { | ||
155 | if resp.StatusCode != http.StatusOK { | ||
156 | return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName) | ||
157 | } | ||
158 | } | ||
159 | retention := &objectRetention{} | ||
160 | if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil { | ||
161 | return nil, nil, err | ||
162 | } | ||
163 | |||
164 | return &retention.Mode, retention.RetainUntilDate, nil | ||
165 | } | ||
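
A sketch of setting and reading per-object retention with the calls above, assuming a versioned, lock-enabled bucket; the names and the 24-hour window are placeholder assumptions:

    // retentionExample assumes imports: context, fmt, time, and minio (v7).
    func retentionExample(ctx context.Context, client *minio.Client, versionID string) error {
    	mode := minio.Governance
    	until := time.Now().UTC().Add(24 * time.Hour)
    	err := client.PutObjectRetention(ctx, "my-bucket", "my-object",
    		minio.PutObjectRetentionOptions{
    			Mode:            &mode,
    			RetainUntilDate: &until,
    			VersionID:       versionID,
    			// GovernanceBypass would be required to shorten or remove
    			// an existing GOVERNANCE-mode retention.
    		})
    	if err != nil {
    		return err
    	}

    	m, date, err := client.GetObjectRetention(ctx, "my-bucket", "my-object", versionID)
    	if err != nil {
    		return err
    	}
    	fmt.Println(*m, date)
    	return nil
    }
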
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go new file mode 100644 index 0000000..6623e26 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go | |||
@@ -0,0 +1,177 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/xml" | ||
24 | "net/http" | ||
25 | "net/url" | ||
26 | |||
27 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
28 | "github.com/minio/minio-go/v7/pkg/tags" | ||
29 | ) | ||
30 | |||
31 | // PutObjectTaggingOptions holds an object version id | ||
32 | // to update tag(s) of a specific object version | ||
33 | type PutObjectTaggingOptions struct { | ||
34 | VersionID string | ||
35 | Internal AdvancedObjectTaggingOptions | ||
36 | } | ||
37 | |||
38 | // AdvancedObjectTaggingOptions for internal use by MinIO server - not intended for client use. | ||
39 | type AdvancedObjectTaggingOptions struct { | ||
40 | ReplicationProxyRequest string | ||
41 | } | ||
42 | |||
43 | // PutObjectTagging replaces or creates object tag(s) and can target | ||
44 | // a specific object version in a versioned bucket. | ||
45 | func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { | ||
46 | // Input validation. | ||
47 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
48 | return err | ||
49 | } | ||
50 | |||
51 | // Get resources properly escaped and lined up before | ||
52 | // using them in http request. | ||
53 | urlValues := make(url.Values) | ||
54 | urlValues.Set("tagging", "") | ||
55 | |||
56 | if opts.VersionID != "" { | ||
57 | urlValues.Set("versionId", opts.VersionID) | ||
58 | } | ||
59 | headers := make(http.Header, 0) | ||
60 | if opts.Internal.ReplicationProxyRequest != "" { | ||
61 | headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) | ||
62 | } | ||
63 | reqBytes, err := xml.Marshal(otags) | ||
64 | if err != nil { | ||
65 | return err | ||
66 | } | ||
67 | |||
68 | reqMetadata := requestMetadata{ | ||
69 | bucketName: bucketName, | ||
70 | objectName: objectName, | ||
71 | queryValues: urlValues, | ||
72 | contentBody: bytes.NewReader(reqBytes), | ||
73 | contentLength: int64(len(reqBytes)), | ||
74 | contentMD5Base64: sumMD5Base64(reqBytes), | ||
75 | customHeader: headers, | ||
76 | } | ||
77 | |||
78 | // Execute PUT to set object tagging. | ||
79 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
80 | defer closeResponse(resp) | ||
81 | if err != nil { | ||
82 | return err | ||
83 | } | ||
84 | if resp != nil { | ||
85 | if resp.StatusCode != http.StatusOK { | ||
86 | return httpRespToErrorResponse(resp, bucketName, objectName) | ||
87 | } | ||
88 | } | ||
89 | return nil | ||
90 | } | ||
91 | |||
92 | // GetObjectTaggingOptions holds the object version ID | ||
93 | // to fetch the tagging key/value pairs | ||
94 | type GetObjectTaggingOptions struct { | ||
95 | VersionID string | ||
96 | Internal AdvancedObjectTaggingOptions | ||
97 | } | ||
98 | |||
99 | // GetObjectTagging fetches object tag(s) with options to target | ||
100 | // a specific object version in a versioned bucket. | ||
101 | func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { | ||
102 | // Get resources properly escaped and lined up before | ||
103 | // using them in http request. | ||
104 | urlValues := make(url.Values) | ||
105 | urlValues.Set("tagging", "") | ||
106 | |||
107 | if opts.VersionID != "" { | ||
108 | urlValues.Set("versionId", opts.VersionID) | ||
109 | } | ||
110 | headers := make(http.Header, 0) | ||
111 | if opts.Internal.ReplicationProxyRequest != "" { | ||
112 | headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) | ||
113 | } | ||
114 | // Execute GET on object to get object tag(s) | ||
115 | resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ | ||
116 | bucketName: bucketName, | ||
117 | objectName: objectName, | ||
118 | queryValues: urlValues, | ||
119 | customHeader: headers, | ||
120 | }) | ||
121 | |||
122 | defer closeResponse(resp) | ||
123 | if err != nil { | ||
124 | return nil, err | ||
125 | } | ||
126 | |||
127 | if resp != nil { | ||
128 | if resp.StatusCode != http.StatusOK { | ||
129 | return nil, httpRespToErrorResponse(resp, bucketName, objectName) | ||
130 | } | ||
131 | } | ||
132 | |||
133 | return tags.ParseObjectXML(resp.Body) | ||
134 | } | ||
135 | |||
136 | // RemoveObjectTaggingOptions holds the version ID of the object whose tag(s) will be removed | ||
137 | type RemoveObjectTaggingOptions struct { | ||
138 | VersionID string | ||
139 | Internal AdvancedObjectTaggingOptions | ||
140 | } | ||
141 | |||
142 | // RemoveObjectTagging removes object tag(s), with options to target a specific object | ||
143 | // version in a versioned bucket. | ||
144 | func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { | ||
145 | // Get resources properly escaped and lined up before | ||
146 | // using them in http request. | ||
147 | urlValues := make(url.Values) | ||
148 | urlValues.Set("tagging", "") | ||
149 | |||
150 | if opts.VersionID != "" { | ||
151 | urlValues.Set("versionId", opts.VersionID) | ||
152 | } | ||
153 | headers := make(http.Header, 0) | ||
154 | if opts.Internal.ReplicationProxyRequest != "" { | ||
155 | headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) | ||
156 | } | ||
157 | // Execute DELETE on object to remove object tag(s) | ||
158 | resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ | ||
159 | bucketName: bucketName, | ||
160 | objectName: objectName, | ||
161 | queryValues: urlValues, | ||
162 | customHeader: headers, | ||
163 | }) | ||
164 | |||
165 | defer closeResponse(resp) | ||
166 | if err != nil { | ||
167 | return err | ||
168 | } | ||
169 | |||
170 | if resp != nil { | ||
171 | // S3 returns "204 No content" after Object tag deletion. | ||
172 | if resp.StatusCode != http.StatusNoContent { | ||
173 | return httpRespToErrorResponse(resp, bucketName, objectName) | ||
174 | } | ||
175 | } | ||
176 | return err | ||
177 | } | ||
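
A sketch of the tagging lifecycle above (set, get, remove), assuming a configured client; the tag keys/values and names are placeholders:

    // taggingExample assumes imports: context, fmt, minio (v7), and
    // github.com/minio/minio-go/v7/pkg/tags.
    func taggingExample(ctx context.Context, client *minio.Client) error {
    	// Build an object tag set; the second argument marks these as
    	// object tags (as opposed to bucket tags).
    	t, err := tags.NewTags(map[string]string{"project": "alpha"}, true)
    	if err != nil {
    		return err
    	}
    	if err := client.PutObjectTagging(ctx, "my-bucket", "my-object", t,
    		minio.PutObjectTaggingOptions{}); err != nil {
    		return err
    	}

    	got, err := client.GetObjectTagging(ctx, "my-bucket", "my-object",
    		minio.GetObjectTaggingOptions{})
    	if err != nil {
    		return err
    	}
    	fmt.Println(got.ToMap())

    	// The DELETE returns 204 No Content on success, as noted above.
    	return client.RemoveObjectTagging(ctx, "my-bucket", "my-object",
    		minio.RemoveObjectTaggingOptions{})
    }
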
diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go new file mode 100644 index 0000000..9e85f81 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go | |||
@@ -0,0 +1,228 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "errors" | ||
23 | "net/http" | ||
24 | "net/url" | ||
25 | "time" | ||
26 | |||
27 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
28 | "github.com/minio/minio-go/v7/pkg/signer" | ||
29 | ) | ||
30 | |||
31 | // presignURL - Returns a presigned URL for an input 'method'. | ||
32 | // Expires may be at most 7 days (604800 seconds) and at least 1 second. | ||
33 | func (c *Client) presignURL(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { | ||
34 | // Input validation. | ||
35 | if method == "" { | ||
36 | return nil, errInvalidArgument("method cannot be empty.") | ||
37 | } | ||
38 | if err = s3utils.CheckValidBucketName(bucketName); err != nil { | ||
39 | return nil, err | ||
40 | } | ||
41 | if err = isValidExpiry(expires); err != nil { | ||
42 | return nil, err | ||
43 | } | ||
44 | |||
45 | // Convert expires into seconds. | ||
46 | expireSeconds := int64(expires / time.Second) | ||
47 | reqMetadata := requestMetadata{ | ||
48 | presignURL: true, | ||
49 | bucketName: bucketName, | ||
50 | objectName: objectName, | ||
51 | expires: expireSeconds, | ||
52 | queryValues: reqParams, | ||
53 | extraPresignHeader: extraHeaders, | ||
54 | } | ||
55 | |||
56 | // Instantiate a new request. | ||
57 | // Since expires is set newRequest will presign the request. | ||
58 | var req *http.Request | ||
59 | if req, err = c.newRequest(ctx, method, reqMetadata); err != nil { | ||
60 | return nil, err | ||
61 | } | ||
62 | return req.URL, nil | ||
63 | } | ||
64 | |||
65 | // PresignedGetObject - Returns a presigned URL to access an object's | ||
66 | // data without credentials. URL can have a maximum expiry of | ||
67 | // up to 7 days or a minimum of 1 second. Additionally, you can override | ||
68 | // a set of response headers using the query parameters. | ||
69 | func (c *Client) PresignedGetObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { | ||
70 | if err = s3utils.CheckValidObjectName(objectName); err != nil { | ||
71 | return nil, err | ||
72 | } | ||
73 | return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil) | ||
74 | } | ||
75 | |||
76 | // PresignedHeadObject - Returns a presigned URL to access | ||
77 | // object metadata without credentials. URL can have a maximum expiry | ||
78 | // of up to 7 days or a minimum of 1 second. Additionally, you can override | ||
79 | // a set of response headers using the query parameters. | ||
80 | func (c *Client) PresignedHeadObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { | ||
81 | if err = s3utils.CheckValidObjectName(objectName); err != nil { | ||
82 | return nil, err | ||
83 | } | ||
84 | return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil) | ||
85 | } | ||
86 | |||
87 | // PresignedPutObject - Returns a presigned URL to upload an object | ||
88 | // without credentials. URL can have a maximum expiry of up to 7 days | ||
89 | // or a minimum of 1 second. | ||
90 | func (c *Client) PresignedPutObject(ctx context.Context, bucketName, objectName string, expires time.Duration) (u *url.URL, err error) { | ||
91 | if err = s3utils.CheckValidObjectName(objectName); err != nil { | ||
92 | return nil, err | ||
93 | } | ||
94 | return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil) | ||
95 | } | ||
96 | |||
97 | // PresignHeader - similar to Presign() but allows including HTTP headers that | ||
98 | // will be used to build the signature. The request using the resulting URL will | ||
99 | // need to have the exact same headers to be added for signature validation to | ||
100 | // pass. | ||
101 | // | ||
102 | // FIXME: The extra header parameter should be included in Presign() in the next | ||
103 | // major version bump, and this function should then be deprecated. | ||
104 | func (c *Client) PresignHeader(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { | ||
105 | return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders) | ||
106 | } | ||
107 | |||
108 | // Presign - returns a presigned URL for any http method of your choice along | ||
109 | // with custom request params and extra signed headers. URL can have a maximum | ||
110 | // expiry of up to 7 days or a minimum of 1 second. | ||
111 | func (c *Client) Presign(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { | ||
112 | return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil) | ||
113 | } | ||
114 | |||
115 | // PresignedPostPolicy - Returns a POST URL and form data to upload an object. | ||
116 | func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) { | ||
117 | // Validate input arguments. | ||
118 | if p.expiration.IsZero() { | ||
119 | return nil, nil, errors.New("expiration time must be specified") | ||
120 | } | ||
121 | if _, ok := p.formData["key"]; !ok { | ||
122 | return nil, nil, errors.New("object key must be specified") | ||
123 | } | ||
124 | if _, ok := p.formData["bucket"]; !ok { | ||
125 | return nil, nil, errors.New("bucket name must be specified") | ||
126 | } | ||
127 | |||
128 | bucketName := p.formData["bucket"] | ||
129 | // Fetch the bucket location. | ||
130 | location, err := c.getBucketLocation(ctx, bucketName) | ||
131 | if err != nil { | ||
132 | return nil, nil, err | ||
133 | } | ||
134 | |||
135 | isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) | ||
136 | |||
137 | u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) | ||
138 | if err != nil { | ||
139 | return nil, nil, err | ||
140 | } | ||
141 | |||
142 | // Get credentials from the configured credentials provider. | ||
143 | credValues, err := c.credsProvider.Get() | ||
144 | if err != nil { | ||
145 | return nil, nil, err | ||
146 | } | ||
147 | |||
148 | var ( | ||
149 | signerType = credValues.SignerType | ||
150 | sessionToken = credValues.SessionToken | ||
151 | accessKeyID = credValues.AccessKeyID | ||
152 | secretAccessKey = credValues.SecretAccessKey | ||
153 | ) | ||
154 | |||
155 | if signerType.IsAnonymous() { | ||
156 | return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials") | ||
157 | } | ||
158 | |||
159 | // Keep time. | ||
160 | t := time.Now().UTC() | ||
161 | // Handle signature version '2' here. | ||
162 | if signerType.IsV2() { | ||
163 | policyBase64 := p.base64() | ||
164 | p.formData["policy"] = policyBase64 | ||
165 | // For Google endpoint set this value to be 'GoogleAccessId'. | ||
166 | if s3utils.IsGoogleEndpoint(*c.endpointURL) { | ||
167 | p.formData["GoogleAccessId"] = accessKeyID | ||
168 | } else { | ||
169 | // For all other endpoints set this value to be 'AWSAccessKeyId'. | ||
170 | p.formData["AWSAccessKeyId"] = accessKeyID | ||
171 | } | ||
172 | // Sign the policy. | ||
173 | p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey) | ||
174 | return u, p.formData, nil | ||
175 | } | ||
176 | |||
177 | // Add date policy. | ||
178 | if err = p.addNewPolicy(policyCondition{ | ||
179 | matchType: "eq", | ||
180 | condition: "$x-amz-date", | ||
181 | value: t.Format(iso8601DateFormat), | ||
182 | }); err != nil { | ||
183 | return nil, nil, err | ||
184 | } | ||
185 | |||
186 | // Add algorithm policy. | ||
187 | if err = p.addNewPolicy(policyCondition{ | ||
188 | matchType: "eq", | ||
189 | condition: "$x-amz-algorithm", | ||
190 | value: signV4Algorithm, | ||
191 | }); err != nil { | ||
192 | return nil, nil, err | ||
193 | } | ||
194 | |||
195 | // Add a credential policy. | ||
196 | credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3) | ||
197 | if err = p.addNewPolicy(policyCondition{ | ||
198 | matchType: "eq", | ||
199 | condition: "$x-amz-credential", | ||
200 | value: credential, | ||
201 | }); err != nil { | ||
202 | return nil, nil, err | ||
203 | } | ||
204 | |||
205 | if sessionToken != "" { | ||
206 | if err = p.addNewPolicy(policyCondition{ | ||
207 | matchType: "eq", | ||
208 | condition: "$x-amz-security-token", | ||
209 | value: sessionToken, | ||
210 | }); err != nil { | ||
211 | return nil, nil, err | ||
212 | } | ||
213 | } | ||
214 | |||
215 | // Get base64 encoded policy. | ||
216 | policyBase64 := p.base64() | ||
217 | |||
218 | // Fill in the form data. | ||
219 | p.formData["policy"] = policyBase64 | ||
220 | p.formData["x-amz-algorithm"] = signV4Algorithm | ||
221 | p.formData["x-amz-credential"] = credential | ||
222 | p.formData["x-amz-date"] = t.Format(iso8601DateFormat) | ||
223 | if sessionToken != "" { | ||
224 | p.formData["x-amz-security-token"] = sessionToken | ||
225 | } | ||
226 | p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) | ||
227 | return u, p.formData, nil | ||
228 | } | ||
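
A sketch tying the presign entry points together: a GET URL with a response-header override, a plain PUT URL, and a browser-style POST policy. Expiry values and names are placeholder assumptions:

    // presignExample assumes imports: context, fmt, net/url, time, and minio (v7).
    func presignExample(ctx context.Context, client *minio.Client) error {
    	// Download URL valid for 15 minutes, overriding the response content type.
    	params := make(url.Values)
    	params.Set("response-content-type", "application/octet-stream")
    	getURL, err := client.PresignedGetObject(ctx, "my-bucket", "my-object", 15*time.Minute, params)
    	if err != nil {
    		return err
    	}
    	fmt.Println("GET:", getURL)

    	// Upload URL valid for one hour.
    	putURL, err := client.PresignedPutObject(ctx, "my-bucket", "my-object", time.Hour)
    	if err != nil {
    		return err
    	}
    	fmt.Println("PUT:", putURL)

    	// POST form upload: bucket, key and expiration are the mandatory
    	// policy fields validated by PresignedPostPolicy above.
    	policy := minio.NewPostPolicy()
    	policy.SetBucket("my-bucket")
    	policy.SetKey("my-object")
    	policy.SetExpires(time.Now().UTC().Add(time.Hour))
    	postURL, formData, err := client.PresignedPostPolicy(ctx, policy)
    	if err != nil {
    		return err
    	}
    	fmt.Println("POST:", postURL, formData)
    	return nil
    }
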
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go new file mode 100644 index 0000000..7376669 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/xml" | ||
24 | "net/http" | ||
25 | |||
26 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
27 | ) | ||
28 | |||
29 | // Bucket operations | ||
30 | func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { | ||
31 | // Validate the input arguments. | ||
32 | if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { | ||
33 | return err | ||
34 | } | ||
35 | |||
36 | err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking) | ||
37 | if err != nil && (opts.Region == "" || opts.Region == "us-east-1") { | ||
38 | if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" { | ||
39 | err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking) | ||
40 | } | ||
41 | } | ||
42 | return err | ||
43 | } | ||
44 | |||
45 | func (c *Client) doMakeBucket(ctx context.Context, bucketName, location string, objectLockEnabled bool) (err error) { | ||
46 | defer func() { | ||
47 | // Save the location into cache on a successful makeBucket response. | ||
48 | if err == nil { | ||
49 | c.bucketLocCache.Set(bucketName, location) | ||
50 | } | ||
51 | }() | ||
52 | |||
53 | // If location is empty, treat it as the default region 'us-east-1'. | ||
54 | if location == "" { | ||
55 | location = "us-east-1" | ||
56 | // For clients configured with a custom region, default | ||
57 | // to that region instead of 'us-east-1'. | ||
58 | if c.region != "" { | ||
59 | location = c.region | ||
60 | } | ||
61 | } | ||
62 | // PUT bucket request metadata. | ||
63 | reqMetadata := requestMetadata{ | ||
64 | bucketName: bucketName, | ||
65 | bucketLocation: location, | ||
66 | } | ||
67 | |||
68 | if objectLockEnabled { | ||
69 | headers := make(http.Header) | ||
70 | headers.Add("x-amz-bucket-object-lock-enabled", "true") | ||
71 | reqMetadata.customHeader = headers | ||
72 | } | ||
73 | |||
74 | // If location is not 'us-east-1' create bucket location config. | ||
75 | if location != "us-east-1" && location != "" { | ||
76 | createBucketConfig := createBucketConfiguration{} | ||
77 | createBucketConfig.Location = location | ||
78 | var createBucketConfigBytes []byte | ||
79 | createBucketConfigBytes, err = xml.Marshal(createBucketConfig) | ||
80 | if err != nil { | ||
81 | return err | ||
82 | } | ||
83 | reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) | ||
84 | reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) | ||
85 | reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) | ||
86 | reqMetadata.contentLength = int64(len(createBucketConfigBytes)) | ||
87 | } | ||
88 | |||
89 | // Execute PUT to create a new bucket. | ||
90 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
91 | defer closeResponse(resp) | ||
92 | if err != nil { | ||
93 | return err | ||
94 | } | ||
95 | |||
96 | if resp != nil { | ||
97 | if resp.StatusCode != http.StatusOK { | ||
98 | return httpRespToErrorResponse(resp, bucketName, "") | ||
99 | } | ||
100 | } | ||
101 | |||
102 | // Success. | ||
103 | return nil | ||
104 | } | ||
105 | |||
106 | // MakeBucketOptions holds all options to tweak bucket creation | ||
107 | type MakeBucketOptions struct { | ||
108 | // Bucket location | ||
109 | Region string | ||
110 | // Enable object locking | ||
111 | ObjectLocking bool | ||
112 | } | ||
113 | |||
114 | // MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts. | ||
115 | // | ||
116 | // Location is an optional argument; by default all buckets are | ||
117 | // created in the US Standard Region. | ||
118 | // | ||
119 | // For more supported Amazon S3 regions, see http://docs.aws.amazon.com/general/latest/gr/rande.html | ||
120 | // For more supported Google Cloud Storage regions, see https://cloud.google.com/storage/docs/bucket-locations | ||
121 | func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { | ||
122 | return c.makeBucket(ctx, bucketName, opts) | ||
123 | } | ||
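
A short sketch of MakeBucket with the options above; the region and bucket name are placeholders, and ObjectLocking must be enabled at creation time for the retention and legal-hold calls earlier in this diff to work:

    // makeBucketExample assumes imports: context and minio (v7).
    func makeBucketExample(ctx context.Context, client *minio.Client) error {
    	return client.MakeBucket(ctx, "my-new-bucket", minio.MakeBucketOptions{
    		Region:        "us-east-1",
    		ObjectLocking: true, // enables versioning + object lock from day one
    	})
    }
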
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go new file mode 100644 index 0000000..9ccb97c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go | |||
@@ -0,0 +1,149 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "io" | ||
23 | "math" | ||
24 | "os" | ||
25 | |||
26 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
27 | ) | ||
28 | |||
29 | const nullVersionID = "null" | ||
30 | |||
31 | // Verify if reader is *minio.Object | ||
32 | func isObject(reader io.Reader) (ok bool) { | ||
33 | _, ok = reader.(*Object) | ||
34 | return | ||
35 | } | ||
36 | |||
37 | // Verify if reader is a generic ReaderAt | ||
38 | func isReadAt(reader io.Reader) (ok bool) { | ||
39 | var v *os.File | ||
40 | v, ok = reader.(*os.File) | ||
41 | if ok { | ||
42 | // Stdin, Stdout and Stderr all have *os.File type | ||
43 | // which happen to also be io.ReaderAt compatible | ||
44 | // we need to add special conditions for them to | ||
45 | // be ignored by this function. | ||
46 | for _, f := range []string{ | ||
47 | "/dev/stdin", | ||
48 | "/dev/stdout", | ||
49 | "/dev/stderr", | ||
50 | } { | ||
51 | if f == v.Name() { | ||
52 | ok = false | ||
53 | break | ||
54 | } | ||
55 | } | ||
56 | } else { | ||
57 | _, ok = reader.(io.ReaderAt) | ||
58 | } | ||
59 | return | ||
60 | } | ||
61 | |||
62 | // OptimalPartInfo - calculate the optimal part info for a given | ||
63 | // object size. | ||
64 | // | ||
65 | // NOTE: The assumption here is that any object uploaded to any S3-compatible | ||
66 | // object storage observes the following parameters as constants. | ||
67 | // | ||
68 | // maxPartsCount - 10000 | ||
69 | // minPartSize - 16MiB | ||
70 | // maxMultipartPutObjectSize - 5TiB | ||
71 | func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) { | ||
72 | // If object size is '-1', set it to the 5TiB maximum. | ||
73 | var unknownSize bool | ||
74 | if objectSize == -1 { | ||
75 | unknownSize = true | ||
76 | objectSize = maxMultipartPutObjectSize | ||
77 | } | ||
78 | |||
79 | // object size is larger than supported maximum. | ||
80 | if objectSize > maxMultipartPutObjectSize { | ||
81 | err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") | ||
82 | return | ||
83 | } | ||
84 | |||
85 | var partSizeFlt float64 | ||
86 | if configuredPartSize > 0 { | ||
87 | if int64(configuredPartSize) > objectSize { | ||
88 | err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "") | ||
89 | return | ||
90 | } | ||
91 | |||
92 | if !unknownSize { | ||
93 | if objectSize > (int64(configuredPartSize) * maxPartsCount) { | ||
94 | err = errInvalidArgument("Part size * max_parts(10000) is less than input objectSize.") | ||
95 | return | ||
96 | } | ||
97 | } | ||
98 | |||
99 | if configuredPartSize < absMinPartSize { | ||
100 | err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.") | ||
101 | return | ||
102 | } | ||
103 | |||
104 | if configuredPartSize > maxPartSize { | ||
105 | err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.") | ||
106 | return | ||
107 | } | ||
108 | |||
109 | partSizeFlt = float64(configuredPartSize) | ||
110 | if unknownSize { | ||
111 | // If input has unknown size and part size is configured | ||
112 | // keep it to maximum allowed as per 10000 parts. | ||
113 | objectSize = int64(configuredPartSize) * maxPartsCount | ||
114 | } | ||
115 | } else { | ||
116 | configuredPartSize = minPartSize | ||
117 | // Use floats for part size for all calculations to avoid | ||
118 | // overflows during float64 to int64 conversions. | ||
119 | partSizeFlt = float64(objectSize / maxPartsCount) | ||
120 | partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize) | ||
121 | } | ||
122 | |||
123 | // Total parts count. | ||
124 | totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) | ||
125 | // Part size. | ||
126 | partSize = int64(partSizeFlt) | ||
127 | // Last part size. | ||
128 | lastPartSize = objectSize - int64(totalPartsCount-1)*partSize | ||
129 | return totalPartsCount, partSize, lastPartSize, nil | ||
130 | } | ||
131 | |||
132 | // newUploadID - initiate a new multipart upload request and | ||
133 | // fetch a new upload id for an object name. | ||
134 | func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { | ||
135 | // Input validation. | ||
136 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
137 | return "", err | ||
138 | } | ||
139 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
140 | return "", err | ||
141 | } | ||
142 | |||
143 | // Initiate multipart upload for an object. | ||
144 | initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) | ||
145 | if err != nil { | ||
146 | return "", err | ||
147 | } | ||
148 | return initMultipartUploadResult.UploadID, nil | ||
149 | } | ||
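
OptimalPartInfo is exported, so its sizing policy can be inspected directly; a small sketch with an assumed 1 GiB object and no configured part size:

    // partInfoExample assumes imports: fmt and minio (v7).
    func partInfoExample() error {
    	// With configuredPartSize == 0 the library derives the layout from
    	// the constants documented above (10000 parts max, 16MiB min part).
    	total, partSize, lastPartSize, err := minio.OptimalPartInfo(1<<30, 0)
    	if err != nil {
    		return err
    	}
    	fmt.Println(total, partSize, lastPartSize)
    	return nil
    }
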
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go new file mode 100644 index 0000000..0ae9142 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2023 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "encoding/json" | ||
23 | "errors" | ||
24 | "io" | ||
25 | "mime/multipart" | ||
26 | "net/http" | ||
27 | "strconv" | ||
28 | "strings" | ||
29 | "time" | ||
30 | |||
31 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
32 | ) | ||
33 | |||
34 | // PutObjectFanOutEntry is the per-object fan-out metadata | ||
35 | type PutObjectFanOutEntry struct { | ||
36 | Key string `json:"key"` | ||
37 | UserMetadata map[string]string `json:"metadata,omitempty"` | ||
38 | UserTags map[string]string `json:"tags,omitempty"` | ||
39 | ContentType string `json:"contentType,omitempty"` | ||
40 | ContentEncoding string `json:"contentEncoding,omitempty"` | ||
41 | ContentDisposition string `json:"contentDisposition,omitempty"` | ||
42 | ContentLanguage string `json:"contentLanguage,omitempty"` | ||
43 | CacheControl string `json:"cacheControl,omitempty"` | ||
44 | Retention RetentionMode `json:"retention,omitempty"` | ||
45 | RetainUntilDate *time.Time `json:"retainUntil,omitempty"` | ||
46 | } | ||
47 | |||
48 | // PutObjectFanOutRequest is the request structure sent | ||
49 | // to the server to fan out the stream to multiple objects. | ||
50 | type PutObjectFanOutRequest struct { | ||
51 | Entries []PutObjectFanOutEntry | ||
52 | Checksum Checksum | ||
53 | SSE encrypt.ServerSide | ||
54 | } | ||
55 | |||
56 | // PutObjectFanOutResponse is the response structure sent | ||
57 | // by the server upon success or failure for each fan-out | ||
58 | // object key. Additionally, this response carries the ETag, | ||
59 | // VersionID and LastModified for each fan-out object. | ||
60 | type PutObjectFanOutResponse struct { | ||
61 | Key string `json:"key"` | ||
62 | ETag string `json:"etag,omitempty"` | ||
63 | VersionID string `json:"versionId,omitempty"` | ||
64 | LastModified *time.Time `json:"lastModified,omitempty"` | ||
65 | Error string `json:"error,omitempty"` | ||
66 | } | ||
67 | |||
68 | // PutObjectFanOut - a variant of PutObject: instead of writing a single object from a single | ||
69 | // stream, multiple objects are written, defined via a list of PutObjectFanOutEntry items. Each | ||
70 | // entry carries an object key name and its relevant metadata, if any. `Key` is | ||
71 | // mandatory; the rest of the options in PutObjectFanOutEntry are optional. | ||
72 | func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData io.Reader, fanOutReq PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) { | ||
73 | if len(fanOutReq.Entries) == 0 { | ||
74 | return nil, errInvalidArgument("fan out requests cannot be empty") | ||
75 | } | ||
76 | |||
77 | policy := NewPostPolicy() | ||
78 | policy.SetBucket(bucket) | ||
79 | policy.SetKey(strconv.FormatInt(time.Now().UnixNano(), 16)) | ||
80 | |||
81 | // Expires in 15 minutes. | ||
82 | policy.SetExpires(time.Now().UTC().Add(15 * time.Minute)) | ||
83 | |||
84 | // Set encryption headers if any. | ||
85 | policy.SetEncryption(fanOutReq.SSE) | ||
86 | |||
87 | // Set checksum headers if any. | ||
88 | policy.SetChecksum(fanOutReq.Checksum) | ||
89 | |||
90 | url, formData, err := c.PresignedPostPolicy(ctx, policy) | ||
91 | if err != nil { | ||
92 | return nil, err | ||
93 | } | ||
94 | |||
95 | r, w := io.Pipe() | ||
96 | |||
97 | req, err := http.NewRequest(http.MethodPost, url.String(), r) | ||
98 | if err != nil { | ||
99 | w.Close() | ||
100 | return nil, err | ||
101 | } | ||
102 | |||
103 | var b strings.Builder | ||
104 | enc := json.NewEncoder(&b) | ||
105 | for _, req := range fanOutReq.Entries { | ||
106 | if req.Key == "" { | ||
107 | w.Close() | ||
108 | return nil, errors.New("PutObjectFanOutRequest.Key is mandatory and cannot be empty") | ||
109 | } | ||
110 | if err = enc.Encode(&req); err != nil { | ||
111 | w.Close() | ||
112 | return nil, err | ||
113 | } | ||
114 | } | ||
115 | |||
116 | mwriter := multipart.NewWriter(w) | ||
117 | req.Header.Add("Content-Type", mwriter.FormDataContentType()) | ||
118 | |||
119 | go func() { | ||
120 | defer w.Close() | ||
121 | defer mwriter.Close() | ||
122 | |||
123 | for k, v := range formData { | ||
124 | if err := mwriter.WriteField(k, v); err != nil { | ||
125 | return | ||
126 | } | ||
127 | } | ||
128 | |||
129 | if err := mwriter.WriteField("x-minio-fanout-list", b.String()); err != nil { | ||
130 | return | ||
131 | } | ||
132 | |||
133 | mw, err := mwriter.CreateFormFile("file", "fanout-content") | ||
134 | if err != nil { | ||
135 | return | ||
136 | } | ||
137 | |||
138 | if _, err = io.Copy(mw, fanOutData); err != nil { | ||
139 | return | ||
140 | } | ||
141 | }() | ||
142 | |||
143 | resp, err := c.do(req) | ||
144 | if err != nil { | ||
145 | return nil, err | ||
146 | } | ||
147 | defer closeResponse(resp) | ||
148 | |||
149 | if resp.StatusCode != http.StatusOK { | ||
150 | return nil, httpRespToErrorResponse(resp, bucket, "fanout-content") | ||
151 | } | ||
152 | |||
153 | dec := json.NewDecoder(resp.Body) | ||
154 | fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq.Entries)) | ||
155 | for dec.More() { | ||
156 | var m PutObjectFanOutResponse | ||
157 | if err = dec.Decode(&m); err != nil { | ||
158 | return nil, err | ||
159 | } | ||
160 | fanOutResp = append(fanOutResp, m) | ||
161 | } | ||
162 | |||
163 | return fanOutResp, nil | ||
164 | } | ||
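Taken together, the fan-out path above presigns a POST policy, streams a multipart form whose x-minio-fanout-list field names every target key, and decodes one JSON response per key. A minimal usage sketch; bucket, keys, endpoint and credentials are placeholders, and client construction via minio.New follows the library's public API:

package main

import (
	"context"
	"log"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials, not values from the diff.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	// One shared stream, written server-side to every listed key.
	req := minio.PutObjectFanOutRequest{
		Entries: []minio.PutObjectFanOutEntry{
			{Key: "events/copy-a"}, // Key is mandatory for each entry.
			{Key: "events/copy-b"},
		},
	}
	resps, err := client.PutObjectFanOut(context.Background(), "my-bucket",
		strings.NewReader("shared payload"), req)
	if err != nil {
		log.Fatalln(err)
	}

	// The server reports success or failure per fan-out key.
	for _, r := range resps {
		if r.Error != "" {
			log.Printf("key %s failed: %s", r.Key, r.Error)
			continue
		}
		log.Printf("key %s uploaded, etag %s", r.Key, r.ETag)
	}
}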
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go new file mode 100644 index 0000000..4d29dfc --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "mime" | ||
23 | "os" | ||
24 | "path/filepath" | ||
25 | |||
26 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
27 | ) | ||
28 | |||
29 | // FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. | ||
30 | func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { | ||
31 | // Input validation. | ||
32 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
33 | return UploadInfo{}, err | ||
34 | } | ||
35 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
36 | return UploadInfo{}, err | ||
37 | } | ||
38 | |||
39 | // Open the referenced file. | ||
40 | fileReader, err := os.Open(filePath) | ||
41 | // On any error, fail quickly here. | ||
42 | if err != nil { | ||
43 | return UploadInfo{}, err | ||
44 | } | ||
45 | defer fileReader.Close() | ||
46 | |||
47 | // Save the file stat. | ||
48 | fileStat, err := fileReader.Stat() | ||
49 | if err != nil { | ||
50 | return UploadInfo{}, err | ||
51 | } | ||
52 | |||
53 | // Save the file size. | ||
54 | fileSize := fileStat.Size() | ||
55 | |||
56 | // Set contentType based on filepath extension if not given or default | ||
57 | // value of "application/octet-stream" if the extension has no associated type. | ||
58 | if opts.ContentType == "" { | ||
59 | if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { | ||
60 | opts.ContentType = "application/octet-stream" | ||
61 | } | ||
62 | } | ||
63 | return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts) | ||
64 | } | ||
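A minimal usage sketch of FPutObject; bucket and paths are placeholders, and client construction via minio.New is assumed from the library's public API. Leaving ContentType empty exercises the extension-based inference shown above:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials, not values from the diff.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	// ContentType is left empty, so it is inferred from the ".pdf" extension
	// via mime.TypeByExtension; unknown extensions fall back to
	// "application/octet-stream", as in the function above.
	info, err := client.FPutObject(context.Background(), "my-bucket",
		"reports/report.pdf", "/tmp/report.pdf", minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes, etag %s", info.Size, info.ETag)
}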
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go new file mode 100644 index 0000000..5f117af --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go | |||
@@ -0,0 +1,465 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/base64" | ||
24 | "encoding/hex" | ||
25 | "encoding/xml" | ||
26 | "fmt" | ||
27 | "hash/crc32" | ||
28 | "io" | ||
29 | "net/http" | ||
30 | "net/url" | ||
31 | "sort" | ||
32 | "strconv" | ||
33 | "strings" | ||
34 | |||
35 | "github.com/google/uuid" | ||
36 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
37 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
38 | ) | ||
39 | |||
40 | func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, | ||
41 | opts PutObjectOptions, | ||
42 | ) (info UploadInfo, err error) { | ||
43 | info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) | ||
44 | if err != nil { | ||
45 | errResp := ToErrorResponse(err) | ||
46 | // If multipart functionality is not available, fall back | ||
47 | // to a single PutObject operation. | ||
48 | if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { | ||
49 | // Verify if size of reader is greater than '5GiB'. | ||
50 | if size > maxSinglePutObjectSize { | ||
51 | return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) | ||
52 | } | ||
53 | // Fall back to uploading as single PutObject operation. | ||
54 | return c.putObject(ctx, bucketName, objectName, reader, size, opts) | ||
55 | } | ||
56 | } | ||
57 | return info, err | ||
58 | } | ||
59 | |||
60 | func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { | ||
61 | // Input validation. | ||
62 | if err = s3utils.CheckValidBucketName(bucketName); err != nil { | ||
63 | return UploadInfo{}, err | ||
64 | } | ||
65 | if err = s3utils.CheckValidObjectName(objectName); err != nil { | ||
66 | return UploadInfo{}, err | ||
67 | } | ||
68 | |||
69 | // Total data read and written to server; should be equal to | ||
70 | // 'size' at the end of the call. | ||
71 | var totalUploadedSize int64 | ||
72 | |||
73 | // Complete multipart upload. | ||
74 | var complMultipartUpload completeMultipartUpload | ||
75 | |||
76 | // Calculate the optimal parts info for a given size. | ||
77 | totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) | ||
78 | if err != nil { | ||
79 | return UploadInfo{}, err | ||
80 | } | ||
81 | |||
82 | // Choose hash algorithms to be calculated by hashCopyN, | ||
83 | // avoid sha256 with non-v4 signature request or | ||
84 | // HTTPS connection. | ||
85 | hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) | ||
86 | if len(hashSums) == 0 { | ||
87 | if opts.UserMetadata == nil { | ||
88 | opts.UserMetadata = make(map[string]string, 1) | ||
89 | } | ||
90 | opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" | ||
91 | } | ||
92 | |||
93 | // Initiate a new multipart upload. | ||
94 | uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) | ||
95 | if err != nil { | ||
96 | return UploadInfo{}, err | ||
97 | } | ||
98 | delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") | ||
99 | |||
100 | defer func() { | ||
101 | if err != nil { | ||
102 | c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | ||
103 | } | ||
104 | }() | ||
105 | |||
106 | // Part number always starts with '1'. | ||
107 | partNumber := 1 | ||
108 | |||
109 | // Initialize parts uploaded map. | ||
110 | partsInfo := make(map[int]ObjectPart) | ||
111 | |||
112 | // Create a buffer. | ||
113 | buf := make([]byte, partSize) | ||
114 | |||
115 | // Create checksums | ||
116 | // CRC32C is ~50% faster on AMD64 @ 30GB/s | ||
117 | var crcBytes []byte | ||
118 | customHeader := make(http.Header) | ||
119 | crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) | ||
120 | for partNumber <= totalPartsCount { | ||
121 | length, rErr := readFull(reader, buf) | ||
122 | if rErr == io.EOF && partNumber > 1 { | ||
123 | break | ||
124 | } | ||
125 | |||
126 | if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { | ||
127 | return UploadInfo{}, rErr | ||
128 | } | ||
129 | |||
130 | // Calculate hash sums of the chunk that was just read. | ||
131 | for k, v := range hashAlgos { | ||
132 | v.Write(buf[:length]) | ||
133 | hashSums[k] = v.Sum(nil) | ||
134 | v.Close() | ||
135 | } | ||
136 | |||
137 | // Update progress reader appropriately to the latest offset | ||
138 | // as we read from the source. | ||
139 | rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) | ||
140 | |||
141 | // Checksums. | ||
142 | var ( | ||
143 | md5Base64 string | ||
144 | sha256Hex string | ||
145 | ) | ||
146 | |||
147 | if hashSums["md5"] != nil { | ||
148 | md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) | ||
149 | } | ||
150 | if hashSums["sha256"] != nil { | ||
151 | sha256Hex = hex.EncodeToString(hashSums["sha256"]) | ||
152 | } | ||
153 | if len(hashSums) == 0 { | ||
154 | crc.Reset() | ||
155 | crc.Write(buf[:length]) | ||
156 | cSum := crc.Sum(nil) | ||
157 | customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) | ||
158 | crcBytes = append(crcBytes, cSum...) | ||
159 | } | ||
160 | |||
161 | p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} | ||
162 | // Proceed to upload the part. | ||
163 | objPart, uerr := c.uploadPart(ctx, p) | ||
164 | if uerr != nil { | ||
165 | return UploadInfo{}, uerr | ||
166 | } | ||
167 | |||
168 | // Save successfully uploaded part metadata. | ||
169 | partsInfo[partNumber] = objPart | ||
170 | |||
171 | // Save successfully uploaded size. | ||
172 | totalUploadedSize += int64(length) | ||
173 | |||
174 | // Increment part number. | ||
175 | partNumber++ | ||
176 | |||
177 | // For unknown size, on read EOF we break away; | ||
178 | // we do not have to upload till totalPartsCount. | ||
179 | if rErr == io.EOF { | ||
180 | break | ||
181 | } | ||
182 | } | ||
183 | |||
184 | // Loop over total uploaded parts to save them in | ||
185 | // Parts array before completing the multipart request. | ||
186 | for i := 1; i < partNumber; i++ { | ||
187 | part, ok := partsInfo[i] | ||
188 | if !ok { | ||
189 | return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) | ||
190 | } | ||
191 | complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ | ||
192 | ETag: part.ETag, | ||
193 | PartNumber: part.PartNumber, | ||
194 | ChecksumCRC32: part.ChecksumCRC32, | ||
195 | ChecksumCRC32C: part.ChecksumCRC32C, | ||
196 | ChecksumSHA1: part.ChecksumSHA1, | ||
197 | ChecksumSHA256: part.ChecksumSHA256, | ||
198 | }) | ||
199 | } | ||
200 | |||
201 | // Sort all completed parts. | ||
202 | sort.Sort(completedParts(complMultipartUpload.Parts)) | ||
203 | opts = PutObjectOptions{ | ||
204 | ServerSideEncryption: opts.ServerSideEncryption, | ||
205 | } | ||
206 | if len(crcBytes) > 0 { | ||
207 | // Add hash of hashes. | ||
208 | crc.Reset() | ||
209 | crc.Write(crcBytes) | ||
210 | opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} | ||
211 | } | ||
212 | uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) | ||
213 | if err != nil { | ||
214 | return UploadInfo{}, err | ||
215 | } | ||
216 | |||
217 | uploadInfo.Size = totalUploadedSize | ||
218 | return uploadInfo, nil | ||
219 | } | ||
220 | |||
221 | // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. | ||
222 | func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { | ||
223 | // Input validation. | ||
224 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
225 | return initiateMultipartUploadResult{}, err | ||
226 | } | ||
227 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
228 | return initiateMultipartUploadResult{}, err | ||
229 | } | ||
230 | |||
231 | // Initialize url queries. | ||
232 | urlValues := make(url.Values) | ||
233 | urlValues.Set("uploads", "") | ||
234 | |||
235 | if opts.Internal.SourceVersionID != "" { | ||
236 | if opts.Internal.SourceVersionID != nullVersionID { | ||
237 | if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { | ||
238 | return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) | ||
239 | } | ||
240 | } | ||
241 | urlValues.Set("versionId", opts.Internal.SourceVersionID) | ||
242 | } | ||
243 | |||
244 | // Set ContentType header. | ||
245 | customHeader := opts.Header() | ||
246 | |||
247 | reqMetadata := requestMetadata{ | ||
248 | bucketName: bucketName, | ||
249 | objectName: objectName, | ||
250 | queryValues: urlValues, | ||
251 | customHeader: customHeader, | ||
252 | } | ||
253 | |||
254 | // Execute POST on an objectName to initiate multipart upload. | ||
255 | resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) | ||
256 | defer closeResponse(resp) | ||
257 | if err != nil { | ||
258 | return initiateMultipartUploadResult{}, err | ||
259 | } | ||
260 | if resp != nil { | ||
261 | if resp.StatusCode != http.StatusOK { | ||
262 | return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) | ||
263 | } | ||
264 | } | ||
265 | // Decode xml for new multipart upload. | ||
266 | initiateMultipartUploadResult := initiateMultipartUploadResult{} | ||
267 | err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) | ||
268 | if err != nil { | ||
269 | return initiateMultipartUploadResult, err | ||
270 | } | ||
271 | return initiateMultipartUploadResult, nil | ||
272 | } | ||
273 | |||
274 | type uploadPartParams struct { | ||
275 | bucketName string | ||
276 | objectName string | ||
277 | uploadID string | ||
278 | reader io.Reader | ||
279 | partNumber int | ||
280 | md5Base64 string | ||
281 | sha256Hex string | ||
282 | size int64 | ||
283 | sse encrypt.ServerSide | ||
284 | streamSha256 bool | ||
285 | customHeader http.Header | ||
286 | trailer http.Header | ||
287 | } | ||
288 | |||
289 | // uploadPart - Uploads a part in a multipart upload. | ||
290 | func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { | ||
291 | // Input validation. | ||
292 | if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { | ||
293 | return ObjectPart{}, err | ||
294 | } | ||
295 | if err := s3utils.CheckValidObjectName(p.objectName); err != nil { | ||
296 | return ObjectPart{}, err | ||
297 | } | ||
298 | if p.size > maxPartSize { | ||
299 | return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) | ||
300 | } | ||
301 | if p.size <= -1 { | ||
302 | return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) | ||
303 | } | ||
304 | if p.partNumber <= 0 { | ||
305 | return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") | ||
306 | } | ||
307 | if p.uploadID == "" { | ||
308 | return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") | ||
309 | } | ||
310 | |||
311 | // Get resources properly escaped and lined up before using them in the http request. | ||
312 | urlValues := make(url.Values) | ||
313 | // Set part number. | ||
314 | urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) | ||
315 | // Set upload id. | ||
316 | urlValues.Set("uploadId", p.uploadID) | ||
317 | |||
318 | // Set encryption headers, if any. | ||
319 | if p.customHeader == nil { | ||
320 | p.customHeader = make(http.Header) | ||
321 | } | ||
322 | // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html | ||
323 | // Server-side encryption is supported by the S3 Multipart Upload actions. | ||
324 | // Unless you are using a customer-provided encryption key, you don't need | ||
325 | // to specify the encryption parameters in each UploadPart request. | ||
326 | if p.sse != nil && p.sse.Type() == encrypt.SSEC { | ||
327 | p.sse.Marshal(p.customHeader) | ||
328 | } | ||
329 | |||
330 | reqMetadata := requestMetadata{ | ||
331 | bucketName: p.bucketName, | ||
332 | objectName: p.objectName, | ||
333 | queryValues: urlValues, | ||
334 | customHeader: p.customHeader, | ||
335 | contentBody: p.reader, | ||
336 | contentLength: p.size, | ||
337 | contentMD5Base64: p.md5Base64, | ||
338 | contentSHA256Hex: p.sha256Hex, | ||
339 | streamSha256: p.streamSha256, | ||
340 | trailer: p.trailer, | ||
341 | } | ||
342 | |||
343 | // Execute PUT on each part. | ||
344 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
345 | defer closeResponse(resp) | ||
346 | if err != nil { | ||
347 | return ObjectPart{}, err | ||
348 | } | ||
349 | if resp != nil { | ||
350 | if resp.StatusCode != http.StatusOK { | ||
351 | return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) | ||
352 | } | ||
353 | } | ||
354 | // Once successfully uploaded, return completed part. | ||
355 | h := resp.Header | ||
356 | objPart := ObjectPart{ | ||
357 | ChecksumCRC32: h.Get("x-amz-checksum-crc32"), | ||
358 | ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), | ||
359 | ChecksumSHA1: h.Get("x-amz-checksum-sha1"), | ||
360 | ChecksumSHA256: h.Get("x-amz-checksum-sha256"), | ||
361 | } | ||
362 | objPart.Size = p.size | ||
363 | objPart.PartNumber = p.partNumber | ||
364 | // Trim off the odd double quotes from ETag at the beginning and end. | ||
365 | objPart.ETag = trimEtag(h.Get("ETag")) | ||
366 | return objPart, nil | ||
367 | } | ||
368 | |||
369 | // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. | ||
370 | func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, | ||
371 | complete completeMultipartUpload, opts PutObjectOptions, | ||
372 | ) (UploadInfo, error) { | ||
373 | // Input validation. | ||
374 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
375 | return UploadInfo{}, err | ||
376 | } | ||
377 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
378 | return UploadInfo{}, err | ||
379 | } | ||
380 | |||
381 | // Initialize url queries. | ||
382 | urlValues := make(url.Values) | ||
383 | urlValues.Set("uploadId", uploadID) | ||
384 | // Marshal complete multipart body. | ||
385 | completeMultipartUploadBytes, err := xml.Marshal(complete) | ||
386 | if err != nil { | ||
387 | return UploadInfo{}, err | ||
388 | } | ||
389 | |||
390 | headers := opts.Header() | ||
391 | if s3utils.IsAmazonEndpoint(*c.endpointURL) { | ||
392 | headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload | ||
393 | headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload | ||
394 | headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload | ||
395 | } | ||
396 | |||
397 | // Instantiate the complete multipart upload body as a buffer. | ||
398 | completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) | ||
399 | reqMetadata := requestMetadata{ | ||
400 | bucketName: bucketName, | ||
401 | objectName: objectName, | ||
402 | queryValues: urlValues, | ||
403 | contentBody: completeMultipartUploadBuffer, | ||
404 | contentLength: int64(len(completeMultipartUploadBytes)), | ||
405 | contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), | ||
406 | customHeader: headers, | ||
407 | } | ||
408 | |||
409 | // Execute POST to complete multipart upload for an objectName. | ||
410 | resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) | ||
411 | defer closeResponse(resp) | ||
412 | if err != nil { | ||
413 | return UploadInfo{}, err | ||
414 | } | ||
415 | if resp != nil { | ||
416 | if resp.StatusCode != http.StatusOK { | ||
417 | return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) | ||
418 | } | ||
419 | } | ||
420 | |||
421 | // Read resp.Body into a []byte to parse for an Error response inside the body. | ||
422 | var b []byte | ||
423 | b, err = io.ReadAll(resp.Body) | ||
424 | if err != nil { | ||
425 | return UploadInfo{}, err | ||
426 | } | ||
427 | // Decode completed multipart upload response on success. | ||
428 | completeMultipartUploadResult := completeMultipartUploadResult{} | ||
429 | err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) | ||
430 | if err != nil { | ||
431 | // xml parsing failure due to the presence of an ill-formed xml fragment | ||
432 | return UploadInfo{}, err | ||
433 | } else if completeMultipartUploadResult.Bucket == "" { | ||
434 | // xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied. | ||
435 | // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values | ||
436 | // of the members. | ||
437 | |||
438 | // Decode completed multipart upload response on failure | ||
439 | completeMultipartUploadErr := ErrorResponse{} | ||
440 | err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) | ||
441 | if err != nil { | ||
442 | // xml parsing failure due to the presence of an ill-formed xml fragment | ||
443 | return UploadInfo{}, err | ||
444 | } | ||
445 | return UploadInfo{}, completeMultipartUploadErr | ||
446 | } | ||
447 | |||
448 | // extract lifecycle expiry date and rule ID | ||
449 | expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) | ||
450 | |||
451 | return UploadInfo{ | ||
452 | Bucket: completeMultipartUploadResult.Bucket, | ||
453 | Key: completeMultipartUploadResult.Key, | ||
454 | ETag: trimEtag(completeMultipartUploadResult.ETag), | ||
455 | VersionID: resp.Header.Get(amzVersionID), | ||
456 | Location: completeMultipartUploadResult.Location, | ||
457 | Expiration: expTime, | ||
458 | ExpirationRuleID: ruleID, | ||
459 | |||
460 | ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, | ||
461 | ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, | ||
462 | ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, | ||
463 | ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, | ||
464 | }, nil | ||
465 | } | ||
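The upload loop above records a raw per-part CRC32C in crcBytes and then sets the X-Amz-Checksum-Crc32c user metadata to a "hash of hashes": the CRC32C of the concatenated part sums. A standalone sketch of that computation, using hypothetical part payloads:

package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

func main() {
	// Hypothetical part payloads; real parts are up to partSize bytes each.
	parts := [][]byte{[]byte("part one data"), []byte("part two data")}

	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))

	// Collect the raw 4-byte CRC32C sum of every part, in part order,
	// exactly as crcBytes accumulates them in the upload loop above.
	var crcBytes []byte
	for _, p := range parts {
		crc.Reset()
		crc.Write(p)
		crcBytes = append(crcBytes, crc.Sum(nil)...)
	}

	// The composite value is the CRC32C of the concatenated part sums,
	// base64-encoded, mirroring the X-Amz-Checksum-Crc32c metadata set
	// before completeMultipartUpload.
	crc.Reset()
	crc.Write(crcBytes)
	fmt.Println(base64.StdEncoding.EncodeToString(crc.Sum(nil)))
}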
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go new file mode 100644 index 0000000..9182d4e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go | |||
@@ -0,0 +1,809 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/base64" | ||
24 | "fmt" | ||
25 | "hash/crc32" | ||
26 | "io" | ||
27 | "net/http" | ||
28 | "net/url" | ||
29 | "sort" | ||
30 | "strings" | ||
31 | "sync" | ||
32 | |||
33 | "github.com/google/uuid" | ||
34 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
35 | ) | ||
36 | |||
37 | // putObjectMultipartStream - upload a large object using | ||
38 | // multipart upload and streaming signature for signing payload. | ||
39 | // Comprehensive put object operation involving multipart uploads. | ||
40 | // | ||
41 | // The following code handles these types of readers. | ||
42 | // | ||
43 | // - *minio.Object | ||
44 | // - Any reader which has a method 'ReadAt()' | ||
45 | func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, | ||
46 | reader io.Reader, size int64, opts PutObjectOptions, | ||
47 | ) (info UploadInfo, err error) { | ||
48 | if opts.ConcurrentStreamParts && opts.NumThreads > 1 { | ||
49 | info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) | ||
50 | } else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { | ||
51 | // If the reader implements ReadAt and is not a *minio.Object, use the parallel uploader. | ||
52 | info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) | ||
53 | } else { | ||
54 | info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts) | ||
55 | } | ||
56 | if err != nil { | ||
57 | errResp := ToErrorResponse(err) | ||
58 | // If multipart functionality is not available, fall back | ||
59 | // to a single PutObject operation. | ||
60 | if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { | ||
61 | // Verify if size of reader is greater than '5GiB'. | ||
62 | if size > maxSinglePutObjectSize { | ||
63 | return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) | ||
64 | } | ||
65 | // Fall back to uploading as single PutObject operation. | ||
66 | return c.putObject(ctx, bucketName, objectName, reader, size, opts) | ||
67 | } | ||
68 | } | ||
69 | return info, err | ||
70 | } | ||
71 | |||
72 | // uploadedPartRes - the response received from a part upload. | ||
73 | type uploadedPartRes struct { | ||
74 | Error error // Any error encountered while uploading the part. | ||
75 | PartNum int // Number of the part uploaded. | ||
76 | Size int64 // Size of the part uploaded. | ||
77 | Part ObjectPart | ||
78 | } | ||
79 | |||
80 | type uploadPartReq struct { | ||
81 | PartNum int // Number of the part uploaded. | ||
82 | Part ObjectPart // Metadata of the uploaded part. | ||
83 | } | ||
84 | |||
85 | // putObjectMultipartStreamFromReadAt - Uploads files bigger than 128MiB. | ||
86 | // Supports all readers which implement the io.ReaderAt interface | ||
87 | // (ReadAt method). | ||
88 | // | ||
89 | // NOTE: This function is meant to be used for all readers which | ||
90 | // implement io.ReaderAt, which allows us to resume multipart | ||
91 | // uploads by reading at an offset, avoiding re-reading data that | ||
92 | // was already uploaded. Internally this function uses | ||
93 | // temporary files for staging all the data; these temporary files are | ||
94 | // cleaned up automatically when the caller, i.e. the http client, closes | ||
95 | // the stream after uploading all the contents successfully. | ||
96 | func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, | ||
97 | reader io.ReaderAt, size int64, opts PutObjectOptions, | ||
98 | ) (info UploadInfo, err error) { | ||
99 | // Input validation. | ||
100 | if err = s3utils.CheckValidBucketName(bucketName); err != nil { | ||
101 | return UploadInfo{}, err | ||
102 | } | ||
103 | if err = s3utils.CheckValidObjectName(objectName); err != nil { | ||
104 | return UploadInfo{}, err | ||
105 | } | ||
106 | |||
107 | // Calculate the optimal parts info for a given size. | ||
108 | totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) | ||
109 | if err != nil { | ||
110 | return UploadInfo{}, err | ||
111 | } | ||
112 | |||
113 | withChecksum := c.trailingHeaderSupport | ||
114 | if withChecksum { | ||
115 | if opts.UserMetadata == nil { | ||
116 | opts.UserMetadata = make(map[string]string, 1) | ||
117 | } | ||
118 | opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" | ||
119 | } | ||
120 | // Initiate a new multipart upload. | ||
121 | uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) | ||
122 | if err != nil { | ||
123 | return UploadInfo{}, err | ||
124 | } | ||
125 | delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") | ||
126 | |||
127 | // Aborts the multipart upload in progress, if the | ||
128 | // function returns any error, since we do not resume | ||
129 | // we should purge the parts which have been uploaded | ||
130 | // to relinquish storage space. | ||
131 | defer func() { | ||
132 | if err != nil { | ||
133 | c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | ||
134 | } | ||
135 | }() | ||
136 | |||
137 | // Total data read and written to server; should be equal to 'size' at the end of the call. | ||
138 | var totalUploadedSize int64 | ||
139 | |||
140 | // Complete multipart upload. | ||
141 | var complMultipartUpload completeMultipartUpload | ||
142 | |||
143 | // Declare a channel that sends the next part number to be uploaded. | ||
144 | uploadPartsCh := make(chan uploadPartReq) | ||
145 | |||
146 | // Declare a channel that sends back the response of a part upload. | ||
147 | uploadedPartsCh := make(chan uploadedPartRes) | ||
148 | |||
149 | // Used for readability, lastPartNumber is always totalPartsCount. | ||
150 | lastPartNumber := totalPartsCount | ||
151 | |||
152 | partitionCtx, partitionCancel := context.WithCancel(ctx) | ||
153 | defer partitionCancel() | ||
154 | // Send each part number to the channel to be processed. | ||
155 | go func() { | ||
156 | defer close(uploadPartsCh) | ||
157 | |||
158 | for p := 1; p <= totalPartsCount; p++ { | ||
159 | select { | ||
160 | case <-partitionCtx.Done(): | ||
161 | return | ||
162 | case uploadPartsCh <- uploadPartReq{PartNum: p}: | ||
163 | } | ||
164 | } | ||
165 | }() | ||
166 | |||
167 | // Receive each part number from the channel, allowing opts.getNumThreads() parallel uploads. | ||
168 | for w := 1; w <= opts.getNumThreads(); w++ { | ||
169 | go func(partSize int64) { | ||
170 | for { | ||
171 | var uploadReq uploadPartReq | ||
172 | var ok bool | ||
173 | select { | ||
174 | case <-ctx.Done(): | ||
175 | return | ||
176 | case uploadReq, ok = <-uploadPartsCh: | ||
177 | if !ok { | ||
178 | return | ||
179 | } | ||
180 | // Each worker will draw from the part channel and upload in parallel. | ||
181 | } | ||
182 | |||
183 | // Calculate the read offset and size for this part. | ||
184 | // For every part except the last, the offset is a | ||
185 | // multiple of partSize. | ||
186 | readOffset := int64(uploadReq.PartNum-1) * partSize | ||
187 | |||
188 | // As a special case if partNumber is lastPartNumber, we | ||
189 | // calculate the offset based on the last part size. | ||
190 | if uploadReq.PartNum == lastPartNumber { | ||
191 | readOffset = size - lastPartSize | ||
192 | partSize = lastPartSize | ||
193 | } | ||
194 | |||
195 | sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) | ||
196 | trailer := make(http.Header, 1) | ||
197 | if withChecksum { | ||
198 | crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) | ||
199 | trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) | ||
200 | sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) { | ||
201 | trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) | ||
202 | }) | ||
203 | } | ||
204 | |||
205 | // Proceed to upload the part. | ||
206 | p := uploadPartParams{ | ||
207 | bucketName: bucketName, | ||
208 | objectName: objectName, | ||
209 | uploadID: uploadID, | ||
210 | reader: sectionReader, | ||
211 | partNumber: uploadReq.PartNum, | ||
212 | size: partSize, | ||
213 | sse: opts.ServerSideEncryption, | ||
214 | streamSha256: !opts.DisableContentSha256, | ||
215 | sha256Hex: "", | ||
216 | trailer: trailer, | ||
217 | } | ||
218 | objPart, err := c.uploadPart(ctx, p) | ||
219 | if err != nil { | ||
220 | uploadedPartsCh <- uploadedPartRes{ | ||
221 | Error: err, | ||
222 | } | ||
223 | // Exit the goroutine. | ||
224 | return | ||
225 | } | ||
226 | |||
227 | // Save successfully uploaded part metadata. | ||
228 | uploadReq.Part = objPart | ||
229 | |||
230 | // Send successful part info through the channel. | ||
231 | uploadedPartsCh <- uploadedPartRes{ | ||
232 | Size: objPart.Size, | ||
233 | PartNum: uploadReq.PartNum, | ||
234 | Part: uploadReq.Part, | ||
235 | } | ||
236 | } | ||
237 | }(partSize) | ||
238 | } | ||
239 | |||
240 | // Gather the responses as they occur and update any | ||
241 | // progress bar. | ||
242 | for u := 1; u <= totalPartsCount; u++ { | ||
243 | select { | ||
244 | case <-ctx.Done(): | ||
245 | return UploadInfo{}, ctx.Err() | ||
246 | case uploadRes := <-uploadedPartsCh: | ||
247 | if uploadRes.Error != nil { | ||
248 | return UploadInfo{}, uploadRes.Error | ||
249 | } | ||
250 | |||
251 | // Update the totalUploadedSize. | ||
252 | totalUploadedSize += uploadRes.Size | ||
253 | complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ | ||
254 | ETag: uploadRes.Part.ETag, | ||
255 | PartNumber: uploadRes.Part.PartNumber, | ||
256 | ChecksumCRC32: uploadRes.Part.ChecksumCRC32, | ||
257 | ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, | ||
258 | ChecksumSHA1: uploadRes.Part.ChecksumSHA1, | ||
259 | ChecksumSHA256: uploadRes.Part.ChecksumSHA256, | ||
260 | }) | ||
261 | } | ||
262 | } | ||
263 | |||
264 | // Verify if we uploaded all the data. | ||
265 | if totalUploadedSize != size { | ||
266 | return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) | ||
267 | } | ||
268 | |||
269 | // Sort all completed parts. | ||
270 | sort.Sort(completedParts(complMultipartUpload.Parts)) | ||
271 | |||
272 | opts = PutObjectOptions{ | ||
273 | ServerSideEncryption: opts.ServerSideEncryption, | ||
274 | } | ||
275 | if withChecksum { | ||
276 | // Add hash of hashes. | ||
277 | crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) | ||
278 | for _, part := range complMultipartUpload.Parts { | ||
279 | cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C) | ||
280 | if err == nil { | ||
281 | crc.Write(cs) | ||
282 | } | ||
283 | } | ||
284 | opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} | ||
285 | } | ||
286 | |||
287 | uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) | ||
288 | if err != nil { | ||
289 | return UploadInfo{}, err | ||
290 | } | ||
291 | |||
292 | uploadInfo.Size = totalUploadedSize | ||
293 | return uploadInfo, nil | ||
294 | } | ||
295 | |||
296 | func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string, | ||
297 | reader io.Reader, size int64, opts PutObjectOptions, | ||
298 | ) (info UploadInfo, err error) { | ||
299 | // Input validation. | ||
300 | if err = s3utils.CheckValidBucketName(bucketName); err != nil { | ||
301 | return UploadInfo{}, err | ||
302 | } | ||
303 | if err = s3utils.CheckValidObjectName(objectName); err != nil { | ||
304 | return UploadInfo{}, err | ||
305 | } | ||
306 | |||
307 | if !opts.SendContentMd5 { | ||
308 | if opts.UserMetadata == nil { | ||
309 | opts.UserMetadata = make(map[string]string, 1) | ||
310 | } | ||
311 | opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" | ||
312 | } | ||
313 | |||
314 | // Calculate the optimal parts info for a given size. | ||
315 | totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) | ||
316 | if err != nil { | ||
317 | return UploadInfo{}, err | ||
318 | } | ||
319 | // Initiates a new multipart request | ||
320 | uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) | ||
321 | if err != nil { | ||
322 | return UploadInfo{}, err | ||
323 | } | ||
324 | delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") | ||
325 | |||
326 | // Aborts the multipart upload if the function returns | ||
327 | // any error, since we do not resume we should purge | ||
328 | // the parts which have been uploaded to relinquish | ||
329 | // storage space. | ||
330 | defer func() { | ||
331 | if err != nil { | ||
332 | c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | ||
333 | } | ||
334 | }() | ||
335 | |||
336 | // Create checksums | ||
337 | // CRC32C is ~50% faster on AMD64 @ 30GB/s | ||
338 | var crcBytes []byte | ||
339 | customHeader := make(http.Header) | ||
340 | crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) | ||
341 | md5Hash := c.md5Hasher() | ||
342 | defer md5Hash.Close() | ||
343 | |||
344 | // Total data read and written to server; should be equal to 'size' at the end of the call. | ||
345 | var totalUploadedSize int64 | ||
346 | |||
347 | // Initialize parts uploaded map. | ||
348 | partsInfo := make(map[int]ObjectPart) | ||
349 | |||
350 | // Create a buffer. | ||
351 | buf := make([]byte, partSize) | ||
352 | |||
353 | // Avoid declaring variables in the for loop | ||
354 | var md5Base64 string | ||
355 | |||
356 | // Part number always starts with '1'. | ||
357 | var partNumber int | ||
358 | for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { | ||
359 | |||
360 | // Proceed to upload the part. | ||
361 | if partNumber == totalPartsCount { | ||
362 | partSize = lastPartSize | ||
363 | } | ||
364 | |||
365 | length, rerr := readFull(reader, buf) | ||
366 | if rerr == io.EOF && partNumber > 1 { | ||
367 | break | ||
368 | } | ||
369 | |||
370 | if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { | ||
371 | return UploadInfo{}, rerr | ||
372 | } | ||
373 | |||
374 | // Calculate md5sum. | ||
375 | if opts.SendContentMd5 { | ||
376 | md5Hash.Reset() | ||
377 | md5Hash.Write(buf[:length]) | ||
378 | md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)) | ||
379 | } else { | ||
380 | // Add CRC32C instead. | ||
381 | crc.Reset() | ||
382 | crc.Write(buf[:length]) | ||
383 | cSum := crc.Sum(nil) | ||
384 | customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) | ||
385 | crcBytes = append(crcBytes, cSum...) | ||
386 | } | ||
387 | |||
388 | // Update progress reader appropriately to the latest offset | ||
389 | // as we read from the source. | ||
390 | hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress) | ||
391 | p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} | ||
392 | objPart, uerr := c.uploadPart(ctx, p) | ||
393 | if uerr != nil { | ||
394 | return UploadInfo{}, uerr | ||
395 | } | ||
396 | |||
397 | // Save successfully uploaded part metadata. | ||
398 | partsInfo[partNumber] = objPart | ||
399 | |||
400 | // Save successfully uploaded size. | ||
401 | totalUploadedSize += partSize | ||
402 | } | ||
403 | |||
404 | // Verify if we uploaded all the data. | ||
405 | if size > 0 { | ||
406 | if totalUploadedSize != size { | ||
407 | return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) | ||
408 | } | ||
409 | } | ||
410 | |||
411 | // Complete multipart upload. | ||
412 | var complMultipartUpload completeMultipartUpload | ||
413 | |||
414 | // Loop over total uploaded parts to save them in | ||
415 | // Parts array before completing the multipart request. | ||
416 | for i := 1; i < partNumber; i++ { | ||
417 | part, ok := partsInfo[i] | ||
418 | if !ok { | ||
419 | return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) | ||
420 | } | ||
421 | complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ | ||
422 | ETag: part.ETag, | ||
423 | PartNumber: part.PartNumber, | ||
424 | ChecksumCRC32: part.ChecksumCRC32, | ||
425 | ChecksumCRC32C: part.ChecksumCRC32C, | ||
426 | ChecksumSHA1: part.ChecksumSHA1, | ||
427 | ChecksumSHA256: part.ChecksumSHA256, | ||
428 | }) | ||
429 | } | ||
430 | |||
431 | // Sort all completed parts. | ||
432 | sort.Sort(completedParts(complMultipartUpload.Parts)) | ||
433 | |||
434 | opts = PutObjectOptions{ | ||
435 | ServerSideEncryption: opts.ServerSideEncryption, | ||
436 | } | ||
437 | if len(crcBytes) > 0 { | ||
438 | // Add hash of hashes. | ||
439 | crc.Reset() | ||
440 | crc.Write(crcBytes) | ||
441 | opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} | ||
442 | } | ||
443 | uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) | ||
444 | if err != nil { | ||
445 | return UploadInfo{}, err | ||
446 | } | ||
447 | |||
448 | uploadInfo.Size = totalUploadedSize | ||
449 | return uploadInfo, nil | ||
450 | } | ||
451 | |||
452 | // putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel. | ||
453 | // This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer. | ||
454 | func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string, | ||
455 | reader io.Reader, opts PutObjectOptions, | ||
456 | ) (info UploadInfo, err error) { | ||
457 | // Input validation. | ||
458 | if err = s3utils.CheckValidBucketName(bucketName); err != nil { | ||
459 | return UploadInfo{}, err | ||
460 | } | ||
461 | |||
462 | if err = s3utils.CheckValidObjectName(objectName); err != nil { | ||
463 | return UploadInfo{}, err | ||
464 | } | ||
465 | |||
466 | if !opts.SendContentMd5 { | ||
467 | if opts.UserMetadata == nil { | ||
468 | opts.UserMetadata = make(map[string]string, 1) | ||
469 | } | ||
470 | opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" | ||
471 | } | ||
472 | |||
473 | // Cancel all when an error occurs. | ||
474 | ctx, cancel := context.WithCancel(ctx) | ||
475 | defer cancel() | ||
476 | |||
477 | // Calculate the optimal parts info for a given size. | ||
478 | totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) | ||
479 | if err != nil { | ||
480 | return UploadInfo{}, err | ||
481 | } | ||
482 | |||
483 | // Initiates a new multipart request | ||
484 | uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) | ||
485 | if err != nil { | ||
486 | return UploadInfo{}, err | ||
487 | } | ||
488 | delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") | ||
489 | |||
490 | // Aborts the multipart upload if the function returns | ||
491 | // any error, since we do not resume we should purge | ||
492 | // the parts which have been uploaded to relinquish | ||
493 | // storage space. | ||
494 | defer func() { | ||
495 | if err != nil { | ||
496 | c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | ||
497 | } | ||
498 | }() | ||
499 | |||
500 | // Create checksums | ||
501 | // CRC32C is ~50% faster on AMD64 @ 30GB/s | ||
502 | var crcBytes []byte | ||
503 | crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) | ||
504 | |||
505 | // Total data read and written to server; should be equal to 'size' at the end of the call. | ||
506 | var totalUploadedSize int64 | ||
507 | |||
508 | // Initialize parts uploaded map. | ||
509 | partsInfo := make(map[int]ObjectPart) | ||
510 | |||
511 | // Create a buffer. | ||
512 | nBuffers := int64(opts.NumThreads) | ||
513 | bufs := make(chan []byte, nBuffers) | ||
514 | all := make([]byte, nBuffers*partSize) | ||
515 | for i := int64(0); i < nBuffers; i++ { | ||
516 | bufs <- all[i*partSize : i*partSize+partSize] | ||
517 | } | ||
518 | |||
519 | var wg sync.WaitGroup | ||
520 | var mu sync.Mutex | ||
521 | errCh := make(chan error, opts.NumThreads) | ||
522 | |||
523 | reader = newHook(reader, opts.Progress) | ||
524 | |||
525 | // Part number always starts with '1'. | ||
526 | var partNumber int | ||
527 | for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { | ||
528 | // Proceed to upload the part. | ||
529 | var buf []byte | ||
530 | select { | ||
531 | case buf = <-bufs: | ||
532 | case err = <-errCh: | ||
533 | cancel() | ||
534 | wg.Wait() | ||
535 | return UploadInfo{}, err | ||
536 | } | ||
537 | |||
538 | if int64(len(buf)) != partSize { | ||
539 | return UploadInfo{}, fmt.Errorf("read buffer < %d than expected partSize: %d", len(buf), partSize) | ||
540 | } | ||
541 | |||
542 | length, rerr := readFull(reader, buf) | ||
543 | if rerr == io.EOF && partNumber > 1 { | ||
544 | // Done | ||
545 | break | ||
546 | } | ||
547 | |||
548 | if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { | ||
549 | cancel() | ||
550 | wg.Wait() | ||
551 | return UploadInfo{}, rerr | ||
552 | } | ||
553 | |||
554 | // Calculate md5sum. | ||
555 | customHeader := make(http.Header) | ||
556 | if !opts.SendContentMd5 { | ||
557 | // Add CRC32C instead. | ||
558 | crc.Reset() | ||
559 | crc.Write(buf[:length]) | ||
560 | cSum := crc.Sum(nil) | ||
561 | customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) | ||
562 | crcBytes = append(crcBytes, cSum...) | ||
563 | } | ||
564 | |||
565 | wg.Add(1) | ||
566 | go func(partNumber int) { | ||
567 | // Avoid declaring variables in the for loop | ||
568 | var md5Base64 string | ||
569 | |||
570 | if opts.SendContentMd5 { | ||
571 | md5Hash := c.md5Hasher() | ||
572 | md5Hash.Write(buf[:length]) | ||
573 | md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)) | ||
574 | md5Hash.Close() | ||
575 | } | ||
576 | |||
577 | defer wg.Done() | ||
578 | p := uploadPartParams{ | ||
579 | bucketName: bucketName, | ||
580 | objectName: objectName, | ||
581 | uploadID: uploadID, | ||
582 | reader: bytes.NewReader(buf[:length]), | ||
583 | partNumber: partNumber, | ||
584 | md5Base64: md5Base64, | ||
585 | size: int64(length), | ||
586 | sse: opts.ServerSideEncryption, | ||
587 | streamSha256: !opts.DisableContentSha256, | ||
588 | customHeader: customHeader, | ||
589 | } | ||
590 | objPart, uerr := c.uploadPart(ctx, p) | ||
591 | if uerr != nil { | ||
592 | errCh <- uerr | ||
593 | return | ||
594 | } | ||
595 | |||
596 | // Save successfully uploaded part metadata. | ||
597 | mu.Lock() | ||
598 | partsInfo[partNumber] = objPart | ||
599 | mu.Unlock() | ||
600 | |||
601 | // Send buffer back so it can be reused. | ||
602 | bufs <- buf | ||
603 | }(partNumber) | ||
604 | |||
605 | // Save successfully uploaded size. | ||
606 | totalUploadedSize += int64(length) | ||
607 | } | ||
608 | wg.Wait() | ||
609 | |||
610 | // Collect any error | ||
611 | select { | ||
612 | case err = <-errCh: | ||
613 | return UploadInfo{}, err | ||
614 | default: | ||
615 | } | ||
616 | |||
617 | // Complete multipart upload. | ||
618 | var complMultipartUpload completeMultipartUpload | ||
619 | |||
620 | // Loop over total uploaded parts to save them in | ||
621 | // Parts array before completing the multipart request. | ||
622 | for i := 1; i < partNumber; i++ { | ||
623 | part, ok := partsInfo[i] | ||
624 | if !ok { | ||
625 | return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) | ||
626 | } | ||
627 | complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ | ||
628 | ETag: part.ETag, | ||
629 | PartNumber: part.PartNumber, | ||
630 | ChecksumCRC32: part.ChecksumCRC32, | ||
631 | ChecksumCRC32C: part.ChecksumCRC32C, | ||
632 | ChecksumSHA1: part.ChecksumSHA1, | ||
633 | ChecksumSHA256: part.ChecksumSHA256, | ||
634 | }) | ||
635 | } | ||
636 | |||
637 | // Sort all completed parts. | ||
638 | sort.Sort(completedParts(complMultipartUpload.Parts)) | ||
639 | |||
640 | opts = PutObjectOptions{} | ||
641 | if len(crcBytes) > 0 { | ||
642 | // Add hash of hashes. | ||
643 | crc.Reset() | ||
644 | crc.Write(crcBytes) | ||
645 | opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} | ||
646 | } | ||
647 | uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) | ||
648 | if err != nil { | ||
649 | return UploadInfo{}, err | ||
650 | } | ||
651 | |||
652 | uploadInfo.Size = totalUploadedSize | ||
653 | return uploadInfo, nil | ||
654 | } | ||
655 | |||
656 | // putObject - a special function used for Google Cloud Storage, since | ||
657 | // Google's multipart API is not S3 compatible. | ||
658 | func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { | ||
659 | // Input validation. | ||
660 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
661 | return UploadInfo{}, err | ||
662 | } | ||
663 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
664 | return UploadInfo{}, err | ||
665 | } | ||
666 | |||
667 | // Size -1 is only supported on Google Cloud Storage; we error | ||
668 | // out in all other situations. | ||
669 | if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { | ||
670 | return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName) | ||
671 | } | ||
672 | |||
673 | if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 { | ||
674 | return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") | ||
675 | } | ||
676 | |||
677 | var readSeeker io.Seeker | ||
678 | if size > 0 { | ||
679 | if isReadAt(reader) && !isObject(reader) { | ||
680 | seeker, ok := reader.(io.Seeker) | ||
681 | if ok { | ||
682 | offset, err := seeker.Seek(0, io.SeekCurrent) | ||
683 | if err != nil { | ||
684 | return UploadInfo{}, errInvalidArgument(err.Error()) | ||
685 | } | ||
686 | reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) | ||
687 | readSeeker = reader.(io.Seeker) | ||
688 | } | ||
689 | } | ||
690 | } | ||
691 | |||
692 | var md5Base64 string | ||
693 | if opts.SendContentMd5 { | ||
694 | // Calculate md5sum. | ||
695 | hash := c.md5Hasher() | ||
696 | |||
697 | if readSeeker != nil { | ||
698 | if _, err := io.Copy(hash, reader); err != nil { | ||
699 | return UploadInfo{}, err | ||
700 | } | ||
701 | // Seek back to the start offset of the io.NewSectionReader. | ||
702 | _, err = readSeeker.Seek(0, io.SeekStart) | ||
703 | if err != nil { | ||
704 | return UploadInfo{}, errInvalidArgument(err.Error()) | ||
705 | } | ||
706 | } else { | ||
707 | // Create a buffer. | ||
708 | buf := make([]byte, size) | ||
709 | |||
710 | length, err := readFull(reader, buf) | ||
711 | if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { | ||
712 | return UploadInfo{}, err | ||
713 | } | ||
714 | |||
715 | hash.Write(buf[:length]) | ||
716 | reader = bytes.NewReader(buf[:length]) | ||
717 | } | ||
718 | |||
719 | md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) | ||
720 | hash.Close() | ||
721 | } | ||
722 | |||
723 | // Update progress reader appropriately to the latest offset as we | ||
724 | // read from the source. | ||
725 | progressReader := newHook(reader, opts.Progress) | ||
726 | |||
727 | // This function does not calculate sha256 and md5sum for payload. | ||
728 | // Execute put object. | ||
729 | return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts) | ||
730 | } | ||
731 | |||
732 | // putObjectDo - executes the put object http operation. | ||
733 | // NOTE: You must have WRITE permissions on a bucket to add an object to it. | ||
734 | func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { | ||
735 | // Input validation. | ||
736 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
737 | return UploadInfo{}, err | ||
738 | } | ||
739 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
740 | return UploadInfo{}, err | ||
741 | } | ||
742 | // Set headers. | ||
743 | customHeader := opts.Header() | ||
744 | |||
745 | // Add CRC when the client supports it, MD5 is not set, the endpoint is not Google, and we don't add SHA256 to chunks. | ||
746 | addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure) | ||
747 | |||
748 | if addCrc { | ||
749 | // If user has added checksums, don't add them ourselves. | ||
750 | for k := range opts.UserMetadata { | ||
751 | if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { | ||
752 | addCrc = false | ||
753 | } | ||
754 | } | ||
755 | } | ||
756 | // Populate request metadata. | ||
757 | reqMetadata := requestMetadata{ | ||
758 | bucketName: bucketName, | ||
759 | objectName: objectName, | ||
760 | customHeader: customHeader, | ||
761 | contentBody: reader, | ||
762 | contentLength: size, | ||
763 | contentMD5Base64: md5Base64, | ||
764 | contentSHA256Hex: sha256Hex, | ||
765 | streamSha256: !opts.DisableContentSha256, | ||
766 | addCrc: addCrc, | ||
767 | } | ||
768 | if opts.Internal.SourceVersionID != "" { | ||
769 | if opts.Internal.SourceVersionID != nullVersionID { | ||
770 | if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { | ||
771 | return UploadInfo{}, errInvalidArgument(err.Error()) | ||
772 | } | ||
773 | } | ||
774 | urlValues := make(url.Values) | ||
775 | urlValues.Set("versionId", opts.Internal.SourceVersionID) | ||
776 | reqMetadata.queryValues = urlValues | ||
777 | } | ||
778 | |||
779 | // Execute PUT on an objectName. | ||
780 | resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) | ||
781 | defer closeResponse(resp) | ||
782 | if err != nil { | ||
783 | return UploadInfo{}, err | ||
784 | } | ||
785 | if resp != nil { | ||
786 | if resp.StatusCode != http.StatusOK { | ||
787 | return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) | ||
788 | } | ||
789 | } | ||
790 | |||
791 | // extract lifecycle expiry date and rule ID | ||
792 | expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) | ||
793 | h := resp.Header | ||
794 | return UploadInfo{ | ||
795 | Bucket: bucketName, | ||
796 | Key: objectName, | ||
797 | ETag: trimEtag(h.Get("ETag")), | ||
798 | VersionID: h.Get(amzVersionID), | ||
799 | Size: size, | ||
800 | Expiration: expTime, | ||
801 | ExpirationRuleID: ruleID, | ||
802 | |||
803 | // Checksum values | ||
804 | ChecksumCRC32: h.Get("x-amz-checksum-crc32"), | ||
805 | ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), | ||
806 | ChecksumSHA1: h.Get("x-amz-checksum-sha1"), | ||
807 | ChecksumSHA256: h.Get("x-amz-checksum-sha256"), | ||
808 | }, nil | ||
809 | } | ||
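The streaming file above selects putObjectMultipartStreamParallel when ConcurrentStreamParts is set and NumThreads > 1, which suits non-seekable input of unknown length. A minimal usage sketch; endpoint, credentials and names are placeholders, and the public PutObject signature is assumed from the library's API since its definition falls outside this diff:

package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials, not values from the diff.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	opts := minio.PutObjectOptions{
		// Fill NumThreads buffers of PartSize bytes serially and upload
		// them in parallel (the putObjectMultipartStreamParallel path).
		ConcurrentStreamParts: true,
		NumThreads:            4,
		PartSize:              16 << 20, // 16 MiB per part.
	}

	// os.Stdin stands in for any non-seekable stream; size -1 marks the
	// length as unknown.
	info, err := client.PutObject(context.Background(), "my-bucket",
		"stream/object", os.Stdin, -1, opts)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes", info.Size)
}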
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go new file mode 100644 index 0000000..bbd8924 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go | |||
@@ -0,0 +1,473 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/base64" | ||
24 | "errors" | ||
25 | "fmt" | ||
26 | "hash/crc32" | ||
27 | "io" | ||
28 | "net/http" | ||
29 | "sort" | ||
30 | "time" | ||
31 | |||
32 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
33 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
34 | "golang.org/x/net/http/httpguts" | ||
35 | ) | ||
36 | |||
37 | // ReplicationStatus represents the replication status of an object | ||
38 | type ReplicationStatus string | ||
39 | |||
40 | const ( | ||
41 | // ReplicationStatusPending indicates replication is pending | ||
42 | ReplicationStatusPending ReplicationStatus = "PENDING" | ||
43 | // ReplicationStatusComplete indicates replication completed ok | ||
44 | ReplicationStatusComplete ReplicationStatus = "COMPLETED" | ||
45 | // ReplicationStatusFailed indicates replication failed | ||
46 | ReplicationStatusFailed ReplicationStatus = "FAILED" | ||
47 | // ReplicationStatusReplica indicates object is a replica of a source | ||
48 | ReplicationStatusReplica ReplicationStatus = "REPLICA" | ||
49 | ) | ||
50 | |||
51 | // Empty returns true if no replication status is set. | ||
52 | func (r ReplicationStatus) Empty() bool { | ||
53 | return r == "" | ||
54 | } | ||
55 | |||
56 | // AdvancedPutOptions for internal use - to be utilized by the replication and | ||
57 | // ILM transition implementation on the MinIO server | ||
58 | type AdvancedPutOptions struct { | ||
59 | SourceVersionID string | ||
60 | SourceETag string | ||
61 | ReplicationStatus ReplicationStatus | ||
62 | SourceMTime time.Time | ||
63 | ReplicationRequest bool | ||
64 | RetentionTimestamp time.Time | ||
65 | TaggingTimestamp time.Time | ||
66 | LegalholdTimestamp time.Time | ||
67 | ReplicationValidityCheck bool | ||
68 | } | ||
69 | |||
70 | // PutObjectOptions represents options specified by user for PutObject call | ||
71 | type PutObjectOptions struct { | ||
72 | UserMetadata map[string]string | ||
73 | UserTags map[string]string | ||
74 | Progress io.Reader | ||
75 | ContentType string | ||
76 | ContentEncoding string | ||
77 | ContentDisposition string | ||
78 | ContentLanguage string | ||
79 | CacheControl string | ||
80 | Expires time.Time | ||
81 | Mode RetentionMode | ||
82 | RetainUntilDate time.Time | ||
83 | ServerSideEncryption encrypt.ServerSide | ||
84 | NumThreads uint | ||
85 | StorageClass string | ||
86 | WebsiteRedirectLocation string | ||
87 | PartSize uint64 | ||
88 | LegalHold LegalHoldStatus | ||
89 | SendContentMd5 bool | ||
90 | DisableContentSha256 bool | ||
91 | DisableMultipart bool | ||
92 | |||
93 | // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes, | ||
94 | // fill them serially and upload them in parallel. | ||
95 | // This can be used for faster uploads on non-seekable or slow-to-seek input. | ||
96 | ConcurrentStreamParts bool | ||
97 | Internal AdvancedPutOptions | ||
98 | |||
99 | customHeaders http.Header | ||
100 | } | ||
101 | |||
102 | // SetMatchETag makes the PUT succeed only if the object's existing ETag | ||
103 | // matches the given value; otherwise MinIO returns an error. This is a | ||
104 | // MinIO specific extension to support optimistic locking semantics. | ||
105 | func (opts *PutObjectOptions) SetMatchETag(etag string) { | ||
106 | if opts.customHeaders == nil { | ||
107 | opts.customHeaders = http.Header{} | ||
108 | } | ||
109 | opts.customHeaders.Set("If-Match", "\""+etag+"\"") | ||
110 | } | ||
111 | |||
112 | // SetMatchETagExcept makes the PUT succeed only if the object's existing | ||
113 | // ETag does not match the given value; otherwise MinIO returns an error. | ||
114 | // This is a MinIO specific extension to support optimistic locking semantics. | ||
115 | func (opts *PutObjectOptions) SetMatchETagExcept(etag string) { | ||
116 | if opts.customHeaders == nil { | ||
117 | opts.customHeaders = http.Header{} | ||
118 | } | ||
119 | opts.customHeaders.Set("If-None-Match", "\""+etag+"\"") | ||
120 | } | ||
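A minimal usage sketch of these two conditional-PUT helpers. The client value, bucket name, object name and prior ETag are placeholder assumptions, not part of the vendored file:

    package demo

    import (
        "bytes"
        "context"
        "log"

        "github.com/minio/minio-go/v7"
    )

    // conditionalPut overwrites cfg.json only if it still carries lastSeenETag.
    func conditionalPut(ctx context.Context, client *minio.Client, lastSeenETag string) {
        payload := []byte(`{"version": 2}`)
        opts := minio.PutObjectOptions{ContentType: "application/json"}
        opts.SetMatchETag(lastSeenETag) // sends If-Match: "<etag>"

        _, err := client.PutObject(ctx, "my-bucket", "cfg.json",
            bytes.NewReader(payload), int64(len(payload)), opts)
        if err != nil {
            log.Println("object changed since last read:", err)
        }
    }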
121 | |||
122 | // getNumThreads - gets the number of threads to be used in the multipart | ||
123 | // put object operation | ||
124 | func (opts PutObjectOptions) getNumThreads() (numThreads int) { | ||
125 | if opts.NumThreads > 0 { | ||
126 | numThreads = int(opts.NumThreads) | ||
127 | } else { | ||
128 | numThreads = totalWorkers | ||
129 | } | ||
130 | return | ||
131 | } | ||
132 | |||
133 | // Header - constructs the headers from the metadata set by the user in the | ||
134 | // PutObjectOptions struct | ||
135 | func (opts PutObjectOptions) Header() (header http.Header) { | ||
136 | header = make(http.Header) | ||
137 | |||
138 | contentType := opts.ContentType | ||
139 | if contentType == "" { | ||
140 | contentType = "application/octet-stream" | ||
141 | } | ||
142 | header.Set("Content-Type", contentType) | ||
143 | |||
144 | if opts.ContentEncoding != "" { | ||
145 | header.Set("Content-Encoding", opts.ContentEncoding) | ||
146 | } | ||
147 | if opts.ContentDisposition != "" { | ||
148 | header.Set("Content-Disposition", opts.ContentDisposition) | ||
149 | } | ||
150 | if opts.ContentLanguage != "" { | ||
151 | header.Set("Content-Language", opts.ContentLanguage) | ||
152 | } | ||
153 | if opts.CacheControl != "" { | ||
154 | header.Set("Cache-Control", opts.CacheControl) | ||
155 | } | ||
156 | |||
157 | if !opts.Expires.IsZero() { | ||
158 | header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat)) | ||
159 | } | ||
160 | |||
161 | if opts.Mode != "" { | ||
162 | header.Set(amzLockMode, opts.Mode.String()) | ||
163 | } | ||
164 | |||
165 | if !opts.RetainUntilDate.IsZero() { | ||
166 | header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339)) | ||
167 | } | ||
168 | |||
169 | if opts.LegalHold != "" { | ||
170 | header.Set(amzLegalHoldHeader, opts.LegalHold.String()) | ||
171 | } | ||
172 | |||
173 | if opts.ServerSideEncryption != nil { | ||
174 | opts.ServerSideEncryption.Marshal(header) | ||
175 | } | ||
176 | |||
177 | if opts.StorageClass != "" { | ||
178 | header.Set(amzStorageClass, opts.StorageClass) | ||
179 | } | ||
180 | |||
181 | if opts.WebsiteRedirectLocation != "" { | ||
182 | header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation) | ||
183 | } | ||
184 | |||
185 | if !opts.Internal.ReplicationStatus.Empty() { | ||
186 | header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) | ||
187 | } | ||
188 | if !opts.Internal.SourceMTime.IsZero() { | ||
189 | header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano)) | ||
190 | } | ||
191 | if opts.Internal.SourceETag != "" { | ||
192 | header.Set(minIOBucketSourceETag, opts.Internal.SourceETag) | ||
193 | } | ||
194 | if opts.Internal.ReplicationRequest { | ||
195 | header.Set(minIOBucketReplicationRequest, "true") | ||
196 | } | ||
197 | if opts.Internal.ReplicationValidityCheck { | ||
198 | header.Set(minIOBucketReplicationCheck, "true") | ||
199 | } | ||
200 | if !opts.Internal.LegalholdTimestamp.IsZero() { | ||
201 | header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) | ||
202 | } | ||
203 | if !opts.Internal.RetentionTimestamp.IsZero() { | ||
204 | header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) | ||
205 | } | ||
206 | if !opts.Internal.TaggingTimestamp.IsZero() { | ||
207 | header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano)) | ||
208 | } | ||
209 | |||
210 | if len(opts.UserTags) != 0 { | ||
211 | header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags)) | ||
212 | } | ||
213 | |||
214 | for k, v := range opts.UserMetadata { | ||
215 | if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { | ||
216 | header.Set(k, v) | ||
217 | } else { | ||
218 | header.Set("x-amz-meta-"+k, v) | ||
219 | } | ||
220 | } | ||
221 | |||
222 | // set any other additional custom headers. | ||
223 | for k, v := range opts.customHeaders { | ||
224 | header[k] = v | ||
225 | } | ||
226 | |||
227 | return | ||
228 | } | ||
229 | |||
230 | // validate() checks if the UserMetadata map contains standard or reserved headers and raises an error if so. | ||
231 | func (opts PutObjectOptions) validate() (err error) { | ||
232 | for k, v := range opts.UserMetadata { | ||
233 | if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { | ||
234 | return errInvalidArgument(k + " unsupported user defined metadata name") | ||
235 | } | ||
236 | if !httpguts.ValidHeaderFieldValue(v) { | ||
237 | return errInvalidArgument(v + " unsupported user defined metadata value") | ||
238 | } | ||
239 | } | ||
240 | if opts.Mode != "" && !opts.Mode.IsValid() { | ||
241 | return errInvalidArgument(opts.Mode.String() + " unsupported retention mode") | ||
242 | } | ||
243 | if opts.LegalHold != "" && !opts.LegalHold.IsValid() { | ||
244 | return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status") | ||
245 | } | ||
246 | return nil | ||
247 | } | ||
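A short sketch of what validate() rejects: standard HTTP headers may not appear as user-defined metadata keys. The client and bucket name below are placeholder assumptions:

    package demo

    import (
        "bytes"
        "context"
        "fmt"

        "github.com/minio/minio-go/v7"
    )

    func rejectedMetadata(ctx context.Context, client *minio.Client) {
        opts := minio.PutObjectOptions{
            // "Content-Type" is a standard header, so validate() fails
            // before any request is sent; use the ContentType field instead.
            UserMetadata: map[string]string{"Content-Type": "text/plain"},
        }
        _, err := client.PutObject(ctx, "my-bucket", "obj.txt",
            bytes.NewReader([]byte("x")), 1, opts)
        fmt.Println(err) // "Content-Type unsupported user defined metadata name"
    }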
248 | |||
249 | // completedParts is a collection of parts sortable by their part numbers. | ||
250 | // used for sorting the uploaded parts before completing the multipart request. | ||
251 | type completedParts []CompletePart | ||
252 | |||
253 | func (a completedParts) Len() int { return len(a) } | ||
254 | func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } | ||
255 | func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } | ||
256 | |||
257 | // PutObject creates an object in a bucket. | ||
258 | // | ||
259 | // You must have WRITE permissions on a bucket to create an object. | ||
260 | // | ||
261 | // - For size smaller than 16MiB PutObject automatically does a | ||
262 | // single atomic PUT operation. | ||
263 | // | ||
264 | // - For size larger than 16MiB PutObject automatically does a | ||
265 | // multipart upload operation. | ||
266 | // | ||
267 | // - For size input as -1 PutObject does a multipart Put operation | ||
268 | // until input stream reaches EOF. Maximum object size that can | ||
269 | // be uploaded through this operation will be 5TiB. | ||
270 | // | ||
271 | // WARNING: Passing '-1' forces PutObject() to buffer data in memory, and these | ||
272 | // buffers cannot be reused. For best outcomes, always pass the size. | ||
273 | // | ||
274 | // NOTE: Upon errors during upload, the multipart operation is entirely aborted. | ||
275 | func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, | ||
276 | opts PutObjectOptions, | ||
277 | ) (info UploadInfo, err error) { | ||
278 | if objectSize < 0 && opts.DisableMultipart { | ||
279 | return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") | ||
280 | } | ||
281 | |||
282 | err = opts.validate() | ||
283 | if err != nil { | ||
284 | return UploadInfo{}, err | ||
285 | } | ||
286 | |||
287 | return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) | ||
288 | } | ||
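A hedged usage sketch of the size-dependent behavior documented above; the client, bucket name and unbounded reader are assumptions:

    package demo

    import (
        "bytes"
        "context"
        "io"
        "log"

        "github.com/minio/minio-go/v7"
    )

    func putExamples(ctx context.Context, client *minio.Client, unbounded io.Reader) {
        // Known small size: a single atomic PUT.
        data := []byte("hello world")
        if _, err := client.PutObject(ctx, "my-bucket", "greeting.txt",
            bytes.NewReader(data), int64(len(data)),
            minio.PutObjectOptions{ContentType: "text/plain"}); err != nil {
            log.Fatal(err)
        }

        // Unknown size: pass -1 and let the client run a multipart upload
        // until the stream reaches EOF, buffering PartSize bytes per part.
        if _, err := client.PutObject(ctx, "my-bucket", "stream.bin",
            unbounded, -1,
            minio.PutObjectOptions{PartSize: 16 << 20}); err != nil { // 16MiB parts
            log.Fatal(err)
        }
    }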
289 | |||
290 | func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { | ||
291 | // Check for largest object size allowed. | ||
292 | if size > int64(maxMultipartPutObjectSize) { | ||
293 | return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) | ||
294 | } | ||
295 | |||
296 | // NOTE: Streaming signature is not supported by GCS. | ||
297 | if s3utils.IsGoogleEndpoint(*c.endpointURL) { | ||
298 | return c.putObject(ctx, bucketName, objectName, reader, size, opts) | ||
299 | } | ||
300 | |||
301 | partSize := opts.PartSize | ||
302 | if opts.PartSize == 0 { | ||
303 | partSize = minPartSize | ||
304 | } | ||
305 | |||
306 | if c.overrideSignerType.IsV2() { | ||
307 | if size >= 0 && size < int64(partSize) || opts.DisableMultipart { | ||
308 | return c.putObject(ctx, bucketName, objectName, reader, size, opts) | ||
309 | } | ||
310 | return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) | ||
311 | } | ||
312 | |||
313 | if size < 0 { | ||
314 | if opts.DisableMultipart { | ||
315 | return UploadInfo{}, errors.New("no length provided and multipart disabled") | ||
316 | } | ||
317 | if opts.ConcurrentStreamParts && opts.NumThreads > 1 { | ||
318 | return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) | ||
319 | } | ||
320 | return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) | ||
321 | } | ||
322 | |||
323 | if size < int64(partSize) || opts.DisableMultipart { | ||
324 | return c.putObject(ctx, bucketName, objectName, reader, size, opts) | ||
325 | } | ||
326 | |||
327 | return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) | ||
328 | } | ||
329 | |||
330 | func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { | ||
331 | // Input validation. | ||
332 | if err = s3utils.CheckValidBucketName(bucketName); err != nil { | ||
333 | return UploadInfo{}, err | ||
334 | } | ||
335 | if err = s3utils.CheckValidObjectName(objectName); err != nil { | ||
336 | return UploadInfo{}, err | ||
337 | } | ||
338 | |||
339 | // Total data read and written to the server, accumulated | ||
340 | // in totalUploadedSize and returned at the end of the call. | ||
341 | var totalUploadedSize int64 | ||
342 | |||
343 | // Complete multipart upload. | ||
344 | var complMultipartUpload completeMultipartUpload | ||
345 | |||
346 | // Calculate the optimal parts info for a given size. | ||
347 | totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) | ||
348 | if err != nil { | ||
349 | return UploadInfo{}, err | ||
350 | } | ||
351 | |||
352 | if !opts.SendContentMd5 { | ||
353 | if opts.UserMetadata == nil { | ||
354 | opts.UserMetadata = make(map[string]string, 1) | ||
355 | } | ||
356 | opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" | ||
357 | } | ||
358 | |||
359 | // Initiate a new multipart upload. | ||
360 | uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) | ||
361 | if err != nil { | ||
362 | return UploadInfo{}, err | ||
363 | } | ||
364 | delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") | ||
365 | |||
366 | defer func() { | ||
367 | if err != nil { | ||
368 | c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | ||
369 | } | ||
370 | }() | ||
371 | |||
372 | // Part number always starts with '1'. | ||
373 | partNumber := 1 | ||
374 | |||
375 | // Initialize parts uploaded map. | ||
376 | partsInfo := make(map[int]ObjectPart) | ||
377 | |||
378 | // Create a buffer. | ||
379 | buf := make([]byte, partSize) | ||
380 | |||
381 | // Create checksums | ||
382 | // CRC32C is ~50% faster on AMD64 @ 30GB/s | ||
383 | var crcBytes []byte | ||
384 | customHeader := make(http.Header) | ||
385 | crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) | ||
386 | |||
387 | for partNumber <= totalPartsCount { | ||
388 | length, rerr := readFull(reader, buf) | ||
389 | if rerr == io.EOF && partNumber > 1 { | ||
390 | break | ||
391 | } | ||
392 | |||
393 | if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { | ||
394 | return UploadInfo{}, rerr | ||
395 | } | ||
396 | |||
397 | var md5Base64 string | ||
398 | if opts.SendContentMd5 { | ||
399 | // Calculate md5sum. | ||
400 | hash := c.md5Hasher() | ||
401 | hash.Write(buf[:length]) | ||
402 | md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) | ||
403 | hash.Close() | ||
404 | } else { | ||
405 | crc.Reset() | ||
406 | crc.Write(buf[:length]) | ||
407 | cSum := crc.Sum(nil) | ||
408 | customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) | ||
409 | crcBytes = append(crcBytes, cSum...) | ||
410 | } | ||
411 | |||
412 | // Update progress reader appropriately to the latest offset | ||
413 | // as we read from the source. | ||
414 | rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) | ||
415 | |||
416 | // Proceed to upload the part. | ||
417 | p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} | ||
418 | objPart, uerr := c.uploadPart(ctx, p) | ||
419 | if uerr != nil { | ||
420 | return UploadInfo{}, uerr | ||
421 | } | ||
422 | |||
423 | // Save successfully uploaded part metadata. | ||
424 | partsInfo[partNumber] = objPart | ||
425 | |||
426 | // Save successfully uploaded size. | ||
427 | totalUploadedSize += int64(length) | ||
428 | |||
429 | // Increment part number. | ||
430 | partNumber++ | ||
431 | |||
432 | // For unknown size, we break away once Read returns EOF. | ||
433 | // We do not have to upload till totalPartsCount. | ||
434 | if rerr == io.EOF { | ||
435 | break | ||
436 | } | ||
437 | } | ||
438 | |||
439 | // Loop over total uploaded parts to save them in | ||
440 | // Parts array before completing the multipart request. | ||
441 | for i := 1; i < partNumber; i++ { | ||
442 | part, ok := partsInfo[i] | ||
443 | if !ok { | ||
444 | return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) | ||
445 | } | ||
446 | complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ | ||
447 | ETag: part.ETag, | ||
448 | PartNumber: part.PartNumber, | ||
449 | ChecksumCRC32: part.ChecksumCRC32, | ||
450 | ChecksumCRC32C: part.ChecksumCRC32C, | ||
451 | ChecksumSHA1: part.ChecksumSHA1, | ||
452 | ChecksumSHA256: part.ChecksumSHA256, | ||
453 | }) | ||
454 | } | ||
455 | |||
456 | // Sort all completed parts. | ||
457 | sort.Sort(completedParts(complMultipartUpload.Parts)) | ||
458 | |||
459 | opts = PutObjectOptions{} | ||
460 | if len(crcBytes) > 0 { | ||
461 | // Add hash of hashes. | ||
462 | crc.Reset() | ||
463 | crc.Write(crcBytes) | ||
464 | opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} | ||
465 | } | ||
466 | uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) | ||
467 | if err != nil { | ||
468 | return UploadInfo{}, err | ||
469 | } | ||
470 | |||
471 | uploadInfo.Size = totalUploadedSize | ||
472 | return uploadInfo, nil | ||
473 | } | ||
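A standalone sketch of the "hash of hashes" computed above: each part's CRC32C is appended to crcBytes, and the object-level checksum is the CRC32C of that concatenation. The sample part contents are arbitrary:

    package main

    import (
        "encoding/base64"
        "fmt"
        "hash/crc32"
    )

    func main() {
        parts := [][]byte{[]byte("part-1 data"), []byte("part-2 data")}
        crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))

        var crcBytes []byte
        for _, p := range parts {
            crc.Reset()
            crc.Write(p)
            crcBytes = append(crcBytes, crc.Sum(nil)...) // per-part checksum
        }

        crc.Reset()
        crc.Write(crcBytes) // hash of hashes, sent as X-Amz-Checksum-Crc32c
        fmt.Println(base64.StdEncoding.EncodeToString(crc.Sum(nil)))
    }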
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go new file mode 100644 index 0000000..eb4da41 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go | |||
@@ -0,0 +1,246 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2021 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "archive/tar" | ||
22 | "bufio" | ||
23 | "bytes" | ||
24 | "context" | ||
25 | "fmt" | ||
26 | "io" | ||
27 | "net/http" | ||
28 | "os" | ||
29 | "strings" | ||
30 | "sync" | ||
31 | "time" | ||
32 | |||
33 | "github.com/klauspost/compress/s2" | ||
34 | ) | ||
35 | |||
36 | // SnowballOptions contains options for PutObjectsSnowball calls. | ||
37 | type SnowballOptions struct { | ||
38 | // Opts is options applied to all objects. | ||
39 | Opts PutObjectOptions | ||
40 | |||
41 | // Processing options: | ||
42 | |||
43 | // InMemory specifies that all objects should be collected in memory | ||
44 | // before they are uploaded. | ||
45 | // If false a temporary file will be created. | ||
46 | InMemory bool | ||
47 | |||
48 | // Compress enables content compression before upload. | ||
49 | // Compression will typically reduce memory and network usage. | ||
50 | // Compression can safely be enabled with MinIO hosts. | ||
51 | Compress bool | ||
52 | |||
53 | // SkipErrs, if enabled, will skip any errors encountered while reading | ||
54 | // the object content when creating the snowball archive | ||
55 | SkipErrs bool | ||
56 | } | ||
57 | |||
58 | // SnowballObject contains information about a single object to be added to the snowball. | ||
59 | type SnowballObject struct { | ||
60 | // Key is the destination key, including prefix. | ||
61 | Key string | ||
62 | |||
63 | // Size is the content size of this object. | ||
64 | Size int64 | ||
65 | |||
66 | // Modtime to apply to the object. | ||
67 | // If Modtime is the zero value current time will be used. | ||
68 | ModTime time.Time | ||
69 | |||
70 | // Content of the object. | ||
71 | // Exactly 'Size' number of bytes must be provided. | ||
72 | Content io.Reader | ||
73 | |||
74 | // VersionID of the object; if empty, a new versionID will be generated | ||
75 | VersionID string | ||
76 | |||
77 | // Headers contains more options for this object upload, the same as you | ||
78 | // would include in a regular PutObject operation, such as user metadata, | ||
79 | // content-disposition, expires, etc. | ||
80 | Headers http.Header | ||
81 | |||
82 | // Close will be called when an object has finished processing. | ||
83 | // Note that if PutObjectsSnowball returns because of an error, | ||
84 | // objects not consumed from the input will NOT have been closed. | ||
85 | // Leave as nil for no callback. | ||
86 | Close func() | ||
87 | } | ||
88 | |||
89 | type nopReadSeekCloser struct { | ||
90 | io.ReadSeeker | ||
91 | } | ||
92 | |||
93 | func (n nopReadSeekCloser) Close() error { | ||
94 | return nil | ||
95 | } | ||
96 | |||
97 | // This is available as io.ReadSeekCloser from go1.16 | ||
98 | type readSeekCloser interface { | ||
99 | io.Reader | ||
100 | io.Closer | ||
101 | io.Seeker | ||
102 | } | ||
103 | |||
104 | // PutObjectsSnowball will put multiple objects with a single put call. | ||
105 | // A (compressed) TAR file will be created containing all of the objects. | ||
106 | // The key for each object will be used for the destination in the specified bucket. | ||
107 | // Total size should be < 5TB. | ||
108 | // This function blocks until 'objs' is closed and the content has been uploaded. | ||
109 | func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) { | ||
110 | err = opts.Opts.validate() | ||
111 | if err != nil { | ||
112 | return err | ||
113 | } | ||
114 | var tmpWriter io.Writer | ||
115 | var getTmpReader func() (rc readSeekCloser, sz int64, err error) | ||
116 | if opts.InMemory { | ||
117 | b := bytes.NewBuffer(nil) | ||
118 | tmpWriter = b | ||
119 | getTmpReader = func() (readSeekCloser, int64, error) { | ||
120 | return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil | ||
121 | } | ||
122 | } else { | ||
123 | f, err := os.CreateTemp("", "s3-putsnowballobjects-*") | ||
124 | if err != nil { | ||
125 | return err | ||
126 | } | ||
127 | name := f.Name() | ||
128 | tmpWriter = f | ||
129 | var once sync.Once | ||
130 | defer once.Do(func() { | ||
131 | f.Close() | ||
132 | }) | ||
133 | defer os.Remove(name) | ||
134 | getTmpReader = func() (readSeekCloser, int64, error) { | ||
135 | once.Do(func() { | ||
136 | f.Close() | ||
137 | }) | ||
138 | f, err := os.Open(name) | ||
139 | if err != nil { | ||
140 | return nil, 0, err | ||
141 | } | ||
142 | st, err := f.Stat() | ||
143 | if err != nil { | ||
144 | return nil, 0, err | ||
145 | } | ||
146 | return f, st.Size(), nil | ||
147 | } | ||
148 | } | ||
149 | flush := func() error { return nil } | ||
150 | if !opts.Compress { | ||
151 | if !opts.InMemory { | ||
152 | // Insert buffer for writes. | ||
153 | buf := bufio.NewWriterSize(tmpWriter, 1<<20) | ||
154 | flush = buf.Flush | ||
155 | tmpWriter = buf | ||
156 | } | ||
157 | } else { | ||
158 | s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression()) | ||
159 | flush = s2c.Close | ||
160 | defer s2c.Close() | ||
161 | tmpWriter = s2c | ||
162 | } | ||
163 | t := tar.NewWriter(tmpWriter) | ||
164 | |||
165 | objectLoop: | ||
166 | for { | ||
167 | select { | ||
168 | case <-ctx.Done(): | ||
169 | return ctx.Err() | ||
170 | case obj, ok := <-objs: | ||
171 | if !ok { | ||
172 | break objectLoop | ||
173 | } | ||
174 | |||
175 | closeObj := func() {} | ||
176 | if obj.Close != nil { | ||
177 | closeObj = obj.Close | ||
178 | } | ||
179 | |||
180 | // Trim accidental slash prefix. | ||
181 | obj.Key = strings.TrimPrefix(obj.Key, "/") | ||
182 | header := tar.Header{ | ||
183 | Typeflag: tar.TypeReg, | ||
184 | Name: obj.Key, | ||
185 | Size: obj.Size, | ||
186 | ModTime: obj.ModTime, | ||
187 | Format: tar.FormatPAX, | ||
188 | } | ||
189 | if header.ModTime.IsZero() { | ||
190 | header.ModTime = time.Now().UTC() | ||
191 | } | ||
192 | |||
193 | header.PAXRecords = make(map[string]string) | ||
194 | if obj.VersionID != "" { | ||
195 | header.PAXRecords["minio.versionId"] = obj.VersionID | ||
196 | } | ||
197 | for k, vals := range obj.Headers { | ||
198 | header.PAXRecords["minio.metadata."+k] = strings.Join(vals, ",") | ||
199 | } | ||
200 | |||
201 | if err := t.WriteHeader(&header); err != nil { | ||
202 | closeObj() | ||
203 | return err | ||
204 | } | ||
205 | n, err := io.Copy(t, obj.Content) | ||
206 | if err != nil { | ||
207 | closeObj() | ||
208 | if opts.SkipErrs { | ||
209 | continue | ||
210 | } | ||
211 | return err | ||
212 | } | ||
213 | if n != obj.Size { | ||
214 | closeObj() | ||
215 | if opts.SkipErrs { | ||
216 | continue | ||
217 | } | ||
218 | return io.ErrUnexpectedEOF | ||
219 | } | ||
220 | closeObj() | ||
221 | } | ||
222 | } | ||
223 | // Flush tar | ||
224 | err = t.Flush() | ||
225 | if err != nil { | ||
226 | return err | ||
227 | } | ||
228 | // Flush compression | ||
229 | err = flush() | ||
230 | if err != nil { | ||
231 | return err | ||
232 | } | ||
233 | if opts.Opts.UserMetadata == nil { | ||
234 | opts.Opts.UserMetadata = map[string]string{} | ||
235 | } | ||
236 | opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true" | ||
237 | opts.Opts.DisableMultipart = true | ||
238 | rc, sz, err := getTmpReader() | ||
239 | if err != nil { | ||
240 | return err | ||
241 | } | ||
242 | defer rc.Close() | ||
243 | rand := c.random.Uint64() | ||
244 | _, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts) | ||
245 | return err | ||
246 | } | ||
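A hedged usage sketch for PutObjectsSnowball: a goroutine feeds a small batch of objects through the channel while the call blocks until the archive is uploaded. The client and bucket name are assumptions:

    package demo

    import (
        "context"
        "fmt"
        "strings"
        "time"

        "github.com/minio/minio-go/v7"
    )

    func snowballUpload(ctx context.Context, client *minio.Client) error {
        objs := make(chan minio.SnowballObject, 2)
        go func() {
            defer close(objs)
            for i := 0; i < 2; i++ {
                body := fmt.Sprintf("content-%d", i)
                objs <- minio.SnowballObject{
                    Key:     fmt.Sprintf("batch/obj-%d.txt", i),
                    Size:    int64(len(body)),
                    ModTime: time.Now(),
                    Content: strings.NewReader(body),
                }
            }
        }()
        // Small batch: keep the archive in memory and compress it.
        return client.PutObjectsSnowball(ctx, "my-bucket",
            minio.SnowballOptions{InMemory: true, Compress: true}, objs)
    }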
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go new file mode 100644 index 0000000..9c0ac44 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go | |||
@@ -0,0 +1,548 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/xml" | ||
24 | "io" | ||
25 | "net/http" | ||
26 | "net/url" | ||
27 | "time" | ||
28 | |||
29 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
30 | ) | ||
31 | |||
32 | //revive:disable | ||
33 | |||
34 | // Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions. | ||
35 | type BucketOptions = RemoveBucketOptions | ||
36 | |||
37 | //revive:enable | ||
38 | |||
39 | // RemoveBucketOptions special headers to purge buckets, only | ||
40 | // useful when the endpoint is MinIO | ||
41 | type RemoveBucketOptions struct { | ||
42 | ForceDelete bool | ||
43 | } | ||
44 | |||
45 | // RemoveBucketWithOptions deletes the named bucket. | ||
46 | // | ||
47 | // All objects (including all object versions and delete markers) | ||
48 | // in the bucket will be deleted forcibly if bucket options set | ||
49 | // ForceDelete to 'true'. | ||
50 | func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error { | ||
51 | // Input validation. | ||
52 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
53 | return err | ||
54 | } | ||
55 | |||
56 | // Build headers. | ||
57 | headers := make(http.Header) | ||
58 | if opts.ForceDelete { | ||
59 | headers.Set(minIOForceDelete, "true") | ||
60 | } | ||
61 | |||
62 | // Execute DELETE on bucket. | ||
63 | resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ | ||
64 | bucketName: bucketName, | ||
65 | contentSHA256Hex: emptySHA256Hex, | ||
66 | customHeader: headers, | ||
67 | }) | ||
68 | defer closeResponse(resp) | ||
69 | if err != nil { | ||
70 | return err | ||
71 | } | ||
72 | if resp != nil { | ||
73 | if resp.StatusCode != http.StatusNoContent { | ||
74 | return httpRespToErrorResponse(resp, bucketName, "") | ||
75 | } | ||
76 | } | ||
77 | |||
78 | // Remove the location from cache on a successful delete. | ||
79 | c.bucketLocCache.Delete(bucketName) | ||
80 | return nil | ||
81 | } | ||
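A minimal sketch of a forced bucket purge against a MinIO endpoint; the client and bucket name are placeholder assumptions:

    package demo

    import (
        "context"

        "github.com/minio/minio-go/v7"
    )

    func purgeBucket(ctx context.Context, client *minio.Client) error {
        // ForceDelete is honored by MinIO endpoints only; it removes all
        // objects, versions and delete markers along with the bucket.
        return client.RemoveBucketWithOptions(ctx, "scratch-bucket",
            minio.RemoveBucketOptions{ForceDelete: true})
    }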
82 | |||
83 | // RemoveBucket deletes the named bucket. | ||
84 | // | ||
85 | // All objects (including all object versions and delete markers) | ||
86 | // in the bucket must be deleted before successfully attempting this request. | ||
87 | func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error { | ||
88 | // Input validation. | ||
89 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
90 | return err | ||
91 | } | ||
92 | // Execute DELETE on bucket. | ||
93 | resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ | ||
94 | bucketName: bucketName, | ||
95 | contentSHA256Hex: emptySHA256Hex, | ||
96 | }) | ||
97 | defer closeResponse(resp) | ||
98 | if err != nil { | ||
99 | return err | ||
100 | } | ||
101 | if resp != nil { | ||
102 | if resp.StatusCode != http.StatusNoContent { | ||
103 | return httpRespToErrorResponse(resp, bucketName, "") | ||
104 | } | ||
105 | } | ||
106 | |||
107 | // Remove the location from cache on a successful delete. | ||
108 | c.bucketLocCache.Delete(bucketName) | ||
109 | |||
110 | return nil | ||
111 | } | ||
112 | |||
113 | // AdvancedRemoveOptions intended for internal use by replication | ||
114 | type AdvancedRemoveOptions struct { | ||
115 | ReplicationDeleteMarker bool | ||
116 | ReplicationStatus ReplicationStatus | ||
117 | ReplicationMTime time.Time | ||
118 | ReplicationRequest bool | ||
119 | ReplicationValidityCheck bool // check permissions | ||
120 | } | ||
121 | |||
122 | // RemoveObjectOptions represents options specified by user for RemoveObject call | ||
123 | type RemoveObjectOptions struct { | ||
124 | ForceDelete bool | ||
125 | GovernanceBypass bool | ||
126 | VersionID string | ||
127 | Internal AdvancedRemoveOptions | ||
128 | } | ||
129 | |||
130 | // RemoveObject removes an object from a bucket. | ||
131 | func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { | ||
132 | // Input validation. | ||
133 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
134 | return err | ||
135 | } | ||
136 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
137 | return err | ||
138 | } | ||
139 | |||
140 | res := c.removeObject(ctx, bucketName, objectName, opts) | ||
141 | return res.Err | ||
142 | } | ||
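A minimal sketch of a versioned delete that bypasses governance-mode retention; the client, names and version ID are placeholder assumptions:

    package demo

    import (
        "context"

        "github.com/minio/minio-go/v7"
    )

    func removeOneVersion(ctx context.Context, client *minio.Client, versionID string) error {
        // Deletes one specific version instead of creating a delete marker.
        return client.RemoveObject(ctx, "my-bucket", "report.csv",
            minio.RemoveObjectOptions{
                VersionID:        versionID,
                GovernanceBypass: true,
            })
    }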
143 | |||
144 | func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult { | ||
145 | // Get resources properly escaped and lined up before | ||
146 | // using them in http request. | ||
147 | urlValues := make(url.Values) | ||
148 | |||
149 | if opts.VersionID != "" { | ||
150 | urlValues.Set("versionId", opts.VersionID) | ||
151 | } | ||
152 | |||
153 | // Build headers. | ||
154 | headers := make(http.Header) | ||
155 | |||
156 | if opts.GovernanceBypass { | ||
157 | // Set the bypass governance retention header | ||
158 | headers.Set(amzBypassGovernance, "true") | ||
159 | } | ||
160 | if opts.Internal.ReplicationDeleteMarker { | ||
161 | headers.Set(minIOBucketReplicationDeleteMarker, "true") | ||
162 | } | ||
163 | if !opts.Internal.ReplicationMTime.IsZero() { | ||
164 | headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano)) | ||
165 | } | ||
166 | if !opts.Internal.ReplicationStatus.Empty() { | ||
167 | headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) | ||
168 | } | ||
169 | if opts.Internal.ReplicationRequest { | ||
170 | headers.Set(minIOBucketReplicationRequest, "true") | ||
171 | } | ||
172 | if opts.Internal.ReplicationValidityCheck { | ||
173 | headers.Set(minIOBucketReplicationCheck, "true") | ||
174 | } | ||
175 | if opts.ForceDelete { | ||
176 | headers.Set(minIOForceDelete, "true") | ||
177 | } | ||
178 | // Execute DELETE on objectName. | ||
179 | resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ | ||
180 | bucketName: bucketName, | ||
181 | objectName: objectName, | ||
182 | contentSHA256Hex: emptySHA256Hex, | ||
183 | queryValues: urlValues, | ||
184 | customHeader: headers, | ||
185 | }) | ||
186 | defer closeResponse(resp) | ||
187 | if err != nil { | ||
188 | return RemoveObjectResult{Err: err} | ||
189 | } | ||
190 | if resp != nil { | ||
191 | // if some unexpected error happened and max retry is reached, we want to let the client know | ||
192 | if resp.StatusCode != http.StatusNoContent { | ||
193 | err := httpRespToErrorResponse(resp, bucketName, objectName) | ||
194 | return RemoveObjectResult{Err: err} | ||
195 | } | ||
196 | } | ||
197 | |||
198 | // DeleteObject always responds with http '204' even for | ||
199 | // objects which do not exist. So no need to handle them | ||
200 | // specifically. | ||
201 | return RemoveObjectResult{ | ||
202 | ObjectName: objectName, | ||
203 | ObjectVersionID: opts.VersionID, | ||
204 | DeleteMarker: resp.Header.Get("x-amz-delete-marker") == "true", | ||
205 | DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"), | ||
206 | } | ||
207 | } | ||
208 | |||
209 | // RemoveObjectError - container of Multi Delete S3 API error | ||
210 | type RemoveObjectError struct { | ||
211 | ObjectName string | ||
212 | VersionID string | ||
213 | Err error | ||
214 | } | ||
215 | |||
216 | // RemoveObjectResult - container of Multi Delete S3 API result | ||
217 | type RemoveObjectResult struct { | ||
218 | ObjectName string | ||
219 | ObjectVersionID string | ||
220 | |||
221 | DeleteMarker bool | ||
222 | DeleteMarkerVersionID string | ||
223 | |||
224 | Err error | ||
225 | } | ||
226 | |||
227 | // generateRemoveMultiObjectsRequest - generate the XML request body for the Multi-Object Delete API | ||
228 | func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte { | ||
229 | delObjects := []deleteObject{} | ||
230 | for _, obj := range objects { | ||
231 | delObjects = append(delObjects, deleteObject{ | ||
232 | Key: obj.Key, | ||
233 | VersionID: obj.VersionID, | ||
234 | }) | ||
235 | } | ||
236 | xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false}) | ||
237 | return xmlBytes | ||
238 | } | ||
239 | |||
240 | // processRemoveMultiObjectsResponse - parse the Multi-Object Delete web service | ||
241 | // response and return the success/failure result status for each object | ||
242 | func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObjectResult) { | ||
243 | // Parse multi delete XML response | ||
244 | rmResult := &deleteMultiObjectsResult{} | ||
245 | err := xmlDecoder(body, rmResult) | ||
246 | if err != nil { | ||
247 | resultCh <- RemoveObjectResult{ObjectName: "", Err: err} | ||
248 | return | ||
249 | } | ||
250 | |||
251 | // Fill deletion that returned success | ||
252 | for _, obj := range rmResult.DeletedObjects { | ||
253 | resultCh <- RemoveObjectResult{ | ||
254 | ObjectName: obj.Key, | ||
255 | // Only filled with versioned buckets | ||
256 | ObjectVersionID: obj.VersionID, | ||
257 | DeleteMarker: obj.DeleteMarker, | ||
258 | DeleteMarkerVersionID: obj.DeleteMarkerVersionID, | ||
259 | } | ||
260 | } | ||
261 | |||
262 | // Fill deletion that returned an error. | ||
263 | for _, obj := range rmResult.UnDeletedObjects { | ||
264 | // A version that does not exist is not an error; ignore and continue. | ||
265 | switch obj.Code { | ||
266 | case "InvalidArgument", "NoSuchVersion": | ||
267 | continue | ||
268 | } | ||
269 | resultCh <- RemoveObjectResult{ | ||
270 | ObjectName: obj.Key, | ||
271 | ObjectVersionID: obj.VersionID, | ||
272 | Err: ErrorResponse{ | ||
273 | Code: obj.Code, | ||
274 | Message: obj.Message, | ||
275 | }, | ||
276 | } | ||
277 | } | ||
278 | } | ||
279 | |||
280 | // RemoveObjectsOptions represents options specified by user for RemoveObjects call | ||
281 | type RemoveObjectsOptions struct { | ||
282 | GovernanceBypass bool | ||
283 | } | ||
284 | |||
285 | // RemoveObjects removes multiple objects from a bucket; it is | ||
286 | // possible to specify object versions, which are received from | ||
287 | // objectsCh. Remove failures are sent back via the error channel. | ||
288 | func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { | ||
289 | errorCh := make(chan RemoveObjectError, 1) | ||
290 | |||
291 | // Validate if bucket name is valid. | ||
292 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
293 | defer close(errorCh) | ||
294 | errorCh <- RemoveObjectError{ | ||
295 | Err: err, | ||
296 | } | ||
297 | return errorCh | ||
298 | } | ||
299 | // Validate objects channel to be properly allocated. | ||
300 | if objectsCh == nil { | ||
301 | defer close(errorCh) | ||
302 | errorCh <- RemoveObjectError{ | ||
303 | Err: errInvalidArgument("Objects channel cannot be nil"), | ||
304 | } | ||
305 | return errorCh | ||
306 | } | ||
307 | |||
308 | resultCh := make(chan RemoveObjectResult, 1) | ||
309 | go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) | ||
310 | go func() { | ||
311 | defer close(errorCh) | ||
312 | for res := range resultCh { | ||
313 | // Send only errors to the error channel | ||
314 | if res.Err == nil { | ||
315 | continue | ||
316 | } | ||
317 | errorCh <- RemoveObjectError{ | ||
318 | ObjectName: res.ObjectName, | ||
319 | VersionID: res.ObjectVersionID, | ||
320 | Err: res.Err, | ||
321 | } | ||
322 | } | ||
323 | }() | ||
324 | |||
325 | return errorCh | ||
326 | } | ||
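A hedged usage sketch: feed keys into objectsCh and drain the returned error channel. In real code the entries usually come from a listing call; the client and names here are assumptions:

    package demo

    import (
        "context"
        "log"

        "github.com/minio/minio-go/v7"
    )

    func removeMany(ctx context.Context, client *minio.Client) {
        objectsCh := make(chan minio.ObjectInfo)
        go func() {
            defer close(objectsCh)
            for _, key := range []string{"a.txt", "b.txt"} {
                objectsCh <- minio.ObjectInfo{Key: key}
            }
        }()
        // Only failures are reported; draining an empty channel means
        // every removal succeeded.
        for rmErr := range client.RemoveObjects(ctx, "my-bucket", objectsCh,
            minio.RemoveObjectsOptions{}) {
            log.Printf("failed to remove %s: %v", rmErr.ObjectName, rmErr.Err)
        }
    }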
327 | |||
328 | // RemoveObjectsWithResult removes multiple objects from a bucket; it is | ||
329 | // possible to specify object versions, which are received from | ||
330 | // objectsCh. Remove results, successes and failures, are sent back via the | ||
331 | // RemoveObjectResult channel | ||
332 | func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult { | ||
333 | resultCh := make(chan RemoveObjectResult, 1) | ||
334 | |||
335 | // Validate if bucket name is valid. | ||
336 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
337 | defer close(resultCh) | ||
338 | resultCh <- RemoveObjectResult{ | ||
339 | Err: err, | ||
340 | } | ||
341 | return resultCh | ||
342 | } | ||
343 | // Validate objects channel to be properly allocated. | ||
344 | if objectsCh == nil { | ||
345 | defer close(resultCh) | ||
346 | resultCh <- RemoveObjectResult{ | ||
347 | Err: errInvalidArgument("Objects channel cannot be nil"), | ||
348 | } | ||
349 | return resultCh | ||
350 | } | ||
351 | |||
352 | go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) | ||
353 | return resultCh | ||
354 | } | ||
355 | |||
356 | // Return true if the character is within the allowed characters in an XML 1.0 document | ||
357 | // The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets | ||
358 | func validXMLChar(r rune) (ok bool) { | ||
359 | return r == 0x09 || | ||
360 | r == 0x0A || | ||
361 | r == 0x0D || | ||
362 | r >= 0x20 && r <= 0xD7FF || | ||
363 | r >= 0xE000 && r <= 0xFFFD || | ||
364 | r >= 0x10000 && r <= 0x10FFFF | ||
365 | } | ||
366 | |||
367 | func hasInvalidXMLChar(str string) bool { | ||
368 | for _, s := range str { | ||
369 | if !validXMLChar(s) { | ||
370 | return true | ||
371 | } | ||
372 | } | ||
373 | return false | ||
374 | } | ||
375 | |||
376 | // Generate and call MultiDelete S3 requests based on entries received from objectsCh | ||
377 | func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) { | ||
378 | maxEntries := 1000 | ||
379 | finish := false | ||
380 | urlValues := make(url.Values) | ||
381 | urlValues.Set("delete", "") | ||
382 | |||
383 | // Close result channel when Multi delete finishes. | ||
384 | defer close(resultCh) | ||
385 | |||
386 | // Loop over entries by 1000 and call MultiDelete requests | ||
387 | for { | ||
388 | if finish { | ||
389 | break | ||
390 | } | ||
391 | count := 0 | ||
392 | var batch []ObjectInfo | ||
393 | |||
394 | // Try to gather 1000 entries | ||
395 | for object := range objectsCh { | ||
396 | if hasInvalidXMLChar(object.Key) { | ||
397 | // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document. | ||
398 | removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ | ||
399 | VersionID: object.VersionID, | ||
400 | GovernanceBypass: opts.GovernanceBypass, | ||
401 | }) | ||
402 | if err := removeResult.Err; err != nil { | ||
403 | // A version that does not exist is not an error; ignore and continue. | ||
404 | switch ToErrorResponse(err).Code { | ||
405 | case "InvalidArgument", "NoSuchVersion": | ||
406 | continue | ||
407 | } | ||
408 | // Drop the duplicate send; the failed result is sent exactly once below. | ||
409 | } | ||
410 | |||
411 | resultCh <- removeResult | ||
412 | continue | ||
413 | } | ||
414 | |||
415 | batch = append(batch, object) | ||
416 | if count++; count >= maxEntries { | ||
417 | break | ||
418 | } | ||
419 | } | ||
420 | if count == 0 { | ||
421 | // The Multi-Object Delete API doesn't accept an empty object list; quit immediately | ||
422 | break | ||
423 | } | ||
424 | if count < maxEntries { | ||
425 | // We didn't have 1000 entries, so this is the last batch | ||
426 | finish = true | ||
427 | } | ||
428 | |||
429 | // Build headers. | ||
430 | headers := make(http.Header) | ||
431 | if opts.GovernanceBypass { | ||
432 | // Set the bypass governance retention header | ||
433 | headers.Set(amzBypassGovernance, "true") | ||
434 | } | ||
435 | |||
436 | // Generate remove multi objects XML request | ||
437 | removeBytes := generateRemoveMultiObjectsRequest(batch) | ||
438 | // Execute POST on bucket to delete objects. | ||
439 | resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ | ||
440 | bucketName: bucketName, | ||
441 | queryValues: urlValues, | ||
442 | contentBody: bytes.NewReader(removeBytes), | ||
443 | contentLength: int64(len(removeBytes)), | ||
444 | contentMD5Base64: sumMD5Base64(removeBytes), | ||
445 | contentSHA256Hex: sum256Hex(removeBytes), | ||
446 | customHeader: headers, | ||
447 | }) | ||
448 | if resp != nil { | ||
449 | if resp.StatusCode != http.StatusOK { | ||
450 | e := httpRespToErrorResponse(resp, bucketName, "") | ||
451 | resultCh <- RemoveObjectResult{ObjectName: "", Err: e} | ||
452 | } | ||
453 | } | ||
454 | if err != nil { | ||
455 | for _, b := range batch { | ||
456 | resultCh <- RemoveObjectResult{ | ||
457 | ObjectName: b.Key, | ||
458 | ObjectVersionID: b.VersionID, | ||
459 | Err: err, | ||
460 | } | ||
461 | } | ||
462 | continue | ||
463 | } | ||
464 | |||
465 | // Process multiobjects remove xml response | ||
466 | processRemoveMultiObjectsResponse(resp.Body, resultCh) | ||
467 | |||
468 | closeResponse(resp) | ||
469 | } | ||
470 | } | ||
471 | |||
472 | // RemoveIncompleteUpload aborts a partially uploaded object. | ||
473 | func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error { | ||
474 | // Input validation. | ||
475 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
476 | return err | ||
477 | } | ||
478 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
479 | return err | ||
480 | } | ||
481 | // Find multipart upload ids of the object to be aborted. | ||
482 | uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName) | ||
483 | if err != nil { | ||
484 | return err | ||
485 | } | ||
486 | |||
487 | for _, uploadID := range uploadIDs { | ||
488 | // abort incomplete multipart upload, based on the upload id passed. | ||
489 | err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | ||
490 | if err != nil { | ||
491 | return err | ||
492 | } | ||
493 | } | ||
494 | |||
495 | return nil | ||
496 | } | ||
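A short sketch of aborting dangling multipart uploads for a single key; the client and names are placeholder assumptions:

    package demo

    import (
        "context"
        "log"

        "github.com/minio/minio-go/v7"
    )

    func abortDangling(ctx context.Context, client *minio.Client) {
        // Aborts every pending multipart upload found for the object key.
        if err := client.RemoveIncompleteUpload(ctx, "my-bucket", "big.iso"); err != nil {
            log.Println("abort failed:", err)
        }
    }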
497 | |||
498 | // abortMultipartUpload aborts a multipart upload for the given | ||
499 | // uploadID; all previously uploaded parts are deleted. | ||
500 | func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { | ||
501 | // Input validation. | ||
502 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
503 | return err | ||
504 | } | ||
505 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
506 | return err | ||
507 | } | ||
508 | |||
509 | // Initialize url queries. | ||
510 | urlValues := make(url.Values) | ||
511 | urlValues.Set("uploadId", uploadID) | ||
512 | |||
513 | // Execute DELETE on multipart upload. | ||
514 | resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ | ||
515 | bucketName: bucketName, | ||
516 | objectName: objectName, | ||
517 | queryValues: urlValues, | ||
518 | contentSHA256Hex: emptySHA256Hex, | ||
519 | }) | ||
520 | defer closeResponse(resp) | ||
521 | if err != nil { | ||
522 | return err | ||
523 | } | ||
524 | if resp != nil { | ||
525 | if resp.StatusCode != http.StatusNoContent { | ||
526 | // Abort has no response body, handle it for any errors. | ||
527 | var errorResponse ErrorResponse | ||
528 | switch resp.StatusCode { | ||
529 | case http.StatusNotFound: | ||
530 | // This is needed specifically for abort and it cannot | ||
531 | // be converged into default case. | ||
532 | errorResponse = ErrorResponse{ | ||
533 | Code: "NoSuchUpload", | ||
534 | Message: "The specified multipart upload does not exist.", | ||
535 | BucketName: bucketName, | ||
536 | Key: objectName, | ||
537 | RequestID: resp.Header.Get("x-amz-request-id"), | ||
538 | HostID: resp.Header.Get("x-amz-id-2"), | ||
539 | Region: resp.Header.Get("x-amz-bucket-region"), | ||
540 | } | ||
541 | default: | ||
542 | return httpRespToErrorResponse(resp, bucketName, objectName) | ||
543 | } | ||
544 | return errorResponse | ||
545 | } | ||
546 | } | ||
547 | return nil | ||
548 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go new file mode 100644 index 0000000..9ec8f4f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-restore.go | |||
@@ -0,0 +1,182 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * (C) 2018-2021 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/xml" | ||
24 | "net/http" | ||
25 | "net/url" | ||
26 | |||
27 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
28 | "github.com/minio/minio-go/v7/pkg/tags" | ||
29 | ) | ||
30 | |||
31 | // RestoreType represents the restore request type | ||
32 | type RestoreType string | ||
33 | |||
34 | const ( | ||
35 | // RestoreSelect represents the restore SELECT operation | ||
36 | RestoreSelect = RestoreType("SELECT") | ||
37 | ) | ||
38 | |||
39 | // TierType represents a retrieval tier | ||
40 | type TierType string | ||
41 | |||
42 | const ( | ||
43 | // TierStandard is the standard retrieval tier | ||
44 | TierStandard = TierType("Standard") | ||
45 | // TierBulk is the bulk retrieval tier | ||
46 | TierBulk = TierType("Bulk") | ||
47 | // TierExpedited is the expedited retrieval tier | ||
48 | TierExpedited = TierType("Expedited") | ||
49 | ) | ||
50 | |||
51 | // GlacierJobParameters represents the retrieval tier parameter | ||
52 | type GlacierJobParameters struct { | ||
53 | Tier TierType | ||
54 | } | ||
55 | |||
56 | // Encryption contains the type of server-side encryption used during object retrieval | ||
57 | type Encryption struct { | ||
58 | EncryptionType string | ||
59 | KMSContext string | ||
60 | KMSKeyID string `xml:"KMSKeyId"` | ||
61 | } | ||
62 | |||
63 | // MetadataEntry represents a metadata entry of the restored object. | ||
64 | type MetadataEntry struct { | ||
65 | Name string | ||
66 | Value string | ||
67 | } | ||
68 | |||
69 | // S3 holds properties of the copy of the archived object | ||
70 | type S3 struct { | ||
71 | AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"` | ||
72 | BucketName string | ||
73 | Prefix string | ||
74 | CannedACL *string `xml:"CannedACL,omitempty"` | ||
75 | Encryption *Encryption `xml:"Encryption,omitempty"` | ||
76 | StorageClass *string `xml:"StorageClass,omitempty"` | ||
77 | Tagging *tags.Tags `xml:"Tagging,omitempty"` | ||
78 | UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"` | ||
79 | } | ||
80 | |||
81 | // SelectParameters holds the select request parameters | ||
82 | type SelectParameters struct { | ||
83 | XMLName xml.Name `xml:"SelectParameters"` | ||
84 | ExpressionType QueryExpressionType | ||
85 | Expression string | ||
86 | InputSerialization SelectObjectInputSerialization | ||
87 | OutputSerialization SelectObjectOutputSerialization | ||
88 | } | ||
89 | |||
90 | // OutputLocation holds properties of the copy of the archived object | ||
91 | type OutputLocation struct { | ||
92 | XMLName xml.Name `xml:"OutputLocation"` | ||
93 | S3 S3 `xml:"S3"` | ||
94 | } | ||
95 | |||
96 | // RestoreRequest holds properties of the restore object request | ||
97 | type RestoreRequest struct { | ||
98 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"` | ||
99 | Type *RestoreType `xml:"Type,omitempty"` | ||
100 | Tier *TierType `xml:"Tier,omitempty"` | ||
101 | Days *int `xml:"Days,omitempty"` | ||
102 | GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"` | ||
103 | Description *string `xml:"Description,omitempty"` | ||
104 | SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"` | ||
105 | OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"` | ||
106 | } | ||
107 | |||
108 | // SetDays sets the days parameter of the restore request | ||
109 | func (r *RestoreRequest) SetDays(v int) { | ||
110 | r.Days = &v | ||
111 | } | ||
112 | |||
113 | // SetGlacierJobParameters sets the GlacierJobParameters of the restore request | ||
114 | func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) { | ||
115 | r.GlacierJobParameters = &v | ||
116 | } | ||
117 | |||
118 | // SetType sets the type of the restore request | ||
119 | func (r *RestoreRequest) SetType(v RestoreType) { | ||
120 | r.Type = &v | ||
121 | } | ||
122 | |||
123 | // SetTier sets the retrieval tier of the restore request | ||
124 | func (r *RestoreRequest) SetTier(v TierType) { | ||
125 | r.Tier = &v | ||
126 | } | ||
127 | |||
128 | // SetDescription sets the description of the restore request | ||
129 | func (r *RestoreRequest) SetDescription(v string) { | ||
130 | r.Description = &v | ||
131 | } | ||
132 | |||
133 | // SetSelectParameters sets SelectParameters of the restore select request | ||
134 | func (r *RestoreRequest) SetSelectParameters(v SelectParameters) { | ||
135 | r.SelectParameters = &v | ||
136 | } | ||
137 | |||
138 | // SetOutputLocation sets the properties of the copy of the archived object | ||
139 | func (r *RestoreRequest) SetOutputLocation(v OutputLocation) { | ||
140 | r.OutputLocation = &v | ||
141 | } | ||
142 | |||
143 | // RestoreObject is an implementation of the https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API | ||
144 | func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error { | ||
145 | // Input validation. | ||
146 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
147 | return err | ||
148 | } | ||
149 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
150 | return err | ||
151 | } | ||
152 | |||
153 | restoreRequestBytes, err := xml.Marshal(req) | ||
154 | if err != nil { | ||
155 | return err | ||
156 | } | ||
157 | |||
158 | urlValues := make(url.Values) | ||
159 | urlValues.Set("restore", "") | ||
160 | if versionID != "" { | ||
161 | urlValues.Set("versionId", versionID) | ||
162 | } | ||
163 | |||
164 | // Execute POST on bucket/object. | ||
165 | resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ | ||
166 | bucketName: bucketName, | ||
167 | objectName: objectName, | ||
168 | queryValues: urlValues, | ||
169 | contentMD5Base64: sumMD5Base64(restoreRequestBytes), | ||
170 | contentSHA256Hex: sum256Hex(restoreRequestBytes), | ||
171 | contentBody: bytes.NewReader(restoreRequestBytes), | ||
172 | contentLength: int64(len(restoreRequestBytes)), | ||
173 | }) | ||
174 | defer closeResponse(resp) | ||
175 | if err != nil { | ||
176 | return err | ||
177 | } | ||
178 | if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK { | ||
179 | return httpRespToErrorResponse(resp, bucketName, "") | ||
180 | } | ||
181 | return nil | ||
182 | } | ||
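A minimal sketch of a restore request using the setters above: restore the latest version of an archived object for three days at the Standard tier. The client and names are placeholder assumptions:

    package demo

    import (
        "context"

        "github.com/minio/minio-go/v7"
    )

    func restoreArchived(ctx context.Context, client *minio.Client) error {
        req := minio.RestoreRequest{}
        req.SetDays(3) // keep the restored copy available for three days
        req.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierStandard})
        // An empty versionID targets the latest version.
        return client.RestoreObject(ctx, "archive-bucket", "backup.tar", "", req)
    }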
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go new file mode 100644 index 0000000..1527b74 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go | |||
@@ -0,0 +1,390 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "encoding/xml" | ||
22 | "errors" | ||
23 | "io" | ||
24 | "reflect" | ||
25 | "time" | ||
26 | ) | ||
27 | |||
28 | // listAllMyBucketsResult container for listBuckets response. | ||
29 | type listAllMyBucketsResult struct { | ||
30 | // Container for one or more buckets. | ||
31 | Buckets struct { | ||
32 | Bucket []BucketInfo | ||
33 | } | ||
34 | Owner owner | ||
35 | } | ||
36 | |||
37 | // owner container for bucket owner information. | ||
38 | type owner struct { | ||
39 | DisplayName string | ||
40 | ID string | ||
41 | } | ||
42 | |||
43 | // CommonPrefix container for prefix response. | ||
44 | type CommonPrefix struct { | ||
45 | Prefix string | ||
46 | } | ||
47 | |||
48 | // ListBucketV2Result container for listObjects response version 2. | ||
49 | type ListBucketV2Result struct { | ||
50 | // A response can contain CommonPrefixes only if you have | ||
51 | // specified a delimiter. | ||
52 | CommonPrefixes []CommonPrefix | ||
53 | // Metadata about each object returned. | ||
54 | Contents []ObjectInfo | ||
55 | Delimiter string | ||
56 | |||
57 | // Encoding type used to encode object keys in the response. | ||
58 | EncodingType string | ||
59 | |||
60 | // A flag that indicates whether or not ListObjects returned all of the results | ||
61 | // that satisfied the search criteria. | ||
62 | IsTruncated bool | ||
63 | MaxKeys int64 | ||
64 | Name string | ||
65 | |||
66 | // Hold the token that will be sent in the next request to fetch the next group of keys | ||
67 | NextContinuationToken string | ||
68 | |||
69 | ContinuationToken string | ||
70 | Prefix string | ||
71 | |||
72 | // FetchOwner and StartAfter are currently not used | ||
73 | FetchOwner string | ||
74 | StartAfter string | ||
75 | } | ||
76 | |||
77 | // Version is an element in the list object versions response | ||
78 | type Version struct { | ||
79 | ETag string | ||
80 | IsLatest bool | ||
81 | Key string | ||
82 | LastModified time.Time | ||
83 | Owner Owner | ||
84 | Size int64 | ||
85 | StorageClass string | ||
86 | VersionID string `xml:"VersionId"` | ||
87 | |||
88 | // x-amz-meta-* headers, with the "x-amz-meta-" prefix stripped, containing the first value. | ||
89 | // Only returned by MinIO servers. | ||
90 | UserMetadata StringMap `json:"userMetadata,omitempty"` | ||
91 | |||
92 | // x-amz-tagging values as key/value pairs. | ||
93 | // Only returned by MinIO servers. | ||
94 | UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"` | ||
95 | |||
96 | Internal *struct { | ||
97 | K int // Data blocks | ||
98 | M int // Parity blocks | ||
99 | } `xml:"Internal"` | ||
100 | |||
101 | isDeleteMarker bool | ||
102 | } | ||
103 | |||
104 | // ListVersionsResult is the container for the list object versions response | ||
105 | // and has a special Unmarshaler because we need to preserve the order | ||
106 | // of <Version> and <DeleteMarker> in the ListVersionsResult.Versions slice | ||
107 | type ListVersionsResult struct { | ||
108 | Versions []Version | ||
109 | |||
110 | CommonPrefixes []CommonPrefix | ||
111 | Name string | ||
112 | Prefix string | ||
113 | Delimiter string | ||
114 | MaxKeys int64 | ||
115 | EncodingType string | ||
116 | IsTruncated bool | ||
117 | KeyMarker string | ||
118 | VersionIDMarker string | ||
119 | NextKeyMarker string | ||
120 | NextVersionIDMarker string | ||
121 | } | ||
122 | |||
123 | // UnmarshalXML is a custom unmarshal code for the response of ListObjectVersions, the custom | ||
124 | // code will unmarshal <Version> and <DeleteMarker> tags and save them in Versions field to | ||
125 | // preserve the lexical order of the listing. | ||
126 | func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (err error) { | ||
127 | for { | ||
128 | // Read tokens from the XML document in a stream. | ||
129 | t, err := d.Token() | ||
130 | if err != nil { | ||
131 | if err == io.EOF { | ||
132 | break | ||
133 | } | ||
134 | return err | ||
135 | } | ||
136 | |||
137 | se, ok := t.(xml.StartElement) | ||
138 | if ok { | ||
139 | tagName := se.Name.Local | ||
140 | switch tagName { | ||
141 | case "Name", "Prefix", | ||
142 | "Delimiter", "EncodingType", | ||
143 | "KeyMarker", "NextKeyMarker": | ||
144 | var s string | ||
145 | if err = d.DecodeElement(&s, &se); err != nil { | ||
146 | return err | ||
147 | } | ||
148 | v := reflect.ValueOf(l).Elem().FieldByName(tagName) | ||
149 | if v.IsValid() { | ||
150 | v.SetString(s) | ||
151 | } | ||
152 | case "VersionIdMarker": | ||
153 | // VersionIdMarker is a special case because of 'Id' instead of 'ID' in field name | ||
154 | var s string | ||
155 | if err = d.DecodeElement(&s, &se); err != nil { | ||
156 | return err | ||
157 | } | ||
158 | l.VersionIDMarker = s | ||
159 | case "NextVersionIdMarker": | ||
160 | // NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in field name | ||
161 | var s string | ||
162 | if err = d.DecodeElement(&s, &se); err != nil { | ||
163 | return err | ||
164 | } | ||
165 | l.NextVersionIDMarker = s | ||
166 | case "IsTruncated": // bool | ||
167 | var b bool | ||
168 | if err = d.DecodeElement(&b, &se); err != nil { | ||
169 | return err | ||
170 | } | ||
171 | l.IsTruncated = b | ||
172 | case "MaxKeys": // int64 | ||
173 | var i int64 | ||
174 | if err = d.DecodeElement(&i, &se); err != nil { | ||
175 | return err | ||
176 | } | ||
177 | l.MaxKeys = i | ||
178 | case "CommonPrefixes": | ||
179 | var cp CommonPrefix | ||
180 | if err = d.DecodeElement(&cp, &se); err != nil { | ||
181 | return err | ||
182 | } | ||
183 | l.CommonPrefixes = append(l.CommonPrefixes, cp) | ||
184 | case "DeleteMarker", "Version": | ||
185 | var v Version | ||
186 | if err = d.DecodeElement(&v, &se); err != nil { | ||
187 | return err | ||
188 | } | ||
189 | if tagName == "DeleteMarker" { | ||
190 | v.isDeleteMarker = true | ||
191 | } | ||
192 | l.Versions = append(l.Versions, v) | ||
193 | default: | ||
194 | return errors.New("unrecognized option: " + tagName) | ||
195 | } | ||
196 | |||
197 | } | ||
198 | } | ||
199 | return nil | ||
200 | } | ||
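To see why the custom decoder matters: a plain struct unmarshal would split `<Version>` and `<DeleteMarker>` into two separate slices and lose their relative order, whereas this decoder interleaves them in `Versions`. A self-contained sketch (the XML payload is illustrative, not a captured server response):

```go
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	payload := `<ListVersionsResult>
  <Name>my-bucket</Name>
  <Version><Key>a.txt</Key><VersionId>2</VersionId></Version>
  <DeleteMarker><Key>a.txt</Key><VersionId>3</VersionId></DeleteMarker>
</ListVersionsResult>`

	var res minio.ListVersionsResult
	if err := xml.Unmarshal([]byte(payload), &res); err != nil {
		panic(err)
	}
	// Both entries land in res.Versions, preserving document order.
	for _, v := range res.Versions {
		fmt.Println(v.Key, v.VersionID)
	}
}
```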
201 | |||
202 | // ListBucketResult container for listObjects response. | ||
203 | type ListBucketResult struct { | ||
204 | // A response can contain CommonPrefixes only if you have | ||
205 | // specified a delimiter. | ||
206 | CommonPrefixes []CommonPrefix | ||
207 | // Metadata about each object returned. | ||
208 | Contents []ObjectInfo | ||
209 | Delimiter string | ||
210 | |||
211 | // Encoding type used to encode object keys in the response. | ||
212 | EncodingType string | ||
213 | |||
214 | // A flag that indicates whether or not ListObjects returned all of the results | ||
215 | // that satisfied the search criteria. | ||
216 | IsTruncated bool | ||
217 | Marker string | ||
218 | MaxKeys int64 | ||
219 | Name string | ||
220 | |||
221 | // When the response is truncated (the IsTruncated element value in | ||
222 | // the response is true), you can use the key name in this field | ||
223 | // as the marker in the subsequent request to get the next set of | ||
224 | // objects. Object storage lists objects in alphabetical order. | ||
225 | // Note: this element is returned only if you have the delimiter | ||
226 | // request parameter specified. If the response does not include | ||
227 | // NextMarker and it is truncated, you can use the value of the | ||
228 | // last Key in the response as the marker in the subsequent | ||
229 | // request to get the next set of object keys. | ||
230 | NextMarker string | ||
231 | Prefix string | ||
232 | } | ||
233 | |||
234 | // ListMultipartUploadsResult container for ListMultipartUploads response | ||
235 | type ListMultipartUploadsResult struct { | ||
236 | Bucket string | ||
237 | KeyMarker string | ||
238 | UploadIDMarker string `xml:"UploadIdMarker"` | ||
239 | NextKeyMarker string | ||
240 | NextUploadIDMarker string `xml:"NextUploadIdMarker"` | ||
241 | EncodingType string | ||
242 | MaxUploads int64 | ||
243 | IsTruncated bool | ||
244 | Uploads []ObjectMultipartInfo `xml:"Upload"` | ||
245 | Prefix string | ||
246 | Delimiter string | ||
247 | // A response can contain CommonPrefixes only if you specify a delimiter. | ||
248 | CommonPrefixes []CommonPrefix | ||
249 | } | ||
250 | |||
251 | // initiator container for who initiated multipart upload. | ||
252 | type initiator struct { | ||
253 | ID string | ||
254 | DisplayName string | ||
255 | } | ||
256 | |||
257 | // copyObjectResult container for copy object response. | ||
258 | type copyObjectResult struct { | ||
259 | ETag string | ||
260 | LastModified time.Time // time string format "2006-01-02T15:04:05.000Z" | ||
261 | } | ||
262 | |||
263 | // ObjectPart container for particular part of an object. | ||
264 | type ObjectPart struct { | ||
265 | // Part number identifies the part. | ||
266 | PartNumber int | ||
267 | |||
268 | // Date and time the part was uploaded. | ||
269 | LastModified time.Time | ||
270 | |||
271 | // Entity tag returned when the part was uploaded, usually md5sum | ||
272 | // of the part. | ||
273 | ETag string | ||
274 | |||
275 | // Size of the uploaded part data. | ||
276 | Size int64 | ||
277 | |||
278 | // Checksum values of each part. | ||
279 | ChecksumCRC32 string | ||
280 | ChecksumCRC32C string | ||
281 | ChecksumSHA1 string | ||
282 | ChecksumSHA256 string | ||
283 | } | ||
284 | |||
285 | // ListObjectPartsResult container for ListObjectParts response. | ||
286 | type ListObjectPartsResult struct { | ||
287 | Bucket string | ||
288 | Key string | ||
289 | UploadID string `xml:"UploadId"` | ||
290 | |||
291 | Initiator initiator | ||
292 | Owner owner | ||
293 | |||
294 | StorageClass string | ||
295 | PartNumberMarker int | ||
296 | NextPartNumberMarker int | ||
297 | MaxParts int | ||
298 | |||
299 | // Indicates whether the returned list of parts is truncated. | ||
300 | IsTruncated bool | ||
301 | ObjectParts []ObjectPart `xml:"Part"` | ||
302 | |||
303 | EncodingType string | ||
304 | } | ||
305 | |||
306 | // initiateMultipartUploadResult container for InitiateMultiPartUpload | ||
307 | // response. | ||
308 | type initiateMultipartUploadResult struct { | ||
309 | Bucket string | ||
310 | Key string | ||
311 | UploadID string `xml:"UploadId"` | ||
312 | } | ||
313 | |||
314 | // completeMultipartUploadResult container for completed multipart | ||
315 | // upload response. | ||
316 | type completeMultipartUploadResult struct { | ||
317 | Location string | ||
318 | Bucket string | ||
319 | Key string | ||
320 | ETag string | ||
321 | |||
322 | // Checksum values, hash of hashes of parts. | ||
323 | ChecksumCRC32 string | ||
324 | ChecksumCRC32C string | ||
325 | ChecksumSHA1 string | ||
326 | ChecksumSHA256 string | ||
327 | } | ||
328 | |||
329 | // CompletePart sub container lists individual part numbers and their | ||
330 | // md5sum, part of completeMultipartUpload. | ||
331 | type CompletePart struct { | ||
332 | // Part number identifies the part. | ||
333 | PartNumber int | ||
334 | ETag string | ||
335 | |||
336 | // Checksum values | ||
337 | ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` | ||
338 | ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` | ||
339 | ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` | ||
340 | ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` | ||
341 | } | ||
342 | |||
343 | // completeMultipartUpload container for completing multipart upload. | ||
344 | type completeMultipartUpload struct { | ||
345 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` | ||
346 | Parts []CompletePart `xml:"Part"` | ||
347 | } | ||
348 | |||
349 | // createBucketConfiguration container for bucket configuration. | ||
350 | type createBucketConfiguration struct { | ||
351 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` | ||
352 | Location string `xml:"LocationConstraint"` | ||
353 | } | ||
354 | |||
355 | // deleteObject container for Delete element in MultiObjects Delete XML request | ||
356 | type deleteObject struct { | ||
357 | Key string | ||
358 | VersionID string `xml:"VersionId,omitempty"` | ||
359 | } | ||
360 | |||
361 | // deletedObject container for Deleted element in MultiObjects Delete XML response | ||
362 | type deletedObject struct { | ||
363 | Key string | ||
364 | VersionID string `xml:"VersionId,omitempty"` | ||
365 | // These fields are ignored. | ||
366 | DeleteMarker bool | ||
367 | DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"` | ||
368 | } | ||
369 | |||
370 | // nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response | ||
371 | type nonDeletedObject struct { | ||
372 | Key string | ||
373 | Code string | ||
374 | Message string | ||
375 | VersionID string `xml:"VersionId"` | ||
376 | } | ||
377 | |||
378 | // deletedMultiObjects container for MultiObjects Delete XML request | ||
379 | type deleteMultiObjects struct { | ||
380 | XMLName xml.Name `xml:"Delete"` | ||
381 | Quiet bool | ||
382 | Objects []deleteObject `xml:"Object"` | ||
383 | } | ||
384 | |||
385 | // deletedMultiObjectsResult container for MultiObjects Delete XML response | ||
386 | type deleteMultiObjectsResult struct { | ||
387 | XMLName xml.Name `xml:"DeleteResult"` | ||
388 | DeletedObjects []deletedObject `xml:"Deleted"` | ||
389 | UnDeletedObjects []nonDeletedObject `xml:"Error"` | ||
390 | } | ||
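Since these request/response types are unexported, here is a standalone sketch that mirrors deleteObject/deleteMultiObjects locally (same field names and XML tags), purely to show the wire format a MultiObjects Delete call carries:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Local mirrors of the unexported types above, for illustration only.
type delObject struct {
	Key       string
	VersionID string `xml:"VersionId,omitempty"`
}

type delRequest struct {
	XMLName xml.Name    `xml:"Delete"`
	Quiet   bool
	Objects []delObject `xml:"Object"`
}

func main() {
	req := delRequest{
		Quiet:   true,
		Objects: []delObject{{Key: "a.txt"}, {Key: "b.txt", VersionID: "3"}},
	}
	out, err := xml.MarshalIndent(req, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <Delete>
	//   <Quiet>true</Quiet>
	//   <Object>
	//     <Key>a.txt</Key>
	//   </Object>
	//   <Object>
	//     <Key>b.txt</Key>
	//     <VersionId>3</VersionId>
	//   </Object>
	// </Delete>
}
```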
diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go new file mode 100644 index 0000000..628d967 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-select.go | |||
@@ -0,0 +1,757 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * (C) 2018-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/binary" | ||
24 | "encoding/xml" | ||
25 | "errors" | ||
26 | "fmt" | ||
27 | "hash" | ||
28 | "hash/crc32" | ||
29 | "io" | ||
30 | "net/http" | ||
31 | "net/url" | ||
32 | "strings" | ||
33 | |||
34 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
35 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
36 | ) | ||
37 | |||
38 | // CSVFileHeaderInfo - is the parameter for how the CSV header row is treated. | ||
39 | type CSVFileHeaderInfo string | ||
40 | |||
41 | // Constants for file header info. | ||
42 | const ( | ||
43 | CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" | ||
44 | CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE" | ||
45 | CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE" | ||
46 | ) | ||
47 | |||
48 | // SelectCompressionType - is the parameter specifying what type of compression | ||
49 | // is present on the object | ||
50 | type SelectCompressionType string | ||
51 | |||
52 | // Constants for compression types under select API. | ||
53 | const ( | ||
54 | SelectCompressionNONE SelectCompressionType = "NONE" | ||
55 | SelectCompressionGZIP SelectCompressionType = "GZIP" | ||
56 | SelectCompressionBZIP SelectCompressionType = "BZIP2" | ||
57 | |||
58 | // Non-standard compression schemes, supported by MinIO hosts: | ||
59 | |||
60 | SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression. | ||
61 | SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream | ||
62 | SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream | ||
63 | SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream | ||
64 | ) | ||
65 | |||
66 | // CSVQuoteFields - is the parameter for how CSV fields are quoted. | ||
67 | type CSVQuoteFields string | ||
68 | |||
69 | // Constants for csv quote styles. | ||
70 | const ( | ||
71 | CSVQuoteFieldsAlways CSVQuoteFields = "Always" | ||
72 | CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded" | ||
73 | ) | ||
74 | |||
75 | // QueryExpressionType - is the syntax type of the query expression; currently | ||
76 | // this should only be SQL | ||
77 | type QueryExpressionType string | ||
78 | |||
79 | // Constants for expression type. | ||
80 | const ( | ||
81 | QueryExpressionTypeSQL QueryExpressionType = "SQL" | ||
82 | ) | ||
83 | |||
84 | // JSONType determines json input serialization type. | ||
85 | type JSONType string | ||
86 | |||
87 | // Constants for JSONTypes. | ||
88 | const ( | ||
89 | JSONDocumentType JSONType = "DOCUMENT" | ||
90 | JSONLinesType JSONType = "LINES" | ||
91 | ) | ||
92 | |||
93 | // ParquetInputOptions parquet input specific options | ||
94 | type ParquetInputOptions struct{} | ||
95 | |||
96 | // CSVInputOptions csv input specific options | ||
97 | type CSVInputOptions struct { | ||
98 | FileHeaderInfo CSVFileHeaderInfo | ||
99 | fileHeaderInfoSet bool | ||
100 | |||
101 | RecordDelimiter string | ||
102 | recordDelimiterSet bool | ||
103 | |||
104 | FieldDelimiter string | ||
105 | fieldDelimiterSet bool | ||
106 | |||
107 | QuoteCharacter string | ||
108 | quoteCharacterSet bool | ||
109 | |||
110 | QuoteEscapeCharacter string | ||
111 | quoteEscapeCharacterSet bool | ||
112 | |||
113 | Comments string | ||
114 | commentsSet bool | ||
115 | } | ||
116 | |||
117 | // SetFileHeaderInfo sets the file header info in the CSV input options | ||
118 | func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) { | ||
119 | c.FileHeaderInfo = val | ||
120 | c.fileHeaderInfoSet = true | ||
121 | } | ||
122 | |||
123 | // SetRecordDelimiter sets the record delimiter in the CSV input options | ||
124 | func (c *CSVInputOptions) SetRecordDelimiter(val string) { | ||
125 | c.RecordDelimiter = val | ||
126 | c.recordDelimiterSet = true | ||
127 | } | ||
128 | |||
129 | // SetFieldDelimiter sets the field delimiter in the CSV input options | ||
130 | func (c *CSVInputOptions) SetFieldDelimiter(val string) { | ||
131 | c.FieldDelimiter = val | ||
132 | c.fieldDelimiterSet = true | ||
133 | } | ||
134 | |||
135 | // SetQuoteCharacter sets the quote character in the CSV input options | ||
136 | func (c *CSVInputOptions) SetQuoteCharacter(val string) { | ||
137 | c.QuoteCharacter = val | ||
138 | c.quoteCharacterSet = true | ||
139 | } | ||
140 | |||
141 | // SetQuoteEscapeCharacter sets the quote escape character in the CSV input options | ||
142 | func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) { | ||
143 | c.QuoteEscapeCharacter = val | ||
144 | c.quoteEscapeCharacterSet = true | ||
145 | } | ||
146 | |||
147 | // SetComments sets the comments character in the CSV input options | ||
148 | func (c *CSVInputOptions) SetComments(val string) { | ||
149 | c.Comments = val | ||
150 | c.commentsSet = true | ||
151 | } | ||
152 | |||
153 | // MarshalXML - produces the xml representation of the CSV input options struct | ||
154 | func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
155 | if err := e.EncodeToken(start); err != nil { | ||
156 | return err | ||
157 | } | ||
158 | if c.FileHeaderInfo != "" || c.fileHeaderInfoSet { | ||
159 | if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil { | ||
160 | return err | ||
161 | } | ||
162 | } | ||
163 | |||
164 | if c.RecordDelimiter != "" || c.recordDelimiterSet { | ||
165 | if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { | ||
166 | return err | ||
167 | } | ||
168 | } | ||
169 | |||
170 | if c.FieldDelimiter != "" || c.fieldDelimiterSet { | ||
171 | if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { | ||
172 | return err | ||
173 | } | ||
174 | } | ||
175 | |||
176 | if c.QuoteCharacter != "" || c.quoteCharacterSet { | ||
177 | if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { | ||
178 | return err | ||
179 | } | ||
180 | } | ||
181 | |||
182 | if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { | ||
183 | if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { | ||
184 | return err | ||
185 | } | ||
186 | } | ||
187 | |||
188 | if c.Comments != "" || c.commentsSet { | ||
189 | if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil { | ||
190 | return err | ||
191 | } | ||
192 | } | ||
193 | |||
194 | return e.EncodeToken(xml.EndElement{Name: start.Name}) | ||
195 | } | ||
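The paired value/flag fields are what make the `!= "" || ...Set` checks above work: a field assigned through its setter is emitted even when the value is empty (e.g. to explicitly clear a delimiter), while an untouched field is omitted from the XML entirely. A quick sketch of the difference:

```go
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	var a minio.CSVInputOptions
	a.SetRecordDelimiter("") // explicitly empty: the element is still emitted
	outA, _ := xml.Marshal(a)
	fmt.Println(string(outA)) // <CSVInputOptions><RecordDelimiter></RecordDelimiter></CSVInputOptions>

	var b minio.CSVInputOptions // nothing set: no child elements at all
	outB, _ := xml.Marshal(b)
	fmt.Println(string(outB)) // <CSVInputOptions></CSVInputOptions>
}
```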
196 | |||
197 | // CSVOutputOptions csv output specific options | ||
198 | type CSVOutputOptions struct { | ||
199 | QuoteFields CSVQuoteFields | ||
200 | quoteFieldsSet bool | ||
201 | |||
202 | RecordDelimiter string | ||
203 | recordDelimiterSet bool | ||
204 | |||
205 | FieldDelimiter string | ||
206 | fieldDelimiterSet bool | ||
207 | |||
208 | QuoteCharacter string | ||
209 | quoteCharacterSet bool | ||
210 | |||
211 | QuoteEscapeCharacter string | ||
212 | quoteEscapeCharacterSet bool | ||
213 | } | ||
214 | |||
215 | // SetQuoteFields sets the quote field parameter in the CSV output options | ||
216 | func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) { | ||
217 | c.QuoteFields = val | ||
218 | c.quoteFieldsSet = true | ||
219 | } | ||
220 | |||
221 | // SetRecordDelimiter sets the record delimiter character in the CSV output options | ||
222 | func (c *CSVOutputOptions) SetRecordDelimiter(val string) { | ||
223 | c.RecordDelimiter = val | ||
224 | c.recordDelimiterSet = true | ||
225 | } | ||
226 | |||
227 | // SetFieldDelimiter sets the field delimiter character in the CSV output options | ||
228 | func (c *CSVOutputOptions) SetFieldDelimiter(val string) { | ||
229 | c.FieldDelimiter = val | ||
230 | c.fieldDelimiterSet = true | ||
231 | } | ||
232 | |||
233 | // SetQuoteCharacter sets the quote character in the CSV output options | ||
234 | func (c *CSVOutputOptions) SetQuoteCharacter(val string) { | ||
235 | c.QuoteCharacter = val | ||
236 | c.quoteCharacterSet = true | ||
237 | } | ||
238 | |||
239 | // SetQuoteEscapeCharacter sets the quote escape character in the CSV output options | ||
240 | func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) { | ||
241 | c.QuoteEscapeCharacter = val | ||
242 | c.quoteEscapeCharacterSet = true | ||
243 | } | ||
244 | |||
245 | // MarshalXML - produces the xml representation of the CSVOutputOptions struct | ||
246 | func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
247 | if err := e.EncodeToken(start); err != nil { | ||
248 | return err | ||
249 | } | ||
250 | |||
251 | if c.QuoteFields != "" || c.quoteFieldsSet { | ||
252 | if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil { | ||
253 | return err | ||
254 | } | ||
255 | } | ||
256 | |||
257 | if c.RecordDelimiter != "" || c.recordDelimiterSet { | ||
258 | if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { | ||
259 | return err | ||
260 | } | ||
261 | } | ||
262 | |||
263 | if c.FieldDelimiter != "" || c.fieldDelimiterSet { | ||
264 | if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { | ||
265 | return err | ||
266 | } | ||
267 | } | ||
268 | |||
269 | if c.QuoteCharacter != "" || c.quoteCharacterSet { | ||
270 | if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { | ||
271 | return err | ||
272 | } | ||
273 | } | ||
274 | |||
275 | if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { | ||
276 | if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { | ||
277 | return err | ||
278 | } | ||
279 | } | ||
280 | |||
281 | return e.EncodeToken(xml.EndElement{Name: start.Name}) | ||
282 | } | ||
283 | |||
284 | // JSONInputOptions json input specific options | ||
285 | type JSONInputOptions struct { | ||
286 | Type JSONType | ||
287 | typeSet bool | ||
288 | } | ||
289 | |||
290 | // SetType sets the JSON type in the JSON input options | ||
291 | func (j *JSONInputOptions) SetType(typ JSONType) { | ||
292 | j.Type = typ | ||
293 | j.typeSet = true | ||
294 | } | ||
295 | |||
296 | // MarshalXML - produces the xml representation of the JSONInputOptions struct | ||
297 | func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
298 | if err := e.EncodeToken(start); err != nil { | ||
299 | return err | ||
300 | } | ||
301 | |||
302 | if j.Type != "" || j.typeSet { | ||
303 | if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil { | ||
304 | return err | ||
305 | } | ||
306 | } | ||
307 | |||
308 | return e.EncodeToken(xml.EndElement{Name: start.Name}) | ||
309 | } | ||
310 | |||
311 | // JSONOutputOptions - json output specific options | ||
312 | type JSONOutputOptions struct { | ||
313 | RecordDelimiter string | ||
314 | recordDelimiterSet bool | ||
315 | } | ||
316 | |||
317 | // SetRecordDelimiter sets the record delimiter in the JSON output options | ||
318 | func (j *JSONOutputOptions) SetRecordDelimiter(val string) { | ||
319 | j.RecordDelimiter = val | ||
320 | j.recordDelimiterSet = true | ||
321 | } | ||
322 | |||
323 | // MarshalXML - produces the xml representation of the JSONOutputOptions struct | ||
324 | func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
325 | if err := e.EncodeToken(start); err != nil { | ||
326 | return err | ||
327 | } | ||
328 | |||
329 | if j.RecordDelimiter != "" || j.recordDelimiterSet { | ||
330 | if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { | ||
331 | return err | ||
332 | } | ||
333 | } | ||
334 | |||
335 | return e.EncodeToken(xml.EndElement{Name: start.Name}) | ||
336 | } | ||
337 | |||
338 | // SelectObjectInputSerialization - input serialization parameters | ||
339 | type SelectObjectInputSerialization struct { | ||
340 | CompressionType SelectCompressionType `xml:"CompressionType,omitempty"` | ||
341 | Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` | ||
342 | CSV *CSVInputOptions `xml:"CSV,omitempty"` | ||
343 | JSON *JSONInputOptions `xml:"JSON,omitempty"` | ||
344 | } | ||
345 | |||
346 | // SelectObjectOutputSerialization - output serialization parameters. | ||
347 | type SelectObjectOutputSerialization struct { | ||
348 | CSV *CSVOutputOptions `xml:"CSV,omitempty"` | ||
349 | JSON *JSONOutputOptions `xml:"JSON,omitempty"` | ||
350 | } | ||
351 | |||
352 | // SelectObjectOptions - represents the input select body | ||
353 | type SelectObjectOptions struct { | ||
354 | XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` | ||
355 | ServerSideEncryption encrypt.ServerSide `xml:"-"` | ||
356 | Expression string | ||
357 | ExpressionType QueryExpressionType | ||
358 | InputSerialization SelectObjectInputSerialization | ||
359 | OutputSerialization SelectObjectOutputSerialization | ||
360 | RequestProgress struct { | ||
361 | Enabled bool | ||
362 | } | ||
363 | } | ||
364 | |||
365 | // Header returns the http.Header representation of the SelectObject options. | ||
366 | func (o SelectObjectOptions) Header() http.Header { | ||
367 | headers := make(http.Header) | ||
368 | if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { | ||
369 | o.ServerSideEncryption.Marshal(headers) | ||
370 | } | ||
371 | return headers | ||
372 | } | ||
373 | |||
374 | // SelectObjectType - is the parameter which defines what type of object the | ||
375 | // operation is being performed on. | ||
376 | type SelectObjectType string | ||
377 | |||
378 | // Constants for input data types. | ||
379 | const ( | ||
380 | SelectObjectTypeCSV SelectObjectType = "CSV" | ||
381 | SelectObjectTypeJSON SelectObjectType = "JSON" | ||
382 | SelectObjectTypeParquet SelectObjectType = "Parquet" | ||
383 | ) | ||
384 | |||
385 | // preludeInfo is used for keeping track of necessary information from the | ||
386 | // prelude. | ||
387 | type preludeInfo struct { | ||
388 | totalLen uint32 | ||
389 | headerLen uint32 | ||
390 | } | ||
391 | |||
392 | // SelectResults is used for the streaming responses from the server. | ||
393 | type SelectResults struct { | ||
394 | pipeReader *io.PipeReader | ||
395 | resp *http.Response | ||
396 | stats *StatsMessage | ||
397 | progress *ProgressMessage | ||
398 | } | ||
399 | |||
400 | // ProgressMessage is a struct for progress xml message. | ||
401 | type ProgressMessage struct { | ||
402 | XMLName xml.Name `xml:"Progress" json:"-"` | ||
403 | StatsMessage | ||
404 | } | ||
405 | |||
406 | // StatsMessage is a struct for stat xml message. | ||
407 | type StatsMessage struct { | ||
408 | XMLName xml.Name `xml:"Stats" json:"-"` | ||
409 | BytesScanned int64 | ||
410 | BytesProcessed int64 | ||
411 | BytesReturned int64 | ||
412 | } | ||
413 | |||
414 | // messageType represents the type of message. | ||
415 | type messageType string | ||
416 | |||
417 | const ( | ||
418 | errorMsg messageType = "error" | ||
419 | commonMsg messageType = "event" | ||
420 | ) | ||
421 | |||
422 | // eventType represents the type of event. | ||
423 | type eventType string | ||
424 | |||
425 | // list of event-types returned by Select API. | ||
426 | const ( | ||
427 | endEvent eventType = "End" | ||
428 | recordsEvent eventType = "Records" | ||
429 | progressEvent eventType = "Progress" | ||
430 | statsEvent eventType = "Stats" | ||
431 | ) | ||
432 | |||
433 | // contentType represents content type of event. | ||
434 | type contentType string | ||
435 | |||
436 | const ( | ||
437 | xmlContent contentType = "text/xml" | ||
438 | ) | ||
439 | |||
440 | // SelectObjectContent is an implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. | ||
441 | func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { | ||
442 | // Input validation. | ||
443 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
444 | return nil, err | ||
445 | } | ||
446 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
447 | return nil, err | ||
448 | } | ||
449 | |||
450 | selectReqBytes, err := xml.Marshal(opts) | ||
451 | if err != nil { | ||
452 | return nil, err | ||
453 | } | ||
454 | |||
455 | urlValues := make(url.Values) | ||
456 | urlValues.Set("select", "") | ||
457 | urlValues.Set("select-type", "2") | ||
458 | |||
459 | // Execute POST on bucket/object. | ||
460 | resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ | ||
461 | bucketName: bucketName, | ||
462 | objectName: objectName, | ||
463 | queryValues: urlValues, | ||
464 | customHeader: opts.Header(), | ||
465 | contentMD5Base64: sumMD5Base64(selectReqBytes), | ||
466 | contentSHA256Hex: sum256Hex(selectReqBytes), | ||
467 | contentBody: bytes.NewReader(selectReqBytes), | ||
468 | contentLength: int64(len(selectReqBytes)), | ||
469 | }) | ||
470 | if err != nil { | ||
471 | return nil, err | ||
472 | } | ||
473 | |||
474 | return NewSelectResults(resp, bucketName) | ||
475 | } | ||
476 | |||
477 | // NewSelectResults creates a Select Result parser that parses the response | ||
478 | // and returns a Reader that will return parsed and assembled select output. | ||
479 | func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) { | ||
480 | if resp.StatusCode != http.StatusOK { | ||
481 | return nil, httpRespToErrorResponse(resp, bucketName, "") | ||
482 | } | ||
483 | |||
484 | pipeReader, pipeWriter := io.Pipe() | ||
485 | streamer := &SelectResults{ | ||
486 | resp: resp, | ||
487 | stats: &StatsMessage{}, | ||
488 | progress: &ProgressMessage{}, | ||
489 | pipeReader: pipeReader, | ||
490 | } | ||
491 | streamer.start(pipeWriter) | ||
492 | return streamer, nil | ||
493 | } | ||
494 | |||
495 | // Close - closes the underlying response body and the stream reader. | ||
496 | func (s *SelectResults) Close() error { | ||
497 | defer closeResponse(s.resp) | ||
498 | return s.pipeReader.Close() | ||
499 | } | ||
500 | |||
501 | // Read - is a reader compatible implementation for SelectObjectContent records. | ||
502 | func (s *SelectResults) Read(b []byte) (n int, err error) { | ||
503 | return s.pipeReader.Read(b) | ||
504 | } | ||
505 | |||
506 | // Stats - information about a request's stats when processing is complete. | ||
507 | func (s *SelectResults) Stats() *StatsMessage { | ||
508 | return s.stats | ||
509 | } | ||
510 | |||
511 | // Progress - information about the progress of a request. | ||
512 | func (s *SelectResults) Progress() *ProgressMessage { | ||
513 | return s.progress | ||
514 | } | ||
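A minimal end-to-end sketch of the Select API above; `clnt` construction, bucket, and object names are placeholders, and the returned SelectResults is consumed like any io.Reader:

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/minio/minio-go/v7"
)

func selectCSV(clnt *minio.Client) error {
	opts := minio.SelectObjectOptions{
		Expression:     "SELECT * FROM S3Object",
		ExpressionType: minio.QueryExpressionTypeSQL,
		InputSerialization: minio.SelectObjectInputSerialization{
			CompressionType: minio.SelectCompressionNONE,
			CSV:             &minio.CSVInputOptions{FileHeaderInfo: minio.CSVFileHeaderInfoUse},
		},
		OutputSerialization: minio.SelectObjectOutputSerialization{
			CSV: &minio.CSVOutputOptions{RecordDelimiter: "\n", FieldDelimiter: ","},
		},
	}

	res, err := clnt.SelectObjectContent(context.Background(), "my-bucket", "data.csv", opts)
	if err != nil {
		return err
	}
	defer res.Close()

	// Records payloads are streamed through the pipe as they arrive.
	_, err = io.Copy(os.Stdout, res)
	return err
}
```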
515 | |||
516 | // start is the main loop that decodes the response stream into individual | ||
517 | // events and forwards their payloads through the pipe. | ||
518 | func (s *SelectResults) start(pipeWriter *io.PipeWriter) { | ||
519 | go func() { | ||
520 | for { | ||
521 | var prelude preludeInfo | ||
522 | headers := make(http.Header) | ||
523 | var err error | ||
524 | |||
525 | // Create CRC code | ||
526 | crc := crc32.New(crc32.IEEETable) | ||
527 | crcReader := io.TeeReader(s.resp.Body, crc) | ||
528 | |||
529 | // Extract the prelude (12 bytes) into a struct to pull out relevant information. | ||
530 | prelude, err = processPrelude(crcReader, crc) | ||
531 | if err != nil { | ||
532 | pipeWriter.CloseWithError(err) | ||
533 | closeResponse(s.resp) | ||
534 | return | ||
535 | } | ||
536 | |||
537 | // Extract the headers (variable bytes) into the header map. | ||
538 | if prelude.headerLen > 0 { | ||
539 | if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { | ||
540 | pipeWriter.CloseWithError(err) | ||
541 | closeResponse(s.resp) | ||
542 | return | ||
543 | } | ||
544 | } | ||
545 | |||
546 | // Get the actual payload length so that the appropriate amount of | ||
547 | // bytes can be read or parsed. | ||
548 | payloadLen := prelude.PayloadLen() | ||
549 | |||
550 | m := messageType(headers.Get("message-type")) | ||
551 | |||
552 | switch m { | ||
553 | case errorMsg: | ||
554 | pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) | ||
555 | closeResponse(s.resp) | ||
556 | return | ||
557 | case commonMsg: | ||
558 | // Get content-type of the payload. | ||
559 | c := contentType(headers.Get("content-type")) | ||
560 | |||
561 | // Get event type of the payload. | ||
562 | e := eventType(headers.Get("event-type")) | ||
563 | |||
564 | // Handle all supported events. | ||
565 | switch e { | ||
566 | case endEvent: | ||
567 | pipeWriter.Close() | ||
568 | closeResponse(s.resp) | ||
569 | return | ||
570 | case recordsEvent: | ||
571 | if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { | ||
572 | pipeWriter.CloseWithError(err) | ||
573 | closeResponse(s.resp) | ||
574 | return | ||
575 | } | ||
576 | case progressEvent: | ||
577 | switch c { | ||
578 | case xmlContent: | ||
579 | if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { | ||
580 | pipeWriter.CloseWithError(err) | ||
581 | closeResponse(s.resp) | ||
582 | return | ||
583 | } | ||
584 | default: | ||
585 | pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) | ||
586 | closeResponse(s.resp) | ||
587 | return | ||
588 | } | ||
589 | case statsEvent: | ||
590 | switch c { | ||
591 | case xmlContent: | ||
592 | if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { | ||
593 | pipeWriter.CloseWithError(err) | ||
594 | closeResponse(s.resp) | ||
595 | return | ||
596 | } | ||
597 | default: | ||
598 | pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) | ||
599 | closeResponse(s.resp) | ||
600 | return | ||
601 | } | ||
602 | } | ||
603 | } | ||
604 | |||
605 | // Ensures that the full message's CRC is correct and | ||
606 | // that the message is not corrupted | ||
607 | if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { | ||
608 | pipeWriter.CloseWithError(err) | ||
609 | closeResponse(s.resp) | ||
610 | return | ||
611 | } | ||
612 | |||
613 | } | ||
614 | }() | ||
615 | } | ||
616 | |||
617 | // PayloadLen is a function that calculates the length of the payload. | ||
618 | func (p preludeInfo) PayloadLen() int64 { | ||
619 | return int64(p.totalLen - p.headerLen - 16) | ||
620 | } | ||
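The constant 16 accounts for everything in the frame that is neither headers nor payload: the three 4-byte prelude fields (total length, header length, prelude CRC) plus the trailing 4-byte message CRC. A toy check of the arithmetic:

```go
package main

import "fmt"

func main() {
	// totalLen = 4 (total len) + 4 (header len) + 4 (prelude CRC)
	//          + headerLen + payloadLen + 4 (message CRC)
	totalLen, headerLen := uint32(100), uint32(60)
	payloadLen := int64(totalLen - headerLen - 16)
	fmt.Println(payloadLen) // 24
}
```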
621 | |||
622 | // processPrelude is the function that reads the 12 bytes of the prelude and | ||
623 | // ensures the CRC is correct while also extracting relevant information into | ||
624 | // the struct. | ||
625 | func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { | ||
626 | var err error | ||
627 | pInfo := preludeInfo{} | ||
628 | |||
629 | // reads total length of the message (first 4 bytes) | ||
630 | pInfo.totalLen, err = extractUint32(prelude) | ||
631 | if err != nil { | ||
632 | return pInfo, err | ||
633 | } | ||
634 | |||
635 | // reads total header length of the message (2nd 4 bytes) | ||
636 | pInfo.headerLen, err = extractUint32(prelude) | ||
637 | if err != nil { | ||
638 | return pInfo, err | ||
639 | } | ||
640 | |||
641 | // checks that the CRC is correct (3rd 4 bytes) | ||
642 | preCRC := crc.Sum32() | ||
643 | if err := checkCRC(prelude, preCRC); err != nil { | ||
644 | return pInfo, err | ||
645 | } | ||
646 | |||
647 | return pInfo, nil | ||
648 | } | ||
649 | |||
650 | // extracts the relevant information from the Headers. | ||
651 | func extractHeader(body io.Reader, myHeaders http.Header) error { | ||
652 | for { | ||
653 | // extracts the first part of the header (its name). | ||
654 | headerTypeName, err := extractHeaderType(body) | ||
655 | if err != nil { | ||
656 | // Since end of file, we have read all of our headers | ||
657 | if err == io.EOF { | ||
658 | break | ||
659 | } | ||
660 | return err | ||
661 | } | ||
662 | |||
663 | // reads the header value type byte (always 7, i.e. string) and ignores it. | ||
664 | extractUint8(body) | ||
665 | |||
666 | headerValueName, err := extractHeaderValue(body) | ||
667 | if err != nil { | ||
668 | return err | ||
669 | } | ||
670 | |||
671 | myHeaders.Set(headerTypeName, headerValueName) | ||
672 | |||
673 | } | ||
674 | return nil | ||
675 | } | ||
676 | |||
677 | // extractHeaderType extracts the first half of the header message, the header type. | ||
678 | func extractHeaderType(body io.Reader) (string, error) { | ||
679 | // extracts the 1-byte header-name length | ||
680 | headerNameLen, err := extractUint8(body) | ||
681 | if err != nil { | ||
682 | return "", err | ||
683 | } | ||
684 | // extracts the string with the appropriate number of bytes | ||
685 | headerName, err := extractString(body, int(headerNameLen)) | ||
686 | if err != nil { | ||
687 | return "", err | ||
688 | } | ||
689 | return strings.TrimPrefix(headerName, ":"), nil | ||
690 | } | ||
691 | |||
692 | // extractHeaderValue extracts the second half of the header message, the | ||
693 | // header value | ||
694 | func extractHeaderValue(body io.Reader) (string, error) { | ||
695 | bodyLen, err := extractUint16(body) | ||
696 | if err != nil { | ||
697 | return "", err | ||
698 | } | ||
699 | bodyName, err := extractString(body, int(bodyLen)) | ||
700 | if err != nil { | ||
701 | return "", err | ||
702 | } | ||
703 | return bodyName, nil | ||
704 | } | ||
705 | |||
706 | // extractString extracts a string of exactly lenBytes bytes from the reader. | ||
707 | func extractString(source io.Reader, lenBytes int) (string, error) { | ||
708 | myVal := make([]byte, lenBytes) | ||
709 | _, err := readFull(source, myVal) // readFull guards against short reads | ||
710 | if err != nil { | ||
711 | return "", err | ||
712 | } | ||
713 | return string(myVal), nil | ||
714 | } | ||
715 | |||
716 | // extractUint32 extracts a 4 byte integer from the byte array. | ||
717 | func extractUint32(r io.Reader) (uint32, error) { | ||
718 | buf := make([]byte, 4) | ||
719 | _, err := readFull(r, buf) | ||
720 | if err != nil { | ||
721 | return 0, err | ||
722 | } | ||
723 | return binary.BigEndian.Uint32(buf), nil | ||
724 | } | ||
725 | |||
726 | // extractUint16 extracts a 2 byte integer from the byte array. | ||
727 | func extractUint16(r io.Reader) (uint16, error) { | ||
728 | buf := make([]byte, 2) | ||
729 | _, err := readFull(r, buf) | ||
730 | if err != nil { | ||
731 | return 0, err | ||
732 | } | ||
733 | return binary.BigEndian.Uint16(buf), nil | ||
734 | } | ||
735 | |||
736 | // extractUint8 extracts a 1 byte integer from the byte array. | ||
737 | func extractUint8(r io.Reader) (uint8, error) { | ||
738 | buf := make([]byte, 1) | ||
739 | _, err := readFull(r, buf) | ||
740 | if err != nil { | ||
741 | return 0, err | ||
742 | } | ||
743 | return buf[0], nil | ||
744 | } | ||
745 | |||
746 | // checkCRC ensures that the CRC matches with the one from the reader. | ||
747 | func checkCRC(r io.Reader, expect uint32) error { | ||
748 | msgCRC, err := extractUint32(r) | ||
749 | if err != nil { | ||
750 | return err | ||
751 | } | ||
752 | |||
753 | if msgCRC != expect { | ||
754 | return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect) | ||
755 | } | ||
756 | return nil | ||
757 | } | ||
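Putting the extract helpers together, each header on the wire is: a 1-byte name length, the ":"-prefixed name, a 1-byte value type (7 = string, which the loop above skips), a 2-byte big-endian value length, and the value bytes. A synthetic round-trip of that layout, with the decoding inlined since the helpers are unexported:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode one header the way the event stream lays it out.
	var buf bytes.Buffer
	name, value := ":event-type", "Records"
	buf.WriteByte(byte(len(name)))                            // 1-byte name length
	buf.WriteString(name)                                     // ":"-prefixed name
	buf.WriteByte(7)                                          // value type: 7 = string
	binary.Write(&buf, binary.BigEndian, uint16(len(value))) // 2-byte value length
	buf.WriteString(value)

	// Decode it again, mirroring extractHeaderType/extractHeaderValue.
	b := buf.Bytes()
	nameLen := int(b[0])
	hdrName := string(b[1 : 1+nameLen]) // ":event-type"
	off := 1 + nameLen + 1              // skip the value type byte
	valLen := int(binary.BigEndian.Uint16(b[off : off+2]))
	hdrValue := string(b[off+2 : off+2+valLen])

	fmt.Println(hdrName[1:], "=", hdrValue) // event-type = Records
}
```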
diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go new file mode 100644 index 0000000..b043dc4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "net/http" | ||
23 | |||
24 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
25 | ) | ||
26 | |||
27 | // BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to | ||
28 | // control cancellations and timeouts. | ||
29 | func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { | ||
30 | // Input validation. | ||
31 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
32 | return false, err | ||
33 | } | ||
34 | |||
35 | // Execute HEAD on bucketName. | ||
36 | resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ | ||
37 | bucketName: bucketName, | ||
38 | contentSHA256Hex: emptySHA256Hex, | ||
39 | }) | ||
40 | defer closeResponse(resp) | ||
41 | if err != nil { | ||
42 | if ToErrorResponse(err).Code == "NoSuchBucket" { | ||
43 | return false, nil | ||
44 | } | ||
45 | return false, err | ||
46 | } | ||
47 | if resp != nil { | ||
48 | resperr := httpRespToErrorResponse(resp, bucketName, "") | ||
49 | if ToErrorResponse(resperr).Code == "NoSuchBucket" { | ||
50 | return false, nil | ||
51 | } | ||
52 | if resp.StatusCode != http.StatusOK { | ||
53 | return false, httpRespToErrorResponse(resp, bucketName, "") | ||
54 | } | ||
55 | } | ||
56 | return true, nil | ||
57 | } | ||
58 | |||
59 | // StatObject verifies that the object exists and that you have permission to | ||
60 | // access it, and returns information about the object. | ||
61 | func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { | ||
62 | // Input validation. | ||
63 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
64 | return ObjectInfo{}, err | ||
65 | } | ||
66 | if err := s3utils.CheckValidObjectName(objectName); err != nil { | ||
67 | return ObjectInfo{}, err | ||
68 | } | ||
69 | headers := opts.Header() | ||
70 | if opts.Internal.ReplicationDeleteMarker { | ||
71 | headers.Set(minIOBucketReplicationDeleteMarker, "true") | ||
72 | } | ||
73 | if opts.Internal.IsReplicationReadyForDeleteMarker { | ||
74 | headers.Set(isMinioTgtReplicationReady, "true") | ||
75 | } | ||
76 | |||
77 | // Execute HEAD on objectName. | ||
78 | resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ | ||
79 | bucketName: bucketName, | ||
80 | objectName: objectName, | ||
81 | queryValues: opts.toQueryValues(), | ||
82 | contentSHA256Hex: emptySHA256Hex, | ||
83 | customHeader: headers, | ||
84 | }) | ||
85 | defer closeResponse(resp) | ||
86 | if err != nil { | ||
87 | return ObjectInfo{}, err | ||
88 | } | ||
89 | |||
90 | if resp != nil { | ||
91 | deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" | ||
92 | replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true" | ||
93 | if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { | ||
94 | if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { | ||
95 | errResp := ErrorResponse{ | ||
96 | StatusCode: resp.StatusCode, | ||
97 | Code: "MethodNotAllowed", | ||
98 | Message: "The specified method is not allowed against this resource.", | ||
99 | BucketName: bucketName, | ||
100 | Key: objectName, | ||
101 | } | ||
102 | return ObjectInfo{ | ||
103 | VersionID: resp.Header.Get(amzVersionID), | ||
104 | IsDeleteMarker: deleteMarker, | ||
105 | }, errResp | ||
106 | } | ||
107 | return ObjectInfo{ | ||
108 | VersionID: resp.Header.Get(amzVersionID), | ||
109 | IsDeleteMarker: deleteMarker, | ||
110 | ReplicationReady: replicationReady, // whether delete marker can be replicated | ||
111 | }, httpRespToErrorResponse(resp, bucketName, objectName) | ||
112 | } | ||
113 | } | ||
114 | |||
115 | return ToObjectInfo(bucketName, objectName, resp.Header) | ||
116 | } | ||
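A short usage sketch tying the two HEAD-based calls together; `clnt` is assumed to be a `*minio.Client` built elsewhere and the names are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

func statExample(clnt *minio.Client) error {
	ok, err := clnt.BucketExists(context.Background(), "my-bucket")
	if err != nil {
		return err
	}
	if !ok {
		log.Println("bucket does not exist or is not accessible")
		return nil
	}

	info, err := clnt.StatObject(context.Background(), "my-bucket", "a.txt", minio.StatObjectOptions{})
	if err != nil {
		return err
	}
	log.Println("size:", info.Size, "etag:", info.ETag, "modified:", info.LastModified)
	return nil
}
```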
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go new file mode 100644 index 0000000..f8a9b34 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api.go | |||
@@ -0,0 +1,995 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2023 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "context" | ||
23 | "encoding/base64" | ||
24 | "errors" | ||
25 | "fmt" | ||
26 | "hash/crc32" | ||
27 | "io" | ||
28 | "math/rand" | ||
29 | "net" | ||
30 | "net/http" | ||
31 | "net/http/cookiejar" | ||
32 | "net/http/httptrace" | ||
33 | "net/http/httputil" | ||
34 | "net/url" | ||
35 | "os" | ||
36 | "runtime" | ||
37 | "strings" | ||
38 | "sync" | ||
39 | "sync/atomic" | ||
40 | "time" | ||
41 | |||
42 | md5simd "github.com/minio/md5-simd" | ||
43 | "github.com/minio/minio-go/v7/pkg/credentials" | ||
44 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
45 | "github.com/minio/minio-go/v7/pkg/signer" | ||
46 | "golang.org/x/net/publicsuffix" | ||
47 | ) | ||
48 | |||
49 | // Client implements Amazon S3 compatible methods. | ||
50 | type Client struct { | ||
51 | // Standard options. | ||
52 | |||
53 | // Parsed endpoint url provided by the user. | ||
54 | endpointURL *url.URL | ||
55 | |||
56 | // Holds various credential providers. | ||
57 | credsProvider *credentials.Credentials | ||
58 | |||
59 | // Custom signerType value overrides all credentials. | ||
60 | overrideSignerType credentials.SignatureType | ||
61 | |||
62 | // User supplied. | ||
63 | appInfo struct { | ||
64 | appName string | ||
65 | appVersion string | ||
66 | } | ||
67 | |||
68 | // Indicate whether we are using https or not | ||
69 | secure bool | ||
70 | |||
71 | // Needs allocation. | ||
72 | httpClient *http.Client | ||
73 | httpTrace *httptrace.ClientTrace | ||
74 | bucketLocCache *bucketLocationCache | ||
75 | |||
76 | // Advanced functionality. | ||
77 | isTraceEnabled bool | ||
78 | traceErrorsOnly bool | ||
79 | traceOutput io.Writer | ||
80 | |||
81 | // S3 specific accelerated endpoint. | ||
82 | s3AccelerateEndpoint string | ||
83 | |||
84 | // Region endpoint | ||
85 | region string | ||
86 | |||
87 | // Random seed. | ||
88 | random *rand.Rand | ||
89 | |||
90 | // lookup indicates type of url lookup supported by server. If not specified, | ||
91 | // default to Auto. | ||
92 | lookup BucketLookupType | ||
93 | |||
94 | // Factory for MD5 hash functions. | ||
95 | md5Hasher func() md5simd.Hasher | ||
96 | sha256Hasher func() md5simd.Hasher | ||
97 | |||
98 | healthStatus int32 | ||
99 | |||
100 | trailingHeaderSupport bool | ||
101 | } | ||
102 | |||
103 | // Options for New method | ||
104 | type Options struct { | ||
105 | Creds *credentials.Credentials | ||
106 | Secure bool | ||
107 | Transport http.RoundTripper | ||
108 | Trace *httptrace.ClientTrace | ||
109 | Region string | ||
110 | BucketLookup BucketLookupType | ||
111 | |||
112 | // Allows setting a custom region lookup based on URL pattern; | ||
113 | // not all URL patterns are covered by this library, so if you | ||
114 | // have custom endpoints with many regions you can use this | ||
115 | // function to perform region lookups appropriately. | ||
116 | CustomRegionViaURL func(u url.URL) string | ||
117 | |||
118 | // TrailingHeaders indicates server support of trailing headers. | ||
119 | // Only supported for v4 signatures. | ||
120 | TrailingHeaders bool | ||
121 | |||
122 | // Custom hash routines. Leave nil to use standard. | ||
123 | CustomMD5 func() md5simd.Hasher | ||
124 | CustomSHA256 func() md5simd.Hasher | ||
125 | } | ||
126 | |||
127 | // Global constants. | ||
128 | const ( | ||
129 | libraryName = "minio-go" | ||
130 | libraryVersion = "v7.0.66" | ||
131 | ) | ||
132 | |||
133 | // User Agent should always follow the below style. | ||
134 | // Please open an issue to discuss any new changes here. | ||
135 | // | ||
136 | // MinIO (OS; ARCH) LIB/VER APP/VER | ||
137 | const ( | ||
138 | libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " | ||
139 | libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion | ||
140 | ) | ||
141 | |||
142 | // BucketLookupType is type of url lookup supported by server. | ||
143 | type BucketLookupType int | ||
144 | |||
145 | // Different types of url lookup supported by the server. Initialized to BucketLookupAuto. | ||
146 | const ( | ||
147 | BucketLookupAuto BucketLookupType = iota | ||
148 | BucketLookupDNS | ||
149 | BucketLookupPath | ||
150 | ) | ||
151 | |||
152 | // New - instantiate minio client with options | ||
153 | func New(endpoint string, opts *Options) (*Client, error) { | ||
154 | if opts == nil { | ||
155 | return nil, errors.New("no options provided") | ||
156 | } | ||
157 | clnt, err := privateNew(endpoint, opts) | ||
158 | if err != nil { | ||
159 | return nil, err | ||
160 | } | ||
161 | // If Amazon S3 set to signature v4. | ||
162 | if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { | ||
163 | clnt.overrideSignerType = credentials.SignatureV4 | ||
164 | } | ||
165 | |||
166 | return clnt, nil | ||
167 | } | ||
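The canonical construction sketch for the options above; the endpoint and keys are placeholders, and NewStaticV4 is one of the providers in pkg/credentials:

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	clnt, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""), // placeholders
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("endpoint:", clnt.EndpointURL())
}
```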
168 | |||
169 | // EndpointURL returns the URL of the S3 endpoint. | ||
170 | func (c *Client) EndpointURL() *url.URL { | ||
171 | endpoint := *c.endpointURL // copy to prevent callers from modifying internal state | ||
172 | return &endpoint | ||
173 | } | ||
174 | |||
175 | // lockedRandSource provides protected rand source, implements rand.Source interface. | ||
176 | type lockedRandSource struct { | ||
177 | lk sync.Mutex | ||
178 | src rand.Source | ||
179 | } | ||
180 | |||
181 | // Int63 returns a non-negative pseudo-random 63-bit integer as an int64. | ||
182 | func (r *lockedRandSource) Int63() (n int64) { | ||
183 | r.lk.Lock() | ||
184 | n = r.src.Int63() | ||
185 | r.lk.Unlock() | ||
186 | return | ||
187 | } | ||
188 | |||
189 | // Seed uses the provided seed value to initialize the generator to a | ||
190 | // deterministic state. | ||
191 | func (r *lockedRandSource) Seed(seed int64) { | ||
192 | r.lk.Lock() | ||
193 | r.src.Seed(seed) | ||
194 | r.lk.Unlock() | ||
195 | } | ||
196 | |||
197 | func privateNew(endpoint string, opts *Options) (*Client, error) { | ||
198 | // construct endpoint. | ||
199 | endpointURL, err := getEndpointURL(endpoint, opts.Secure) | ||
200 | if err != nil { | ||
201 | return nil, err | ||
202 | } | ||
203 | |||
204 | // Initialize cookies to preserve server sent cookies if any and replay | ||
205 | // them upon each request. | ||
206 | jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) | ||
207 | if err != nil { | ||
208 | return nil, err | ||
209 | } | ||
210 | |||
211 | // instantiate new Client. | ||
212 | clnt := new(Client) | ||
213 | |||
214 | // Save the credentials. | ||
215 | clnt.credsProvider = opts.Creds | ||
216 | |||
217 | // Remember whether we are using https or not | ||
218 | clnt.secure = opts.Secure | ||
219 | |||
220 | // Save endpoint URL, user agent for future uses. | ||
221 | clnt.endpointURL = endpointURL | ||
222 | |||
223 | transport := opts.Transport | ||
224 | if transport == nil { | ||
225 | transport, err = DefaultTransport(opts.Secure) | ||
226 | if err != nil { | ||
227 | return nil, err | ||
228 | } | ||
229 | } | ||
230 | |||
231 | clnt.httpTrace = opts.Trace | ||
232 | |||
233 | // Instantiate http client and bucket location cache. | ||
234 | clnt.httpClient = &http.Client{ | ||
235 | Jar: jar, | ||
236 | Transport: transport, | ||
237 | CheckRedirect: func(req *http.Request, via []*http.Request) error { | ||
238 | return http.ErrUseLastResponse | ||
239 | }, | ||
240 | } | ||
241 | |||
242 | // Sets custom region; if region is empty the bucket location cache is used automatically. | ||
243 | if opts.Region == "" { | ||
244 | if opts.CustomRegionViaURL != nil { | ||
245 | opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL) | ||
246 | } else { | ||
247 | opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL) | ||
248 | } | ||
249 | } | ||
250 | clnt.region = opts.Region | ||
251 | |||
252 | // Instantiate bucket location cache. | ||
253 | clnt.bucketLocCache = newBucketLocationCache() | ||
254 | |||
255 | // Introduce a new locked random seed. | ||
256 | clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) | ||
257 | |||
258 | // Add default md5 hasher. | ||
259 | clnt.md5Hasher = opts.CustomMD5 | ||
260 | clnt.sha256Hasher = opts.CustomSHA256 | ||
261 | if clnt.md5Hasher == nil { | ||
262 | clnt.md5Hasher = newMd5Hasher | ||
263 | } | ||
264 | if clnt.sha256Hasher == nil { | ||
265 | clnt.sha256Hasher = newSHA256Hasher | ||
266 | } | ||
267 | |||
268 | clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4() | ||
269 | |||
270 | // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined | ||
271 | // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. | ||
272 | clnt.lookup = opts.BucketLookup | ||
273 | |||
274 | // healthcheck is not initialized | ||
275 | clnt.healthStatus = unknown | ||
276 | |||
277 | // Return. | ||
278 | return clnt, nil | ||
279 | } | ||
280 | |||
281 | // SetAppInfo - add application details to user agent. | ||
282 | func (c *Client) SetAppInfo(appName, appVersion string) { | ||
284 | // if app name and version are not set, we do not set a new user agent. | ||
284 | if appName != "" && appVersion != "" { | ||
285 | c.appInfo.appName = appName | ||
286 | c.appInfo.appVersion = appVersion | ||
287 | } | ||
288 | } | ||
289 | |||
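// Editorial usage note: both values must be non-empty for the override to
// take effect, e.g.:
//
//	client.SetAppInfo("myApp", "1.0.0")
//	// setUserAgent then emits: <libraryUserAgent> myApp/1.0.0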
290 | // TraceOn - enable HTTP tracing. | ||
291 | func (c *Client) TraceOn(outputStream io.Writer) { | ||
292 | // if outputStream is nil then default to os.Stdout. | ||
293 | if outputStream == nil { | ||
294 | outputStream = os.Stdout | ||
295 | } | ||
296 | // Sets a new output stream. | ||
297 | c.traceOutput = outputStream | ||
298 | |||
299 | // Enable tracing. | ||
300 | c.isTraceEnabled = true | ||
301 | } | ||
302 | |||
303 | // TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced. | ||
304 | func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) { | ||
305 | c.TraceOn(outputStream) | ||
306 | c.traceErrorsOnly = true | ||
307 | } | ||
308 | |||
309 | // TraceErrorsOnlyOff - turns off errors-only tracing; everything will be traced after this call. | ||
310 | // If all tracing needs to be turned off, call TraceOff(). | ||
311 | func (c *Client) TraceErrorsOnlyOff() { | ||
312 | c.traceErrorsOnly = false | ||
313 | } | ||
314 | |||
315 | // TraceOff - disable HTTP tracing. | ||
316 | func (c *Client) TraceOff() { | ||
317 | // Disable tracing. | ||
318 | c.isTraceEnabled = false | ||
319 | c.traceErrorsOnly = false | ||
320 | } | ||
321 | |||
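// Editorial usage sketch for the tracing toggles above:
//
//	client.TraceOn(os.Stderr)            // dump every request/response
//	client.TraceErrorsOnlyOn(os.Stderr)  // or: dump only non-OK exchanges
//	defer client.TraceOff()              // stop all tracing on exit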
322 | // SetS3TransferAccelerate - turns the S3 accelerated endpoint on or off for all your | ||
323 | // requests. This feature is specific to S3; for all other endpoints this | ||
324 | // function does nothing. To read further details on S3 transfer acceleration | ||
325 | // please visit - | ||
326 | // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html | ||
327 | func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { | ||
328 | if s3utils.IsAmazonEndpoint(*c.endpointURL) { | ||
329 | c.s3AccelerateEndpoint = accelerateEndpoint | ||
330 | } | ||
331 | } | ||
332 | |||
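// Editorial usage sketch (endpoint value per the AWS docs linked above):
//
//	client.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")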
333 | // hashMaterials provides relevant initialized hash algo writers | ||
334 | // based on the expected signature type. | ||
335 | // | ||
336 | // - For a signature v4 request, if the connection is insecure, compute only sha256. | ||
337 | // - For a signature v4 request, if the connection is secure, compute only md5. | ||
338 | // - For an anonymous request, compute md5. | ||
339 | func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { | ||
340 | hashSums = make(map[string][]byte) | ||
341 | hashAlgos = make(map[string]md5simd.Hasher) | ||
342 | if c.overrideSignerType.IsV4() { | ||
343 | if c.secure { | ||
344 | hashAlgos["md5"] = c.md5Hasher() | ||
345 | } else { | ||
346 | if isSha256Requested { | ||
347 | hashAlgos["sha256"] = c.sha256Hasher() | ||
348 | } | ||
349 | } | ||
350 | } else { | ||
351 | if c.overrideSignerType.IsAnonymous() { | ||
352 | hashAlgos["md5"] = c.md5Hasher() | ||
353 | } | ||
354 | } | ||
355 | if isMd5Requested { | ||
356 | hashAlgos["md5"] = c.md5Hasher() | ||
357 | } | ||
358 | return hashAlgos, hashSums | ||
359 | } | ||
360 | |||
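// Editorial summary of the branches above:
//
//	V4 signer + TLS        -> md5 only
//	V4 signer + plaintext  -> sha256 (when isSha256Requested)
//	anonymous              -> md5
//	isMd5Requested == true -> md5 is always added on top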
361 | const ( | ||
362 | unknown = -1 | ||
363 | offline = 0 | ||
364 | online = 1 | ||
365 | ) | ||
366 | |||
367 | // IsOnline returns true if healthcheck is enabled and the client is online. | ||
368 | // If the HealthCheck function has not been called, this will always return true. | ||
369 | func (c *Client) IsOnline() bool { | ||
370 | return !c.IsOffline() | ||
371 | } | ||
372 | |||
373 | // markOffline - flips healthStatus from online to offline. | ||
374 | func (c *Client) markOffline() { | ||
375 | atomic.CompareAndSwapInt32(&c.healthStatus, online, offline) | ||
376 | } | ||
377 | |||
378 | // IsOffline returns true if healthcheck is enabled and the client is offline. | ||
379 | // If the HealthCheck function has not been called, this will always return false. | ||
380 | func (c *Client) IsOffline() bool { | ||
381 | return atomic.LoadInt32(&c.healthStatus) == offline | ||
382 | } | ||
383 | |||
384 | // HealthCheck starts a healthcheck to see if the endpoint is up. | ||
385 | // Returns a context cancellation function to stop the health check, | ||
386 | // and an error if the health check is already started. | ||
387 | func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) { | ||
388 | if atomic.LoadInt32(&c.healthStatus) != unknown { | ||
389 | return nil, fmt.Errorf("health check is running") | ||
390 | } | ||
391 | if hcDuration < 1*time.Second { | ||
392 | return nil, fmt.Errorf("health check duration should be at least 1 second") | ||
393 | } | ||
394 | probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-") | ||
395 | ctx, cancelFn := context.WithCancel(context.Background()) | ||
396 | atomic.StoreInt32(&c.healthStatus, offline) | ||
397 | { | ||
398 | // Change to online, if we can connect. | ||
399 | gctx, gcancel := context.WithTimeout(ctx, 3*time.Second) | ||
400 | _, err := c.getBucketLocation(gctx, probeBucketName) | ||
401 | gcancel() | ||
402 | if !IsNetworkOrHostDown(err, false) { | ||
403 | switch ToErrorResponse(err).Code { | ||
404 | case "NoSuchBucket", "AccessDenied", "": | ||
405 | atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) | ||
406 | } | ||
407 | } | ||
408 | } | ||
409 | |||
410 | go func(duration time.Duration) { | ||
411 | timer := time.NewTimer(duration) | ||
412 | defer timer.Stop() | ||
413 | for { | ||
414 | select { | ||
415 | case <-ctx.Done(): | ||
416 | atomic.StoreInt32(&c.healthStatus, unknown) | ||
417 | return | ||
418 | case <-timer.C: | ||
419 | 				// Re-probe ONLY while the connection is marked offline; an online client skips the probe. | ||
420 | if c.IsOffline() { | ||
421 | gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second) | ||
422 | _, err := c.getBucketLocation(gctx, probeBucketName) | ||
423 | gcancel() | ||
424 | if !IsNetworkOrHostDown(err, false) { | ||
425 | switch ToErrorResponse(err).Code { | ||
426 | case "NoSuchBucket", "AccessDenied", "": | ||
427 | atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) | ||
428 | } | ||
429 | } | ||
430 | } | ||
431 | |||
432 | timer.Reset(duration) | ||
433 | } | ||
434 | } | ||
435 | }(hcDuration) | ||
436 | return cancelFn, nil | ||
437 | } | ||
438 | |||
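// Editorial usage sketch for the health check API above:
//
//	cancel, err := client.HealthCheck(5 * time.Second)
//	if err != nil {
//		return err // already running, or duration < 1s
//	}
//	defer cancel() // stops the probing goroutine
//	if client.IsOnline() {
//		// safe to issue requests
//	}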
439 | // requestMetadata - is container for all the values to make a request. | ||
440 | type requestMetadata struct { | ||
441 | // If set newRequest presigns the URL. | ||
442 | presignURL bool | ||
443 | |||
444 | // User supplied. | ||
445 | bucketName string | ||
446 | objectName string | ||
447 | queryValues url.Values | ||
448 | customHeader http.Header | ||
449 | extraPresignHeader http.Header | ||
450 | expires int64 | ||
451 | |||
452 | // Generated by our internal code. | ||
453 | bucketLocation string | ||
454 | contentBody io.Reader | ||
455 | contentLength int64 | ||
456 | contentMD5Base64 string // carries base64 encoded md5sum | ||
457 | contentSHA256Hex string // carries hex encoded sha256sum | ||
458 | streamSha256 bool | ||
459 | addCrc bool | ||
460 | trailer http.Header // (http.Request).Trailer. Requires v4 signature. | ||
461 | } | ||
462 | |||
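// Editorial sketch (hypothetical values) of how callers inside this package
// fill requestMetadata, e.g. for a simple GET of an object:
//
//	meta := requestMetadata{
//		bucketName:       "mybucket",
//		objectName:       "photos/cat.png",
//		contentSHA256Hex: emptySHA256Hex,
//	}
//	resp, err := c.executeMethod(ctx, http.MethodGet, meta)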
463 | // dumpHTTP - dump HTTP request and response. | ||
464 | func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error { | ||
465 | // Starts http dump. | ||
466 | _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") | ||
467 | if err != nil { | ||
468 | return err | ||
469 | } | ||
470 | |||
471 | // Filter out Signature field from Authorization header. | ||
472 | origAuth := req.Header.Get("Authorization") | ||
473 | if origAuth != "" { | ||
474 | req.Header.Set("Authorization", redactSignature(origAuth)) | ||
475 | } | ||
476 | |||
477 | // Only display request header. | ||
478 | reqTrace, err := httputil.DumpRequestOut(req, false) | ||
479 | if err != nil { | ||
480 | return err | ||
481 | } | ||
482 | |||
483 | // Write request to trace output. | ||
484 | _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) | ||
485 | if err != nil { | ||
486 | return err | ||
487 | } | ||
488 | |||
489 | // Only display response header. | ||
490 | var respTrace []byte | ||
491 | |||
492 | // For errors we make sure to dump response body as well. | ||
493 | if resp.StatusCode != http.StatusOK && | ||
494 | resp.StatusCode != http.StatusPartialContent && | ||
495 | resp.StatusCode != http.StatusNoContent { | ||
496 | respTrace, err = httputil.DumpResponse(resp, true) | ||
497 | if err != nil { | ||
498 | return err | ||
499 | } | ||
500 | } else { | ||
501 | respTrace, err = httputil.DumpResponse(resp, false) | ||
502 | if err != nil { | ||
503 | return err | ||
504 | } | ||
505 | } | ||
506 | |||
507 | // Write response to trace output. | ||
508 | _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) | ||
509 | if err != nil { | ||
510 | return err | ||
511 | } | ||
512 | |||
513 | // Ends the http dump. | ||
514 | _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") | ||
515 | if err != nil { | ||
516 | return err | ||
517 | } | ||
518 | |||
519 | // Returns success. | ||
520 | return nil | ||
521 | } | ||
522 | |||
523 | // do - execute http request. | ||
524 | func (c *Client) do(req *http.Request) (resp *http.Response, err error) { | ||
525 | defer func() { | ||
526 | if IsNetworkOrHostDown(err, false) { | ||
527 | c.markOffline() | ||
528 | } | ||
529 | }() | ||
530 | |||
531 | resp, err = c.httpClient.Do(req) | ||
532 | if err != nil { | ||
533 | // Handle this specifically for now until future Golang versions fix this issue properly. | ||
534 | if urlErr, ok := err.(*url.Error); ok { | ||
535 | if strings.Contains(urlErr.Err.Error(), "EOF") { | ||
536 | return nil, &url.Error{ | ||
537 | Op: urlErr.Op, | ||
538 | URL: urlErr.URL, | ||
539 | Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), | ||
540 | } | ||
541 | } | ||
542 | } | ||
543 | return nil, err | ||
544 | } | ||
545 | |||
546 | 	// Response should never be nil; report an error if that's the case. | ||
547 | if resp == nil { | ||
548 | msg := "Response is empty. " + reportIssue | ||
549 | return nil, errInvalidArgument(msg) | ||
550 | } | ||
551 | |||
552 | // If trace is enabled, dump http request and response, | ||
553 | 	// except when traceErrorsOnly is enabled and the response's status code is OK | ||
554 | if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) { | ||
555 | err = c.dumpHTTP(req, resp) | ||
556 | if err != nil { | ||
557 | return nil, err | ||
558 | } | ||
559 | } | ||
560 | |||
561 | return resp, nil | ||
562 | } | ||
563 | |||
564 | // List of success status codes. | ||
565 | var successStatus = []int{ | ||
566 | http.StatusOK, | ||
567 | http.StatusNoContent, | ||
568 | http.StatusPartialContent, | ||
569 | } | ||
570 | |||
571 | // executeMethod - instantiates a given method, and retries the | ||
572 | // request upon any error up to maxRetries attempts, pacing the | ||
573 | // retries with a capped exponential backoff with jitter. | ||
574 | func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { | ||
575 | if c.IsOffline() { | ||
576 | return nil, errors.New(c.endpointURL.String() + " is offline.") | ||
577 | } | ||
578 | |||
579 | var retryable bool // Indicates if request can be retried. | ||
580 | var bodySeeker io.Seeker // Extracted seeker from io.Reader. | ||
581 | reqRetry := MaxRetry // Indicates how many times we can retry the request | ||
582 | |||
583 | if metadata.contentBody != nil { | ||
584 | 		// If the body is seekable then the request is retryable. | ||
585 | bodySeeker, retryable = metadata.contentBody.(io.Seeker) | ||
586 | switch bodySeeker { | ||
587 | case os.Stdin, os.Stdout, os.Stderr: | ||
588 | retryable = false | ||
589 | } | ||
590 | // Retry only when reader is seekable | ||
591 | if !retryable { | ||
592 | reqRetry = 1 | ||
593 | } | ||
594 | |||
595 | // Figure out if the body can be closed - if yes | ||
596 | // we will definitely close it upon the function | ||
597 | // return. | ||
598 | bodyCloser, ok := metadata.contentBody.(io.Closer) | ||
599 | if ok { | ||
600 | defer bodyCloser.Close() | ||
601 | } | ||
602 | } | ||
603 | |||
604 | // Create cancel context to control 'newRetryTimer' go routine. | ||
605 | retryCtx, cancel := context.WithCancel(ctx) | ||
606 | |||
607 | // Indicate to our routine to exit cleanly upon return. | ||
608 | defer cancel() | ||
609 | |||
610 | for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { | ||
611 | 		// Retry executes the following function body if the request has an | ||
612 | 		// error, until maxRetries have been exhausted; retry attempts are | ||
613 | 		// performed after waiting for a given period of time in an | ||
614 | 		// exponentially delayed fashion. | ||
615 | if retryable { | ||
616 | // Seek back to beginning for each attempt. | ||
617 | if _, err = bodySeeker.Seek(0, 0); err != nil { | ||
618 | // If seek failed, no need to retry. | ||
619 | return nil, err | ||
620 | } | ||
621 | } | ||
622 | |||
623 | if metadata.addCrc { | ||
624 | if metadata.trailer == nil { | ||
625 | metadata.trailer = make(http.Header, 1) | ||
626 | } | ||
627 | crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) | ||
628 | metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { | ||
629 | // Update trailer when done. | ||
630 | metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) | ||
631 | }) | ||
632 | metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) | ||
633 | } | ||
634 | // Instantiate a new request. | ||
635 | var req *http.Request | ||
636 | req, err = c.newRequest(ctx, method, metadata) | ||
637 | if err != nil { | ||
638 | errResponse := ToErrorResponse(err) | ||
639 | if isS3CodeRetryable(errResponse.Code) { | ||
640 | continue // Retry. | ||
641 | } | ||
642 | |||
643 | return nil, err | ||
644 | } | ||
645 | |||
646 | // Initiate the request. | ||
647 | res, err = c.do(req) | ||
648 | if err != nil { | ||
649 | if isRequestErrorRetryable(err) { | ||
650 | // Retry the request | ||
651 | continue | ||
652 | } | ||
653 | return nil, err | ||
654 | } | ||
655 | |||
656 | // For any known successful http status, return quickly. | ||
657 | for _, httpStatus := range successStatus { | ||
658 | if httpStatus == res.StatusCode { | ||
659 | return res, nil | ||
660 | } | ||
661 | } | ||
662 | |||
663 | // Read the body to be saved later. | ||
664 | errBodyBytes, err := io.ReadAll(res.Body) | ||
665 | // res.Body should be closed | ||
666 | closeResponse(res) | ||
667 | if err != nil { | ||
668 | return nil, err | ||
669 | } | ||
670 | |||
671 | // Save the body. | ||
672 | errBodySeeker := bytes.NewReader(errBodyBytes) | ||
673 | res.Body = io.NopCloser(errBodySeeker) | ||
674 | |||
675 | 		// For errors, verify if it's retryable; otherwise fail quickly. | ||
676 | errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) | ||
677 | |||
678 | // Save the body back again. | ||
679 | errBodySeeker.Seek(0, 0) // Seek back to starting point. | ||
680 | res.Body = io.NopCloser(errBodySeeker) | ||
681 | |||
682 | 		// If the bucket region is set in the error response and the error | ||
683 | 		// code indicates an invalid region, we can retry the request | ||
684 | 		// with the new region. | ||
685 | 		// | ||
686 | 		// Additionally, we should only retry if bucketLocation and custom | ||
687 | 		// region are empty. | ||
688 | if c.region == "" { | ||
689 | switch errResponse.Code { | ||
690 | case "AuthorizationHeaderMalformed": | ||
691 | fallthrough | ||
692 | case "InvalidRegion": | ||
693 | fallthrough | ||
694 | case "AccessDenied": | ||
695 | if errResponse.Region == "" { | ||
696 | 					// Region is empty; we simply return the error. | ||
697 | return res, err | ||
698 | } | ||
699 | 				// Region is not empty; figure out how to | ||
700 | 				// handle this appropriately. | ||
701 | if metadata.bucketName != "" { | ||
702 | // Gather Cached location only if bucketName is present. | ||
703 | if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region { | ||
704 | c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) | ||
705 | continue // Retry. | ||
706 | } | ||
707 | } else { | ||
708 | // This is for ListBuckets() fallback. | ||
709 | if errResponse.Region != metadata.bucketLocation { | ||
710 | // Retry if the error response has a different region | ||
711 | // than the request we just made. | ||
712 | metadata.bucketLocation = errResponse.Region | ||
713 | continue // Retry | ||
714 | } | ||
715 | } | ||
716 | } | ||
717 | } | ||
718 | |||
719 | // Verify if error response code is retryable. | ||
720 | if isS3CodeRetryable(errResponse.Code) { | ||
721 | continue // Retry. | ||
722 | } | ||
723 | |||
724 | // Verify if http status code is retryable. | ||
725 | if isHTTPStatusRetryable(res.StatusCode) { | ||
726 | continue // Retry. | ||
727 | } | ||
728 | |||
729 | // For all other cases break out of the retry loop. | ||
730 | break | ||
731 | } | ||
732 | |||
733 | 	// Return an error when the retry context is canceled or its deadline is exceeded | ||
734 | if e := retryCtx.Err(); e != nil { | ||
735 | return nil, e | ||
736 | } | ||
737 | |||
738 | return res, err | ||
739 | } | ||
740 | |||
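// Editorial note on the retry pacing above: newRetryTimer (retry.go) yields
// attempts spaced by a capped exponential backoff with jitter, roughly:
//
//	delay := DefaultRetryUnit * (1 << attempt) // exponential growth
//	if delay > DefaultRetryCap {
//		delay = DefaultRetryCap // never wait longer than the cap
//	}
//	delay -= time.Duration(rand.Float64() * MaxJitter * float64(delay)) // jitter
//
// This is a sketch of the shape, not the exact implementation.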
741 | // newRequest - instantiate a new HTTP request for a given method. | ||
742 | func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { | ||
743 | // If no method is supplied default to 'POST'. | ||
744 | if method == "" { | ||
745 | method = http.MethodPost | ||
746 | } | ||
747 | |||
748 | location := metadata.bucketLocation | ||
749 | if location == "" { | ||
750 | if metadata.bucketName != "" { | ||
751 | // Gather location only if bucketName is present. | ||
752 | location, err = c.getBucketLocation(ctx, metadata.bucketName) | ||
753 | if err != nil { | ||
754 | return nil, err | ||
755 | } | ||
756 | } | ||
757 | if location == "" { | ||
758 | location = getDefaultLocation(*c.endpointURL, c.region) | ||
759 | } | ||
760 | } | ||
761 | |||
762 | 	// Check if the target URL supports virtual host style. | ||
763 | // We explicitly disallow MakeBucket calls to not use virtual DNS style, | ||
764 | // since the resolution may fail. | ||
765 | isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0) | ||
766 | isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket | ||
767 | |||
768 | // Construct a new target URL. | ||
769 | targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, | ||
770 | isVirtualHost, metadata.queryValues) | ||
771 | if err != nil { | ||
772 | return nil, err | ||
773 | } | ||
774 | |||
775 | if c.httpTrace != nil { | ||
776 | ctx = httptrace.WithClientTrace(ctx, c.httpTrace) | ||
777 | } | ||
778 | |||
779 | // Initialize a new HTTP request for the method. | ||
780 | req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil) | ||
781 | if err != nil { | ||
782 | return nil, err | ||
783 | } | ||
784 | |||
785 | // Get credentials from the configured credentials provider. | ||
786 | value, err := c.credsProvider.Get() | ||
787 | if err != nil { | ||
788 | return nil, err | ||
789 | } | ||
790 | |||
791 | var ( | ||
792 | signerType = value.SignerType | ||
793 | accessKeyID = value.AccessKeyID | ||
794 | secretAccessKey = value.SecretAccessKey | ||
795 | sessionToken = value.SessionToken | ||
796 | ) | ||
797 | |||
798 | 	// If a custom signer is set, override the behavior. | ||
799 | if c.overrideSignerType != credentials.SignatureDefault { | ||
800 | signerType = c.overrideSignerType | ||
801 | } | ||
802 | |||
803 | // If signerType returned by credentials helper is anonymous, | ||
804 | // then do not sign regardless of signerType override. | ||
805 | if value.SignerType == credentials.SignatureAnonymous { | ||
806 | signerType = credentials.SignatureAnonymous | ||
807 | } | ||
808 | |||
809 | 	// Generate a presigned URL if requested, and return right here. | ||
810 | if metadata.expires != 0 && metadata.presignURL { | ||
811 | if signerType.IsAnonymous() { | ||
812 | return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") | ||
813 | } | ||
814 | if metadata.extraPresignHeader != nil { | ||
815 | if signerType.IsV2() { | ||
816 | return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.") | ||
817 | } | ||
818 | for k, v := range metadata.extraPresignHeader { | ||
819 | req.Header.Set(k, v[0]) | ||
820 | } | ||
821 | } | ||
822 | if signerType.IsV2() { | ||
823 | // Presign URL with signature v2. | ||
824 | req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) | ||
825 | } else if signerType.IsV4() { | ||
826 | // Presign URL with signature v4. | ||
827 | req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) | ||
828 | } | ||
829 | return req, nil | ||
830 | } | ||
831 | |||
832 | // Set 'User-Agent' header for the request. | ||
833 | c.setUserAgent(req) | ||
834 | |||
835 | // Set all headers. | ||
836 | for k, v := range metadata.customHeader { | ||
837 | req.Header.Set(k, v[0]) | ||
838 | } | ||
839 | |||
840 | // Go net/http notoriously closes the request body. | ||
841 | // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. | ||
842 | 	// This can cause underlying *os.File seekers to fail; avoid that | ||
843 | // by making sure to wrap the closer as a nop. | ||
844 | if metadata.contentLength == 0 { | ||
845 | req.Body = nil | ||
846 | } else { | ||
847 | req.Body = io.NopCloser(metadata.contentBody) | ||
848 | } | ||
849 | |||
850 | // Set incoming content-length. | ||
851 | req.ContentLength = metadata.contentLength | ||
852 | if req.ContentLength <= -1 { | ||
853 | // For unknown content length, we upload using transfer-encoding: chunked. | ||
854 | req.TransferEncoding = []string{"chunked"} | ||
855 | } | ||
856 | |||
857 | // set md5Sum for content protection. | ||
858 | if len(metadata.contentMD5Base64) > 0 { | ||
859 | req.Header.Set("Content-Md5", metadata.contentMD5Base64) | ||
860 | } | ||
861 | |||
862 | // For anonymous requests just return. | ||
863 | if signerType.IsAnonymous() { | ||
864 | return req, nil | ||
865 | } | ||
866 | |||
867 | switch { | ||
868 | case signerType.IsV2(): | ||
869 | // Add signature version '2' authorization header. | ||
870 | req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) | ||
871 | case metadata.streamSha256 && !c.secure: | ||
872 | if len(metadata.trailer) > 0 { | ||
873 | req.Trailer = metadata.trailer | ||
874 | } | ||
875 | // Streaming signature is used by default for a PUT object request. | ||
876 | 		// Additionally, we check whether the initialized client is secure; | ||
877 | 		// if so, we don't need to perform the streaming signature. | ||
878 | req = signer.StreamingSignV4(req, accessKeyID, | ||
879 | secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher()) | ||
880 | default: | ||
881 | // Set sha256 sum for signature calculation only with signature version '4'. | ||
882 | shaHeader := unsignedPayload | ||
883 | if metadata.contentSHA256Hex != "" { | ||
884 | shaHeader = metadata.contentSHA256Hex | ||
885 | if len(metadata.trailer) > 0 { | ||
886 | // Sanity check, we should not end up here if upstream is sane. | ||
887 | return nil, errors.New("internal error: contentSHA256Hex with trailer not supported") | ||
888 | } | ||
889 | } else if len(metadata.trailer) > 0 { | ||
890 | shaHeader = unsignedPayloadTrailer | ||
891 | } | ||
892 | req.Header.Set("X-Amz-Content-Sha256", shaHeader) | ||
893 | |||
894 | // Add signature version '4' authorization header. | ||
895 | req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) | ||
896 | } | ||
897 | |||
898 | // Return request. | ||
899 | return req, nil | ||
900 | } | ||
901 | |||
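// Editorial example: the presign branch above is normally reached via the
// public Presigned* helpers, e.g.:
//
//	u, err := client.PresignedGetObject(ctx, "mybucket", "cat.png", 15*time.Minute, nil)
//	// u embeds X-Amz-Signature etc.; no Authorization header is required.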
902 | // setUserAgent - sets the 'User-Agent' header on the request. | ||
903 | func (c *Client) setUserAgent(req *http.Request) { | ||
904 | req.Header.Set("User-Agent", libraryUserAgent) | ||
905 | if c.appInfo.appName != "" && c.appInfo.appVersion != "" { | ||
906 | req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) | ||
907 | } | ||
908 | } | ||
909 | |||
910 | // makeTargetURL makes a new target URL. | ||
911 | func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { | ||
912 | host := c.endpointURL.Host | ||
913 | 	// For an Amazon S3 endpoint, try to fetch the location-based endpoint. | ||
914 | if s3utils.IsAmazonEndpoint(*c.endpointURL) { | ||
915 | if c.s3AccelerateEndpoint != "" && bucketName != "" { | ||
916 | // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html | ||
917 | // Disable transfer acceleration for non-compliant bucket names. | ||
918 | if strings.Contains(bucketName, ".") { | ||
919 | return nil, errTransferAccelerationBucket(bucketName) | ||
920 | } | ||
921 | // If transfer acceleration is requested set new host. | ||
922 | // For more details about enabling transfer acceleration read here. | ||
923 | // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html | ||
924 | host = c.s3AccelerateEndpoint | ||
925 | } else { | ||
926 | 			// Do not change the host if the endpoint URL is a FIPS S3 endpoint or an S3 PrivateLink interface endpoint | ||
927 | if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) { | ||
928 | // Fetch new host based on the bucket location. | ||
929 | host = getS3Endpoint(bucketLocation) | ||
930 | } | ||
931 | } | ||
932 | } | ||
933 | |||
934 | // Save scheme. | ||
935 | scheme := c.endpointURL.Scheme | ||
936 | |||
937 | 	// Strip ports 80 and 443 so we won't send these ports in the Host header. | ||
938 | 	// The reason is that browsers and curl automatically remove :80 and :443 | ||
939 | 	// from generated presigned URLs, which then causes a signature mismatch error. | ||
940 | if h, p, err := net.SplitHostPort(host); err == nil { | ||
941 | if scheme == "http" && p == "80" || scheme == "https" && p == "443" { | ||
942 | host = h | ||
943 | if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { | ||
944 | host = "[" + h + "]" | ||
945 | } | ||
946 | } | ||
947 | } | ||
948 | |||
949 | urlStr := scheme + "://" + host + "/" | ||
950 | |||
951 | // Make URL only if bucketName is available, otherwise use the | ||
952 | // endpoint URL. | ||
953 | if bucketName != "" { | ||
954 | // If endpoint supports virtual host style use that always. | ||
955 | // Currently only S3 and Google Cloud Storage would support | ||
956 | // virtual host style. | ||
957 | if isVirtualHostStyle { | ||
958 | urlStr = scheme + "://" + bucketName + "." + host + "/" | ||
959 | if objectName != "" { | ||
960 | urlStr += s3utils.EncodePath(objectName) | ||
961 | } | ||
962 | } else { | ||
963 | // If not fall back to using path style. | ||
964 | urlStr = urlStr + bucketName + "/" | ||
965 | if objectName != "" { | ||
966 | urlStr += s3utils.EncodePath(objectName) | ||
967 | } | ||
968 | } | ||
969 | } | ||
970 | |||
971 | // If there are any query values, add them to the end. | ||
972 | if len(queryValues) > 0 { | ||
973 | urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) | ||
974 | } | ||
975 | |||
976 | return url.Parse(urlStr) | ||
977 | } | ||
978 | |||
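// Editorial illustration of the two URL styles built above (hypothetical
// bucket "mybucket", object "a/b.txt", host "s3.us-east-1.amazonaws.com"):
//
//	virtual-host style: https://mybucket.s3.us-east-1.amazonaws.com/a/b.txt
//	path style:         https://s3.us-east-1.amazonaws.com/mybucket/a/b.txt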
979 | // returns true if virtual hosted style requests are to be used. | ||
980 | func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { | ||
981 | if bucketName == "" { | ||
982 | return false | ||
983 | } | ||
984 | |||
985 | if c.lookup == BucketLookupDNS { | ||
986 | return true | ||
987 | } | ||
988 | if c.lookup == BucketLookupPath { | ||
989 | return false | ||
990 | } | ||
991 | |||
992 | // default to virtual only for Amazon/Google storage. In all other cases use | ||
993 | // path style requests. | ||
994 | return s3utils.IsVirtualHostSupported(url, bucketName) | ||
995 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go new file mode 100644 index 0000000..b1d3b38 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go | |||
@@ -0,0 +1,256 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "net" | ||
23 | "net/http" | ||
24 | "net/url" | ||
25 | "path" | ||
26 | "sync" | ||
27 | |||
28 | "github.com/minio/minio-go/v7/pkg/credentials" | ||
29 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
30 | "github.com/minio/minio-go/v7/pkg/signer" | ||
31 | ) | ||
32 | |||
33 | // bucketLocationCache - Provides simple mechanism to hold bucket | ||
34 | // locations in memory. | ||
35 | type bucketLocationCache struct { | ||
36 | // mutex is used for handling the concurrent | ||
37 | // read/write requests for cache. | ||
38 | sync.RWMutex | ||
39 | |||
40 | // items holds the cached bucket locations. | ||
41 | items map[string]string | ||
42 | } | ||
43 | |||
44 | // newBucketLocationCache - Provides a new bucket location cache to be | ||
45 | // used internally with the client object. | ||
46 | func newBucketLocationCache() *bucketLocationCache { | ||
47 | return &bucketLocationCache{ | ||
48 | items: make(map[string]string), | ||
49 | } | ||
50 | } | ||
51 | |||
52 | // Get - Returns a value of a given key if it exists. | ||
53 | func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { | ||
54 | r.RLock() | ||
55 | defer r.RUnlock() | ||
56 | location, ok = r.items[bucketName] | ||
57 | return | ||
58 | } | ||
59 | |||
60 | // Set - Will persist a value into cache. | ||
61 | func (r *bucketLocationCache) Set(bucketName, location string) { | ||
62 | r.Lock() | ||
63 | defer r.Unlock() | ||
64 | r.items[bucketName] = location | ||
65 | } | ||
66 | |||
67 | // Delete - Deletes a bucket name from cache. | ||
68 | func (r *bucketLocationCache) Delete(bucketName string) { | ||
69 | r.Lock() | ||
70 | defer r.Unlock() | ||
71 | delete(r.items, bucketName) | ||
72 | } | ||
73 | |||
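// Editorial usage sketch of the cache primitives above:
//
//	cache := newBucketLocationCache()
//	cache.Set("mybucket", "eu-west-1")
//	if loc, ok := cache.Get("mybucket"); ok {
//		_ = loc // "eu-west-1"
//	}
//	cache.Delete("mybucket")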
74 | // GetBucketLocation - gets the location for the bucket name from the location cache; if not | ||
75 | // cached, fetches it freshly by making a new request. | ||
76 | func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { | ||
77 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
78 | return "", err | ||
79 | } | ||
80 | return c.getBucketLocation(ctx, bucketName) | ||
81 | } | ||
82 | |||
83 | // getBucketLocation - gets the location for the bucketName from the location map cache; if not | ||
84 | // cached, fetches it freshly by making a new request. | ||
85 | func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { | ||
86 | if err := s3utils.CheckValidBucketName(bucketName); err != nil { | ||
87 | return "", err | ||
88 | } | ||
89 | |||
90 | 	// If a region is set, there is no need to fetch the bucket location. | ||
91 | if c.region != "" { | ||
92 | return c.region, nil | ||
93 | } | ||
94 | |||
95 | if location, ok := c.bucketLocCache.Get(bucketName); ok { | ||
96 | return location, nil | ||
97 | } | ||
98 | |||
99 | // Initialize a new request. | ||
100 | req, err := c.getBucketLocationRequest(ctx, bucketName) | ||
101 | if err != nil { | ||
102 | return "", err | ||
103 | } | ||
104 | |||
105 | // Initiate the request. | ||
106 | resp, err := c.do(req) | ||
107 | defer closeResponse(resp) | ||
108 | if err != nil { | ||
109 | return "", err | ||
110 | } | ||
111 | location, err := processBucketLocationResponse(resp, bucketName) | ||
112 | if err != nil { | ||
113 | return "", err | ||
114 | } | ||
115 | c.bucketLocCache.Set(bucketName, location) | ||
116 | return location, nil | ||
117 | } | ||
118 | |||
119 | // processes the getBucketLocation http response from the server. | ||
120 | func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { | ||
121 | if resp != nil { | ||
122 | if resp.StatusCode != http.StatusOK { | ||
123 | err = httpRespToErrorResponse(resp, bucketName, "") | ||
124 | errResp := ToErrorResponse(err) | ||
125 | // For access denied error, it could be an anonymous | ||
126 | // request. Move forward and let the top level callers | ||
127 | // succeed if possible based on their policy. | ||
128 | switch errResp.Code { | ||
129 | case "NotImplemented": | ||
130 | switch errResp.Server { | ||
131 | case "AmazonSnowball": | ||
132 | return "snowball", nil | ||
133 | case "cloudflare": | ||
134 | return "us-east-1", nil | ||
135 | } | ||
136 | case "AuthorizationHeaderMalformed": | ||
137 | fallthrough | ||
138 | case "InvalidRegion": | ||
139 | fallthrough | ||
140 | case "AccessDenied": | ||
141 | if errResp.Region == "" { | ||
142 | return "us-east-1", nil | ||
143 | } | ||
144 | return errResp.Region, nil | ||
145 | } | ||
146 | return "", err | ||
147 | } | ||
148 | } | ||
149 | |||
150 | // Extract location. | ||
151 | var locationConstraint string | ||
152 | err = xmlDecoder(resp.Body, &locationConstraint) | ||
153 | if err != nil { | ||
154 | return "", err | ||
155 | } | ||
156 | |||
157 | location := locationConstraint | ||
158 | 	// If location is empty, it will be 'us-east-1'. | ||
159 | if location == "" { | ||
160 | location = "us-east-1" | ||
161 | } | ||
162 | |||
163 | 	// Location can be 'EU'; convert it to the meaningful 'eu-west-1'. | ||
164 | if location == "EU" { | ||
165 | location = "eu-west-1" | ||
166 | } | ||
167 | |||
168 | 	// Note: the caller (getBucketLocation) saves the location into its cache. | ||
169 | |||
170 | 	// Return. | ||
171 | return location, nil | ||
172 | } | ||
173 | |||
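// Editorial example of the response body parsed above (per the S3
// GetBucketLocation API):
//
//	<?xml version="1.0" encoding="UTF-8"?>
//	<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">eu-west-1</LocationConstraint>
//
// An empty constraint decodes to "" and is normalized to "us-east-1" above.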
174 | // getBucketLocationRequest - wrapper that creates a new getBucketLocation request. | ||
175 | func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) { | ||
176 | // Set location query. | ||
177 | urlValues := make(url.Values) | ||
178 | urlValues.Set("location", "") | ||
179 | |||
180 | 	// Build the get-bucket-location URL; path style is used unless virtual host style applies below. | ||
181 | targetURL := *c.endpointURL | ||
182 | |||
183 | 	// Strip standard ports, as is done in the makeTargetURL method in api.go. | ||
184 | if h, p, err := net.SplitHostPort(targetURL.Host); err == nil { | ||
185 | if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { | ||
186 | targetURL.Host = h | ||
187 | 			if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { // bracket only IPv6 literals, matching makeTargetURL | ||
188 | targetURL.Host = "[" + h + "]" | ||
189 | } | ||
190 | } | ||
191 | } | ||
192 | |||
193 | isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName) | ||
194 | |||
195 | var urlStr string | ||
196 | |||
197 | if isVirtualStyle { | ||
198 | urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location" | ||
199 | } else { | ||
200 | targetURL.Path = path.Join(bucketName, "") + "/" | ||
201 | targetURL.RawQuery = urlValues.Encode() | ||
202 | urlStr = targetURL.String() | ||
203 | } | ||
204 | |||
205 | // Get a new HTTP request for the method. | ||
206 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil) | ||
207 | if err != nil { | ||
208 | return nil, err | ||
209 | } | ||
210 | |||
211 | // Set UserAgent for the request. | ||
212 | c.setUserAgent(req) | ||
213 | |||
214 | // Get credentials from the configured credentials provider. | ||
215 | value, err := c.credsProvider.Get() | ||
216 | if err != nil { | ||
217 | return nil, err | ||
218 | } | ||
219 | |||
220 | var ( | ||
221 | signerType = value.SignerType | ||
222 | accessKeyID = value.AccessKeyID | ||
223 | secretAccessKey = value.SecretAccessKey | ||
224 | sessionToken = value.SessionToken | ||
225 | ) | ||
226 | |||
227 | 	// If a custom signer is set, override the behavior. | ||
228 | if c.overrideSignerType != credentials.SignatureDefault { | ||
229 | signerType = c.overrideSignerType | ||
230 | } | ||
231 | |||
232 | // If signerType returned by credentials helper is anonymous, | ||
233 | // then do not sign regardless of signerType override. | ||
234 | if value.SignerType == credentials.SignatureAnonymous { | ||
235 | signerType = credentials.SignatureAnonymous | ||
236 | } | ||
237 | |||
238 | if signerType.IsAnonymous() { | ||
239 | return req, nil | ||
240 | } | ||
241 | |||
242 | if signerType.IsV2() { | ||
243 | req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualStyle) | ||
244 | return req, nil | ||
245 | } | ||
246 | |||
247 | // Set sha256 sum for signature calculation only with signature version '4'. | ||
248 | contentSha256 := emptySHA256Hex | ||
249 | if c.secure { | ||
250 | contentSha256 = unsignedPayload | ||
251 | } | ||
252 | |||
253 | req.Header.Set("X-Amz-Content-Sha256", contentSha256) | ||
254 | req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") | ||
255 | return req, nil | ||
256 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go new file mode 100644 index 0000000..a1f6f43 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/checksum.go | |||
@@ -0,0 +1,210 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2023 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "crypto/sha1" | ||
22 | "crypto/sha256" | ||
23 | "encoding/base64" | ||
24 | "hash" | ||
25 | "hash/crc32" | ||
26 | "io" | ||
27 | "math/bits" | ||
28 | ) | ||
29 | |||
30 | // ChecksumType contains information about the checksum type. | ||
31 | type ChecksumType uint32 | ||
32 | |||
33 | const ( | ||
34 | |||
35 | // ChecksumSHA256 indicates a SHA256 checksum. | ||
36 | ChecksumSHA256 ChecksumType = 1 << iota | ||
37 | // ChecksumSHA1 indicates a SHA-1 checksum. | ||
38 | ChecksumSHA1 | ||
39 | // ChecksumCRC32 indicates a CRC32 checksum with IEEE table. | ||
40 | ChecksumCRC32 | ||
41 | // ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table. | ||
42 | ChecksumCRC32C | ||
43 | |||
44 | // Keep after all valid checksums | ||
45 | checksumLast | ||
46 | |||
47 | // checksumMask is a mask for valid checksum types. | ||
48 | checksumMask = checksumLast - 1 | ||
49 | |||
50 | // ChecksumNone indicates no checksum. | ||
51 | ChecksumNone ChecksumType = 0 | ||
52 | |||
53 | amzChecksumAlgo = "x-amz-checksum-algorithm" | ||
54 | amzChecksumCRC32 = "x-amz-checksum-crc32" | ||
55 | amzChecksumCRC32C = "x-amz-checksum-crc32c" | ||
56 | amzChecksumSHA1 = "x-amz-checksum-sha1" | ||
57 | amzChecksumSHA256 = "x-amz-checksum-sha256" | ||
58 | ) | ||
59 | |||
60 | // Is reports whether c includes all of t. | ||
61 | func (c ChecksumType) Is(t ChecksumType) bool { | ||
62 | return c&t == t | ||
63 | } | ||
64 | |||
65 | // Key returns the header key. | ||
66 | // Returns an empty string if invalid or none. | ||
67 | func (c ChecksumType) Key() string { | ||
68 | switch c & checksumMask { | ||
69 | case ChecksumCRC32: | ||
70 | return amzChecksumCRC32 | ||
71 | case ChecksumCRC32C: | ||
72 | return amzChecksumCRC32C | ||
73 | case ChecksumSHA1: | ||
74 | return amzChecksumSHA1 | ||
75 | case ChecksumSHA256: | ||
76 | return amzChecksumSHA256 | ||
77 | } | ||
78 | return "" | ||
79 | } | ||
80 | |||
81 | // RawByteLen returns the size of the un-encoded checksum. | ||
82 | func (c ChecksumType) RawByteLen() int { | ||
83 | switch c & checksumMask { | ||
84 | case ChecksumCRC32, ChecksumCRC32C: | ||
85 | return 4 | ||
86 | case ChecksumSHA1: | ||
87 | return sha1.Size | ||
88 | case ChecksumSHA256: | ||
89 | return sha256.Size | ||
90 | } | ||
91 | return 0 | ||
92 | } | ||
93 | |||
94 | // Hasher returns a hasher corresponding to the checksum type. | ||
95 | // Returns nil if no checksum. | ||
96 | func (c ChecksumType) Hasher() hash.Hash { | ||
97 | switch c & checksumMask { | ||
98 | case ChecksumCRC32: | ||
99 | return crc32.NewIEEE() | ||
100 | case ChecksumCRC32C: | ||
101 | return crc32.New(crc32.MakeTable(crc32.Castagnoli)) | ||
102 | case ChecksumSHA1: | ||
103 | return sha1.New() | ||
104 | case ChecksumSHA256: | ||
105 | return sha256.New() | ||
106 | } | ||
107 | return nil | ||
108 | } | ||
109 | |||
110 | // IsSet returns whether the type is valid and known. | ||
111 | func (c ChecksumType) IsSet() bool { | ||
112 | return bits.OnesCount32(uint32(c)) == 1 | ||
113 | } | ||
114 | |||
115 | // String returns the type as a string. | ||
116 | // CRC32, CRC32C, SHA1, and SHA256 for valid values. | ||
117 | // Empty string for unset and "<invalid>" if not valid. | ||
118 | func (c ChecksumType) String() string { | ||
119 | switch c & checksumMask { | ||
120 | case ChecksumCRC32: | ||
121 | return "CRC32" | ||
122 | case ChecksumCRC32C: | ||
123 | return "CRC32C" | ||
124 | case ChecksumSHA1: | ||
125 | return "SHA1" | ||
126 | case ChecksumSHA256: | ||
127 | return "SHA256" | ||
128 | case ChecksumNone: | ||
129 | return "" | ||
130 | } | ||
131 | return "<invalid>" | ||
132 | } | ||
133 | |||
134 | // ChecksumReader reads all of r and returns a checksum of type c. | ||
135 | // Returns any error that may have occurred while reading. | ||
136 | func (c ChecksumType) ChecksumReader(r io.Reader) (Checksum, error) { | ||
137 | h := c.Hasher() | ||
138 | if h == nil { | ||
139 | return Checksum{}, nil | ||
140 | } | ||
141 | _, err := io.Copy(h, r) | ||
142 | if err != nil { | ||
143 | return Checksum{}, err | ||
144 | } | ||
145 | return NewChecksum(c, h.Sum(nil)), nil | ||
146 | } | ||
147 | |||
148 | // ChecksumBytes returns a checksum of the content b with type c. | ||
149 | func (c ChecksumType) ChecksumBytes(b []byte) Checksum { | ||
150 | h := c.Hasher() | ||
151 | if h == nil { | ||
152 | return Checksum{} | ||
153 | } | ||
154 | n, err := h.Write(b) | ||
155 | if err != nil || n != len(b) { | ||
156 | // Shouldn't happen with these checksummers. | ||
157 | return Checksum{} | ||
158 | } | ||
159 | return NewChecksum(c, h.Sum(nil)) | ||
160 | } | ||
161 | |||
162 | // Checksum is a type and encoded value. | ||
163 | type Checksum struct { | ||
164 | Type ChecksumType | ||
165 | r []byte | ||
166 | } | ||
167 | |||
168 | // NewChecksum sets the checksum to the value of b, | ||
169 | // which is the raw hash output. | ||
170 | // If the length of b does not match t.RawByteLen, | ||
171 | // a checksum with ChecksumNone is returned. | ||
172 | func NewChecksum(t ChecksumType, b []byte) Checksum { | ||
173 | if t.IsSet() && len(b) == t.RawByteLen() { | ||
174 | return Checksum{Type: t, r: b} | ||
175 | } | ||
176 | return Checksum{} | ||
177 | } | ||
178 | |||
179 | // NewChecksumString sets the checksum to the value of s, | ||
180 | // which is the base 64 encoded raw hash output. | ||
181 | // If the length of the decoded s does not match t.RawByteLen, it is not added. | ||
182 | func NewChecksumString(t ChecksumType, s string) Checksum { | ||
183 | b, _ := base64.StdEncoding.DecodeString(s) | ||
184 | if t.IsSet() && len(b) == t.RawByteLen() { | ||
185 | return Checksum{Type: t, r: b} | ||
186 | } | ||
187 | return Checksum{} | ||
188 | } | ||
189 | |||
190 | // IsSet returns whether the checksum is valid and known. | ||
191 | func (c Checksum) IsSet() bool { | ||
192 | return c.Type.IsSet() && len(c.r) == c.Type.RawByteLen() | ||
193 | } | ||
194 | |||
195 | // Encoded returns the encoded value. | ||
196 | // Returns the empty string if not set or not valid. | ||
197 | func (c Checksum) Encoded() string { | ||
198 | if !c.IsSet() { | ||
199 | return "" | ||
200 | } | ||
201 | return base64.StdEncoding.EncodeToString(c.r) | ||
202 | } | ||
203 | |||
204 | // Raw returns the raw checksum value if set. | ||
205 | func (c Checksum) Raw() []byte { | ||
206 | if !c.IsSet() { | ||
207 | return nil | ||
208 | } | ||
209 | return c.r | ||
210 | } | ||
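// Editorial usage sketch of the checksum API above (from a caller's
// perspective):
//
//	sum := minio.ChecksumCRC32C.ChecksumBytes([]byte("hello world"))
//	_ = sum.Type.Key() // "x-amz-checksum-crc32c"
//	_ = sum.Encoded()  // base64 of the 4 raw CRC32C bytes
//	_ = sum.Raw()      // the 4 raw bytes themselves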
diff --git a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md new file mode 100644 index 0000000..cb232c3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md | |||
@@ -0,0 +1,80 @@ | |||
1 | # Contributor Covenant Code of Conduct | ||
2 | |||
3 | ## Our Pledge | ||
4 | |||
5 | In the interest of fostering an open and welcoming environment, we as | ||
6 | contributors and maintainers pledge to making participation in our project and | ||
7 | our community a harassment-free experience for everyone, regardless of age, body | ||
8 | size, disability, ethnicity, gender identity and expression, level of experience, | ||
9 | nationality, personal appearance, race, religion, or sexual identity and | ||
10 | orientation. | ||
11 | |||
12 | ## Our Standards | ||
13 | |||
14 | Examples of behavior that contributes to creating a positive environment | ||
15 | include: | ||
16 | |||
17 | * Using welcoming and inclusive language | ||
18 | * Being respectful of differing viewpoints and experiences | ||
19 | * Gracefully accepting constructive criticism | ||
20 | * Focusing on what is best for the community | ||
21 | * Showing empathy towards other community members | ||
22 | |||
23 | Examples of unacceptable behavior by participants include: | ||
24 | |||
25 | * The use of sexualized language or imagery and unwelcome sexual attention or | ||
26 | advances | ||
27 | * Trolling, insulting/derogatory comments, and personal or political attacks | ||
28 | * Public or private harassment | ||
29 | * Publishing others' private information, such as a physical or electronic | ||
30 | address, without explicit permission | ||
31 | * Other conduct which could reasonably be considered inappropriate in a | ||
32 | professional setting | ||
33 | |||
34 | ## Our Responsibilities | ||
35 | |||
36 | Project maintainers are responsible for clarifying the standards of acceptable | ||
37 | behavior and are expected to take appropriate and fair corrective action in | ||
38 | response to any instances of unacceptable behavior, in compliance with the | ||
39 | licensing terms applying to the Project developments. | ||
40 | |||
41 | Project maintainers have the right and responsibility to remove, edit, or | ||
42 | reject comments, commits, code, wiki edits, issues, and other contributions | ||
43 | that are not aligned to this Code of Conduct, or to ban temporarily or | ||
44 | permanently any contributor for other behaviors that they deem inappropriate, | ||
45 | threatening, offensive, or harmful. However, these actions shall respect the | ||
46 | licensing terms of the Project Developments that will always supersede such | ||
47 | Code of Conduct. | ||
48 | |||
49 | ## Scope | ||
50 | |||
51 | This Code of Conduct applies both within project spaces and in public spaces | ||
52 | when an individual is representing the project or its community. Examples of | ||
53 | representing a project or community include using an official project e-mail | ||
54 | address, posting via an official social media account, or acting as an appointed | ||
55 | representative at an online or offline event. Representation of a project may be | ||
56 | further defined and clarified by project maintainers. | ||
57 | |||
58 | ## Enforcement | ||
59 | |||
60 | Instances of abusive, harassing, or otherwise unacceptable behavior may be | ||
61 | reported by contacting the project team at [email protected]. The project team | ||
62 | will review and investigate all complaints, and will respond in a way that it deems | ||
63 | appropriate to the circumstances. The project team is obligated to maintain | ||
64 | confidentiality with regard to the reporter of an incident. | ||
65 | Further details of specific enforcement policies may be posted separately. | ||
66 | |||
67 | Project maintainers who do not follow or enforce the Code of Conduct in good | ||
68 | faith may face temporary or permanent repercussions as determined by other | ||
69 | members of the project's leadership. | ||
70 | |||
71 | ## Attribution | ||
72 | |||
73 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, | ||
74 | available at [http://contributor-covenant.org/version/1/4][version] | ||
75 | |||
76 | This version includes a clarification to ensure that the code of conduct is in | ||
77 | compliance with the free software licensing terms of the project. | ||
78 | |||
79 | [homepage]: http://contributor-covenant.org | ||
80 | [version]: http://contributor-covenant.org/version/1/4/ | ||
diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go new file mode 100644 index 0000000..401d2a7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/constants.go | |||
@@ -0,0 +1,110 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | // Multipart upload defaults. | ||
21 | |||
22 | // absMinPartSize - absolute minimum part size (5 MiB) below which | ||
23 | // a part in a multipart upload may not be uploaded. | ||
24 | const absMinPartSize = 1024 * 1024 * 5 | ||
25 | |||
26 | // minPartSize - minimum part size 16MiB per object after which | ||
27 | // putObject behaves internally as multipart. | ||
28 | const minPartSize = 1024 * 1024 * 16 | ||
29 | |||
30 | // maxPartsCount - maximum number of parts for a single multipart session. | ||
31 | const maxPartsCount = 10000 | ||
32 | |||
33 | // maxPartSize - maximum part size 5GiB for a single multipart upload | ||
34 | // operation. | ||
35 | const maxPartSize = 1024 * 1024 * 1024 * 5 | ||
36 | |||
37 | // maxSinglePutObjectSize - maximum size 5GiB of object per PUT | ||
38 | // operation. | ||
39 | const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 | ||
40 | |||
41 | // maxMultipartPutObjectSize - maximum size 5TiB of object for | ||
42 | // Multipart operation. | ||
43 | const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 | ||
44 | |||
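// Editorial worked example of how these limits interact: the largest object,
// maxMultipartPutObjectSize (5 TiB), split across maxPartsCount (10000) parts
// needs parts of at least 5 TiB / 10000 ≈ 550 MB (~524 MiB) each, well above
// absMinPartSize, so uploaders must grow the part size with the object size
// rather than always using the 16 MiB minPartSize.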
45 | // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when | ||
46 | // we don't want to sign the request payload | ||
47 | const unsignedPayload = "UNSIGNED-PAYLOAD" | ||
48 | |||
49 | // unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when | ||
50 | // we don't want to sign the request payload, but have a trailer. | ||
51 | const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" | ||
52 | |||
53 | // Total number of parallel workers used for multipart operation. | ||
54 | const totalWorkers = 4 | ||
55 | |||
56 | // Signature related constants. | ||
57 | const ( | ||
58 | signV4Algorithm = "AWS4-HMAC-SHA256" | ||
59 | iso8601DateFormat = "20060102T150405Z" | ||
60 | ) | ||
61 | |||
62 | const ( | ||
63 | // Storage class header. | ||
64 | amzStorageClass = "X-Amz-Storage-Class" | ||
65 | |||
66 | // Website redirect location header | ||
67 | amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" | ||
68 | |||
69 | // Object Tagging headers | ||
70 | amzTaggingHeader = "X-Amz-Tagging" | ||
71 | amzTaggingHeaderDirective = "X-Amz-Tagging-Directive" | ||
72 | |||
73 | amzVersionID = "X-Amz-Version-Id" | ||
74 | amzTaggingCount = "X-Amz-Tagging-Count" | ||
75 | amzExpiration = "X-Amz-Expiration" | ||
76 | amzRestore = "X-Amz-Restore" | ||
77 | amzReplicationStatus = "X-Amz-Replication-Status" | ||
78 | amzDeleteMarker = "X-Amz-Delete-Marker" | ||
79 | |||
80 | // Object legal hold header | ||
81 | amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold" | ||
82 | |||
83 | // Object retention header | ||
84 | amzLockMode = "X-Amz-Object-Lock-Mode" | ||
85 | amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date" | ||
86 | amzBypassGovernance = "X-Amz-Bypass-Governance-Retention" | ||
87 | |||
88 | // Replication status | ||
89 | amzBucketReplicationStatus = "X-Amz-Replication-Status" | ||
90 | // Minio specific Replication/lifecycle transition extension | ||
91 | minIOBucketSourceMTime = "X-Minio-Source-Mtime" | ||
92 | |||
93 | minIOBucketSourceETag = "X-Minio-Source-Etag" | ||
94 | minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker" | ||
95 | minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request" | ||
96 | minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request" | ||
97 | minIOBucketReplicationCheck = "X-Minio-Source-Replication-Check" | ||
98 | |||
99 | // Header indicates last tag update time on source | ||
100 | minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp" | ||
101 | // Header indicates last retention update time on source | ||
102 | minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" | ||
103 | // Header indicates last legalhold update time on source | ||
104 | minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" | ||
105 | minIOForceDelete = "x-minio-force-delete" | ||
106 | // Header indicates delete marker replication request can be sent by source now. | ||
107 | minioTgtReplicationReady = "X-Minio-Replication-Ready" | ||
108 | // Header asks if delete marker replication request can be sent by source now. | ||
109 | isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready" | ||
110 | ) | ||
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go new file mode 100644 index 0000000..132ea70 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/core.go | |||
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "io" | ||
23 | "net/http" | ||
24 | |||
25 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
26 | ) | ||
27 | |||
28 | // Core - Inherits Client and adds new methods to expose the low level S3 APIs. | ||
29 | type Core struct { | ||
30 | *Client | ||
31 | } | ||
32 | |||
33 | // NewCore - Returns a new initialized Core client. This Core client should | ||
34 | // only be used under special conditions, such as needing access to lower-level | ||
35 | // primitives in order to write your own wrappers. | ||
36 | func NewCore(endpoint string, opts *Options) (*Core, error) { | ||
37 | var s3Client Core | ||
38 | client, err := New(endpoint, opts) | ||
39 | if err != nil { | ||
40 | return nil, err | ||
41 | } | ||
42 | s3Client.Client = client | ||
43 | return &s3Client, nil | ||
44 | } | ||
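A minimal sketch of constructing a Core client (it assumes the public play.min.io sandbox credentials that the functional tests later in this diff also default to; any endpoint and credentials would do):

package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	core, err := minio.NewCore("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}
	// Core embeds *Client, so the high-level APIs remain available as well.
	log.Println("endpoint:", core.EndpointURL())
}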
45 | |||
46 | // ListObjects - Lists all the objects at a prefix; optionally, with marker and delimiter | ||
47 | // you can further filter the results. | ||
48 | func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { | ||
49 | return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil) | ||
50 | } | ||
51 | |||
52 | // ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses | ||
53 | // continuationToken instead of marker to support iteration over the results. | ||
54 | func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) { | ||
55 | return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil) | ||
56 | } | ||
57 | |||
58 | // CopyObject - copies an object from a source object to a destination object on the server side. | ||
59 | func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) { | ||
60 | return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts) | ||
61 | } | ||
62 | |||
63 | // CopyObjectPart - creates a part in a multipart upload by copying (a | ||
64 | // part of) an existing object. | ||
65 | func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, | ||
66 | partID int, startOffset, length int64, metadata map[string]string, | ||
67 | ) (p CompletePart, err error) { | ||
68 | return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID, | ||
69 | partID, startOffset, length, metadata) | ||
70 | } | ||
71 | |||
72 | // PutObject - Uploads an object using a single PUT call. | ||
73 | func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) { | ||
74 | hookReader := newHook(data, opts.Progress) | ||
75 | return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts) | ||
76 | } | ||
77 | |||
78 | // NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. | ||
79 | func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) { | ||
80 | result, err := c.initiateMultipartUpload(ctx, bucket, object, opts) | ||
81 | return result.UploadID, err | ||
82 | } | ||
83 | |||
84 | // ListMultipartUploads - List incomplete uploads. | ||
85 | func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { | ||
86 | return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) | ||
87 | } | ||
88 | |||
89 | // PutObjectPartOptions contains options for PutObjectPart API | ||
90 | type PutObjectPartOptions struct { | ||
91 | Md5Base64, Sha256Hex string | ||
92 | SSE encrypt.ServerSide | ||
93 | CustomHeader, Trailer http.Header | ||
94 | } | ||
95 | |||
96 | // PutObjectPart - Upload an object part. | ||
97 | func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, | ||
98 | data io.Reader, size int64, opts PutObjectPartOptions, | ||
99 | ) (ObjectPart, error) { | ||
100 | p := uploadPartParams{ | ||
101 | bucketName: bucket, | ||
102 | objectName: object, | ||
103 | uploadID: uploadID, | ||
104 | reader: data, | ||
105 | partNumber: partID, | ||
106 | md5Base64: opts.Md5Base64, | ||
107 | sha256Hex: opts.Sha256Hex, | ||
108 | size: size, | ||
109 | sse: opts.SSE, | ||
110 | streamSha256: true, | ||
111 | customHeader: opts.CustomHeader, | ||
112 | trailer: opts.Trailer, | ||
113 | } | ||
114 | return c.uploadPart(ctx, p) | ||
115 | } | ||
116 | |||
117 | // ListObjectParts - List uploaded parts of an incomplete upload. | ||
118 | func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListObjectPartsResult, err error) { | ||
119 | return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts) | ||
120 | } | ||
121 | |||
122 | // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. | ||
123 | func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (UploadInfo, error) { | ||
124 | res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{ | ||
125 | Parts: parts, | ||
126 | }, opts) | ||
127 | return res, err | ||
128 | } | ||
129 | |||
130 | // AbortMultipartUpload - Abort an incomplete upload. | ||
131 | func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { | ||
132 | return c.abortMultipartUpload(ctx, bucket, object, uploadID) | ||
133 | } | ||
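Together, NewMultipartUpload, PutObjectPart, CompleteMultipartUpload, and AbortMultipartUpload allow a hand-rolled multipart upload. A minimal sketch (assuming the same sandbox endpoint as above, an existing bucket named `mybucket`, and a single part for brevity):

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func uploadInParts(core *minio.Core) error {
	ctx := context.Background()
	data := bytes.Repeat([]byte("x"), 5<<20) // one 5 MiB part (the S3 minimum size for non-final parts)

	uploadID, err := core.NewMultipartUpload(ctx, "mybucket", "myobject", minio.PutObjectOptions{})
	if err != nil {
		return err
	}

	part, err := core.PutObjectPart(ctx, "mybucket", "myobject", uploadID, 1,
		bytes.NewReader(data), int64(len(data)), minio.PutObjectPartOptions{})
	if err != nil {
		// Abort so the server does not accumulate orphaned parts.
		core.AbortMultipartUpload(ctx, "mybucket", "myobject", uploadID)
		return err
	}

	// Commit the upload by listing each part's number and ETag.
	_, err = core.CompleteMultipartUpload(ctx, "mybucket", "myobject", uploadID,
		[]minio.CompletePart{{PartNumber: part.PartNumber, ETag: part.ETag}},
		minio.PutObjectOptions{})
	return err
}

func main() {
	core, err := minio.NewCore("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}
	if err := uploadInParts(core); err != nil {
		log.Fatalln(err)
	}
}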
134 | |||
135 | // GetBucketPolicy - fetches bucket access policy for a given bucket. | ||
136 | func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) { | ||
137 | return c.getBucketPolicy(ctx, bucket) | ||
138 | } | ||
139 | |||
140 | // PutBucketPolicy - applies a new bucket access policy for a given bucket. | ||
141 | func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error { | ||
142 | return c.putBucketPolicy(ctx, bucket, bucketPolicy) | ||
143 | } | ||
144 | |||
145 | // GetObject is a lower level API implemented to support reading | ||
146 | // partial objects and also downloading objects with special conditions | ||
147 | // matching etag, modtime etc. | ||
148 | func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { | ||
149 | return c.getObject(ctx, bucketName, objectName, opts) | ||
150 | } | ||
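A sketch of a ranged read through Core.GetObject (assuming the `core` client and imports from the sketches above, plus `io` and `os`; GetObjectOptions.SetRange is part of the public minio-go API, and the caller must close the returned ReadCloser):

func readFirstKiB(core *minio.Core) error {
	opts := minio.GetObjectOptions{}
	if err := opts.SetRange(0, 1023); err != nil { // request only the first 1 KiB
		return err
	}
	rc, objInfo, hdr, err := core.GetObject(context.Background(), "mybucket", "myobject", opts)
	if err != nil {
		return err
	}
	defer rc.Close()

	log.Println("size:", objInfo.Size, "etag:", hdr.Get("ETag"))
	_, err = io.Copy(os.Stdout, rc) // stream just the requested range
	return err
}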
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go new file mode 100644 index 0000000..f951cd0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go | |||
@@ -0,0 +1,13004 @@ | |||
1 | //go:build mint | ||
2 | // +build mint | ||
3 | |||
4 | /* | ||
5 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
6 | * Copyright 2015-2020 MinIO, Inc. | ||
7 | * | ||
8 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
9 | * you may not use this file except in compliance with the License. | ||
10 | * You may obtain a copy of the License at | ||
11 | * | ||
12 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
13 | * | ||
14 | * Unless required by applicable law or agreed to in writing, software | ||
15 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
17 | * See the License for the specific language governing permissions and | ||
18 | * limitations under the License. | ||
19 | */ | ||
20 | |||
21 | package main | ||
22 | |||
23 | import ( | ||
24 | "archive/zip" | ||
25 | "bytes" | ||
26 | "context" | ||
27 | "crypto/sha1" | ||
28 | "encoding/base64" | ||
29 | "errors" | ||
30 | "fmt" | ||
31 | "hash" | ||
32 | "hash/crc32" | ||
33 | "io" | ||
34 | "math/rand" | ||
35 | "mime/multipart" | ||
36 | "net/http" | ||
37 | "net/url" | ||
38 | "os" | ||
39 | "path" | ||
40 | "path/filepath" | ||
41 | "reflect" | ||
42 | "runtime" | ||
43 | "sort" | ||
44 | "strconv" | ||
45 | "strings" | ||
46 | "sync" | ||
47 | "time" | ||
48 | |||
49 | "github.com/dustin/go-humanize" | ||
50 | jsoniter "github.com/json-iterator/go" | ||
51 | "github.com/minio/sha256-simd" | ||
52 | log "github.com/sirupsen/logrus" | ||
53 | |||
54 | "github.com/minio/minio-go/v7" | ||
55 | "github.com/minio/minio-go/v7/pkg/credentials" | ||
56 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
57 | "github.com/minio/minio-go/v7/pkg/notification" | ||
58 | "github.com/minio/minio-go/v7/pkg/tags" | ||
59 | ) | ||
60 | |||
61 | const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" | ||
62 | const ( | ||
63 | letterIdxBits = 6 // 6 bits to represent a letter index | ||
64 | letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits | ||
65 | letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits | ||
66 | ) | ||
67 | |||
68 | const ( | ||
69 | serverEndpoint = "SERVER_ENDPOINT" | ||
70 | accessKey = "ACCESS_KEY" | ||
71 | secretKey = "SECRET_KEY" | ||
72 | enableHTTPS = "ENABLE_HTTPS" | ||
73 | enableKMS = "ENABLE_KMS" | ||
74 | ) | ||
75 | |||
76 | type mintJSONFormatter struct{} | ||
77 | |||
78 | func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) { | ||
79 | data := make(log.Fields, len(entry.Data)) | ||
80 | for k, v := range entry.Data { | ||
81 | switch v := v.(type) { | ||
82 | case error: | ||
83 | // Otherwise errors are ignored by `encoding/json` | ||
84 | // https://github.com/sirupsen/logrus/issues/137 | ||
85 | data[k] = v.Error() | ||
86 | default: | ||
87 | data[k] = v | ||
88 | } | ||
89 | } | ||
90 | json := jsoniter.ConfigCompatibleWithStandardLibrary | ||
91 | serialized, err := json.Marshal(data) | ||
92 | if err != nil { | ||
93 | return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) | ||
94 | } | ||
95 | return append(serialized, '\n'), nil | ||
96 | } | ||
97 | |||
98 | var readFull = func(r io.Reader, buf []byte) (n int, err error) { | ||
99 | // ReadFull reads exactly len(buf) bytes from r into buf. | ||
100 | // It returns the number of bytes copied and an error if | ||
101 | // fewer bytes were read. The error is EOF only if no bytes | ||
102 | // were read. If an EOF happens after reading some but not | ||
103 | // all the bytes, ReadFull returns ErrUnexpectedEOF. | ||
104 | // On return, n == len(buf) if and only if err == nil. | ||
105 | // If r returns an error having read at least len(buf) bytes, | ||
106 | // the error is dropped. | ||
107 | for n < len(buf) && err == nil { | ||
108 | var nn int | ||
109 | nn, err = r.Read(buf[n:]) | ||
110 | // Some spurious io.Readers return | ||
111 | // io.ErrUnexpectedEOF when nn == 0. | ||
112 | // This behavior is undocumented, so | ||
113 | // we deliberately avoid the stock | ||
114 | // io.ReadFull implementation, which | ||
115 | // would force callers into custom | ||
116 | // handling. Instead we adapt the | ||
117 | // original io.ReadFull logic to treat | ||
118 | // io.ErrUnexpectedEOF with nn == 0 as | ||
119 | // what it really means: io.EOF. | ||
120 | if err == io.ErrUnexpectedEOF && nn == 0 { | ||
121 | err = io.EOF | ||
122 | } | ||
123 | n += nn | ||
124 | } | ||
125 | if n >= len(buf) { | ||
126 | err = nil | ||
127 | } else if n > 0 && err == io.EOF { | ||
128 | err = io.ErrUnexpectedEOF | ||
129 | } | ||
130 | return | ||
131 | } | ||
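Under this modified contract, a short read reports the partial count alongside io.ErrUnexpectedEOF. A tiny in-file sketch (strings is already imported above; "hello" is just a convenient 5-byte source):

buf := make([]byte, 8)
n, err := readFull(strings.NewReader("hello"), buf)
// n == 5, err == io.ErrUnexpectedEOF: some bytes were read, but fewer than len(buf).
// A source of exactly 8 bytes would instead yield n == 8, err == nil.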
132 | |||
133 | func cleanEmptyEntries(fields log.Fields) log.Fields { | ||
134 | cleanFields := log.Fields{} | ||
135 | for k, v := range fields { | ||
136 | if v != "" { | ||
137 | cleanFields[k] = v | ||
138 | } | ||
139 | } | ||
140 | return cleanFields | ||
141 | } | ||
142 | |||
143 | // log successful test runs | ||
144 | func successLogger(testName, function string, args map[string]interface{}, startTime time.Time) *log.Entry { | ||
145 | // calculate the test case duration | ||
146 | duration := time.Since(startTime) | ||
147 | // log with the fields as per mint | ||
148 | fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"} | ||
149 | return log.WithFields(cleanEmptyEntries(fields)) | ||
150 | } | ||
151 | |||
152 | // Since a few features are currently unavailable in Gateway(s), check whether the err value is NotImplemented; | ||
153 | // if so, log the test as NA and continue execution. Otherwise log it as a failure and return. | ||
154 | func logError(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) { | ||
155 | // If the server returns NotImplemented, we assume it is running in gateway mode, log the test as info, and move on to the next tests. | ||
156 | // ComposeObject is a special case: the API is implemented on the client side and adds specific error details such as `Error in upload-part-copy` | ||
157 | // in addition to the NotImplemented error returned by the server. | ||
158 | if isErrNotImplemented(err) { | ||
159 | ignoredLog(testName, function, args, startTime, message).Info() | ||
160 | } else if isRunOnFail() { | ||
161 | failureLog(testName, function, args, startTime, alert, message, err).Error() | ||
162 | } else { | ||
163 | failureLog(testName, function, args, startTime, alert, message, err).Fatal() | ||
164 | } | ||
165 | } | ||
166 | |||
167 | // log failed test runs | ||
168 | func failureLog(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) *log.Entry { | ||
169 | // calculate the test case duration | ||
170 | duration := time.Since(startTime) | ||
171 | var fields log.Fields | ||
172 | // log with the fields as per mint | ||
173 | if err != nil { | ||
174 | fields = log.Fields{ | ||
175 | "name": "minio-go: " + testName, "function": function, "args": args, | ||
176 | "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err, | ||
177 | } | ||
178 | } else { | ||
179 | fields = log.Fields{ | ||
180 | "name": "minio-go: " + testName, "function": function, "args": args, | ||
181 | "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, | ||
182 | } | ||
183 | } | ||
184 | return log.WithFields(cleanEmptyEntries(fields)) | ||
185 | } | ||
186 | |||
187 | // log not applicable test runs | ||
188 | func ignoredLog(testName, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry { | ||
189 | // calculate the test case duration | ||
190 | duration := time.Since(startTime) | ||
191 | // log with the fields as per mint | ||
192 | fields := log.Fields{ | ||
193 | "name": "minio-go: " + testName, "function": function, "args": args, | ||
194 | "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented", | ||
195 | } | ||
196 | return log.WithFields(cleanEmptyEntries(fields)) | ||
197 | } | ||
198 | |||
199 | // Delete all objects in the given bucket, recursively | ||
200 | func cleanupBucket(bucketName string, c *minio.Client) error { | ||
201 | // Create a done channel to control the 'ListObjects' goroutine. | ||
202 | doneCh := make(chan struct{}) | ||
203 | // Exit cleanly upon return. | ||
204 | defer close(doneCh) | ||
205 | // Iterate over all objects in the bucket via ListObjects and delete them | ||
206 | for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) { | ||
207 | if objCh.Err != nil { | ||
208 | return objCh.Err | ||
209 | } | ||
210 | if objCh.Key != "" { | ||
211 | err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{}) | ||
212 | if err != nil { | ||
213 | return err | ||
214 | } | ||
215 | } | ||
216 | } | ||
217 | for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { | ||
218 | if objPartInfo.Err != nil { | ||
219 | return objPartInfo.Err | ||
220 | } | ||
221 | if objPartInfo.Key != "" { | ||
222 | err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) | ||
223 | if err != nil { | ||
224 | return err | ||
225 | } | ||
226 | } | ||
227 | } | ||
228 | // objects are already deleted, remove the bucket now | ||
229 | return c.RemoveBucket(context.Background(), bucketName) | ||
234 | } | ||
235 | |||
236 | func cleanupVersionedBucket(bucketName string, c *minio.Client) error { | ||
237 | doneCh := make(chan struct{}) | ||
238 | defer close(doneCh) | ||
239 | for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { | ||
240 | if obj.Err != nil { | ||
241 | return obj.Err | ||
242 | } | ||
243 | if obj.Key != "" { | ||
244 | err := c.RemoveObject(context.Background(), bucketName, obj.Key, | ||
245 | minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true}) | ||
246 | if err != nil { | ||
247 | return err | ||
248 | } | ||
249 | } | ||
250 | } | ||
251 | for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { | ||
252 | if objPartInfo.Err != nil { | ||
253 | return objPartInfo.Err | ||
254 | } | ||
255 | if objPartInfo.Key != "" { | ||
256 | err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) | ||
257 | if err != nil { | ||
258 | return err | ||
259 | } | ||
260 | } | ||
261 | } | ||
262 | // objects are already deleted, remove the bucket now | ||
263 | err := c.RemoveBucket(context.Background(), bucketName) | ||
264 | if err != nil { | ||
265 | for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { | ||
266 | log.Println("found", obj.Key, obj.VersionID) | ||
267 | } | ||
268 | return err | ||
269 | } | ||
270 | return nil | ||
271 | } | ||
272 | |||
273 | func isErrNotImplemented(err error) bool { | ||
274 | return minio.ToErrorResponse(err).Code == "NotImplemented" | ||
275 | } | ||
276 | |||
277 | func isRunOnFail() bool { | ||
278 | return os.Getenv("RUN_ON_FAIL") == "1" | ||
279 | } | ||
280 | |||
281 | func init() { | ||
282 | // If server endpoint is not set, all tests default to | ||
283 | // using https://play.min.io | ||
284 | if os.Getenv(serverEndpoint) == "" { | ||
285 | os.Setenv(serverEndpoint, "play.min.io") | ||
286 | os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F") | ||
287 | os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG") | ||
288 | os.Setenv(enableHTTPS, "1") | ||
289 | } | ||
290 | } | ||
291 | |||
292 | var mintDataDir = os.Getenv("MINT_DATA_DIR") | ||
293 | |||
294 | func getMintDataDirFilePath(filename string) (fp string) { | ||
295 | if mintDataDir == "" { | ||
296 | return | ||
297 | } | ||
298 | return filepath.Join(mintDataDir, filename) | ||
299 | } | ||
300 | |||
301 | func newRandomReader(seed, size int64) io.Reader { | ||
302 | return io.LimitReader(rand.New(rand.NewSource(seed)), size) | ||
303 | } | ||
304 | |||
305 | func mustCrcReader(r io.Reader) uint32 { | ||
306 | crc := crc32.NewIEEE() | ||
307 | _, err := io.Copy(crc, r) | ||
308 | if err != nil { | ||
309 | panic(err) | ||
310 | } | ||
311 | return crc.Sum32() | ||
312 | } | ||
313 | |||
314 | func crcMatches(r io.Reader, want uint32) error { | ||
315 | crc := crc32.NewIEEE() | ||
316 | _, err := io.Copy(crc, r) | ||
317 | if err != nil { | ||
318 | panic(err) | ||
319 | } | ||
320 | got := crc.Sum32() | ||
321 | if got != want { | ||
322 | return fmt.Errorf("crc mismatch, want %x, got %x", want, got) | ||
323 | } | ||
324 | return nil | ||
325 | } | ||
326 | |||
327 | func crcMatchesName(r io.Reader, name string) error { | ||
328 | want := dataFileCRC32[name] | ||
329 | crc := crc32.NewIEEE() | ||
330 | _, err := io.Copy(crc, r) | ||
331 | if err != nil { | ||
332 | panic(err) | ||
333 | } | ||
334 | got := crc.Sum32() | ||
335 | if got != want { | ||
336 | return fmt.Errorf("crc mismatch, want %x, got %x", want, got) | ||
337 | } | ||
338 | return nil | ||
339 | } | ||
340 | |||
341 | // read data from a file under MINT_DATA_DIR if set; otherwise return a random-data reader of the mapped size | ||
342 | func getDataReader(fileName string) io.ReadCloser { | ||
343 | if mintDataDir == "" { | ||
344 | size := int64(dataFileMap[fileName]) | ||
345 | if _, ok := dataFileCRC32[fileName]; !ok { | ||
346 | dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) | ||
347 | } | ||
348 | return io.NopCloser(newRandomReader(size, size)) | ||
349 | } | ||
350 | reader, _ := os.Open(getMintDataDirFilePath(fileName)) | ||
351 | if _, ok := dataFileCRC32[fileName]; !ok { | ||
352 | dataFileCRC32[fileName] = mustCrcReader(reader) | ||
353 | reader.Close() | ||
354 | reader, _ = os.Open(getMintDataDirFilePath(fileName)) | ||
355 | } | ||
356 | return reader | ||
357 | } | ||
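A usage sketch combining the helpers above: the first getDataReader call for a given name also records its CRC (computed from the deterministic random stream when MINT_DATA_DIR is unset), so crcMatchesName can then verify content integrity:

r := getDataReader("datafile-1-MB")
if err := crcMatchesName(r, "datafile-1-MB"); err != nil {
	log.Fatalln(err) // a CRC mismatch indicates corrupted test data
}
r.Close()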
358 | |||
359 | // randString generates random names and prepends them with a known prefix. | ||
360 | func randString(n int, src rand.Source, prefix string) string { | ||
361 | b := make([]byte, n) | ||
362 | // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! | ||
363 | for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { | ||
364 | if remain == 0 { | ||
365 | cache, remain = src.Int63(), letterIdxMax | ||
366 | } | ||
367 | if idx := int(cache & letterIdxMask); idx < len(letterBytes) { | ||
368 | b[i] = letterBytes[idx] | ||
369 | i-- | ||
370 | } | ||
371 | cache >>= letterIdxBits | ||
372 | remain-- | ||
373 | } | ||
374 | return prefix + string(b[0:30-len(prefix)]) | ||
375 | } | ||
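Note that although randString takes n, the returned name is capped at 30 characters including the prefix (the final `b[0:30-len(prefix)]` slice). A usage sketch mirroring the tests below:

src := rand.NewSource(time.Now().UnixNano())
bucketName := randString(60, src, "minio-go-test-")
// len(bucketName) == 30: the 14-character prefix plus 16 random characters.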
376 | |||
377 | var dataFileMap = map[string]int{ | ||
378 | "datafile-0-b": 0, | ||
379 | "datafile-1-b": 1, | ||
380 | "datafile-1-kB": 1 * humanize.KiByte, | ||
381 | "datafile-10-kB": 10 * humanize.KiByte, | ||
382 | "datafile-33-kB": 33 * humanize.KiByte, | ||
383 | "datafile-100-kB": 100 * humanize.KiByte, | ||
384 | "datafile-1.03-MB": 1056 * humanize.KiByte, | ||
385 | "datafile-1-MB": 1 * humanize.MiByte, | ||
386 | "datafile-5-MB": 5 * humanize.MiByte, | ||
387 | "datafile-6-MB": 6 * humanize.MiByte, | ||
388 | "datafile-11-MB": 11 * humanize.MiByte, | ||
389 | "datafile-65-MB": 65 * humanize.MiByte, | ||
390 | "datafile-129-MB": 129 * humanize.MiByte, | ||
391 | } | ||
392 | |||
393 | var dataFileCRC32 = map[string]uint32{} | ||
394 | |||
395 | func isFullMode() bool { | ||
396 | return os.Getenv("MINT_MODE") == "full" | ||
397 | } | ||
398 | |||
399 | func getFuncName() string { | ||
400 | return getFuncNameLoc(2) | ||
401 | } | ||
402 | |||
403 | func getFuncNameLoc(caller int) string { | ||
404 | pc, _, _, _ := runtime.Caller(caller) | ||
405 | return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") | ||
406 | } | ||
407 | |||
408 | // Tests bucket re-create errors. | ||
409 | func testMakeBucketError() { | ||
410 | region := "eu-central-1" | ||
411 | |||
412 | // initialize logging params | ||
413 | startTime := time.Now() | ||
414 | testName := getFuncName() | ||
415 | function := "MakeBucket(bucketName, region)" | ||
416 | // initialize logging params | ||
417 | args := map[string]interface{}{ | ||
418 | "bucketName": "", | ||
419 | "region": region, | ||
420 | } | ||
421 | |||
422 | // Seed random based on current time. | ||
423 | rand.Seed(time.Now().Unix()) | ||
424 | |||
425 | // Instantiate new minio client object. | ||
426 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
427 | &minio.Options{ | ||
428 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
429 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
430 | }) | ||
431 | if err != nil { | ||
432 | logError(testName, function, args, startTime, "", "MinIO client creation failed", err) | ||
433 | return | ||
434 | } | ||
435 | |||
436 | // Enable tracing, write to stderr. | ||
437 | // c.TraceOn(os.Stderr) | ||
438 | |||
439 | // Set user agent. | ||
440 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
441 | |||
442 | // Generate a new random bucket name. | ||
443 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
444 | args["bucketName"] = bucketName | ||
445 | |||
446 | // Make a new bucket in 'eu-central-1'. | ||
447 | if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { | ||
448 | logError(testName, function, args, startTime, "", "MakeBucket Failed", err) | ||
449 | return | ||
450 | } | ||
451 | defer cleanupBucket(bucketName, c) | ||
452 | |||
453 | if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { | ||
454 | logError(testName, function, args, startTime, "", "Bucket already exists", err) | ||
455 | return | ||
456 | } | ||
457 | // Verify valid error response from server. | ||
458 | if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && | ||
459 | minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { | ||
460 | logError(testName, function, args, startTime, "", "Invalid error returned by server", err) | ||
461 | return | ||
462 | } | ||
463 | |||
464 | successLogger(testName, function, args, startTime).Info() | ||
465 | } | ||
466 | |||
467 | func testMetadataSizeLimit() { | ||
468 | startTime := time.Now() | ||
469 | testName := getFuncName() | ||
470 | function := "PutObject(bucketName, objectName, reader, objectSize, opts)" | ||
471 | args := map[string]interface{}{ | ||
472 | "bucketName": "", | ||
473 | "objectName": "", | ||
474 | "opts.UserMetadata": "", | ||
475 | } | ||
476 | rand.Seed(startTime.Unix()) | ||
477 | |||
478 | // Instantiate new minio client object. | ||
479 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
480 | &minio.Options{ | ||
481 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
482 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
483 | }) | ||
484 | if err != nil { | ||
485 | logError(testName, function, args, startTime, "", "MinIO client creation failed", err) | ||
486 | return | ||
487 | } | ||
488 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
489 | |||
490 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
491 | args["bucketName"] = bucketName | ||
492 | |||
493 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
494 | args["objectName"] = objectName | ||
495 | |||
496 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
497 | if err != nil { | ||
498 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
499 | return | ||
500 | } | ||
501 | |||
502 | defer cleanupBucket(bucketName, c) | ||
503 | |||
504 | const HeaderSizeLimit = 8 * 1024 | ||
505 | const UserMetadataLimit = 2 * 1024 | ||
506 | |||
507 | // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail | ||
508 | metadata := make(map[string]string) | ||
509 | metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) | ||
510 | args["metadata"] = fmt.Sprint(metadata) | ||
511 | |||
512 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) | ||
513 | if err == nil { | ||
514 | logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) | ||
515 | return | ||
516 | } | ||
517 | |||
518 | // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail | ||
519 | metadata = make(map[string]string) | ||
520 | metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) | ||
521 | args["metadata"] = fmt.Sprint(metadata) | ||
522 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) | ||
523 | if err == nil { | ||
524 | logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) | ||
525 | return | ||
526 | } | ||
527 | |||
528 | successLogger(testName, function, args, startTime).Info() | ||
529 | } | ||
530 | |||
531 | // Tests various bucket supported formats. | ||
532 | func testMakeBucketRegions() { | ||
533 | region := "eu-central-1" | ||
534 | // initialize logging params | ||
535 | startTime := time.Now() | ||
536 | testName := getFuncName() | ||
537 | function := "MakeBucket(bucketName, region)" | ||
538 | // initialize logging params | ||
539 | args := map[string]interface{}{ | ||
540 | "bucketName": "", | ||
541 | "region": region, | ||
542 | } | ||
543 | |||
544 | // Seed random based on current time. | ||
545 | rand.Seed(time.Now().Unix()) | ||
546 | |||
547 | // Instantiate new minio client object. | ||
548 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
549 | &minio.Options{ | ||
550 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
551 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
552 | }) | ||
553 | if err != nil { | ||
554 | logError(testName, function, args, startTime, "", "MinIO client creation failed", err) | ||
555 | return | ||
556 | } | ||
557 | |||
558 | // Enable tracing, write to stderr. | ||
559 | // c.TraceOn(os.Stderr) | ||
560 | |||
561 | // Set user agent. | ||
562 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
563 | |||
564 | // Generate a new random bucket name. | ||
565 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
566 | args["bucketName"] = bucketName | ||
567 | |||
568 | // Make a new bucket in 'eu-central-1'. | ||
569 | if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { | ||
570 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
571 | return | ||
572 | } | ||
573 | |||
574 | // Delete all objects and buckets | ||
575 | if err = cleanupBucket(bucketName, c); err != nil { | ||
576 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
577 | return | ||
578 | } | ||
579 | |||
580 | // Make a new bucket with '.' in its name, in 'us-west-2'. This | ||
581 | // request is internally routed using path-style instead of | ||
582 | // virtual-host-style addressing. | ||
583 | region = "us-west-2" | ||
584 | args["region"] = region | ||
585 | if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil { | ||
586 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
587 | return | ||
588 | } | ||
589 | |||
590 | // Delete all objects and buckets | ||
591 | if err = cleanupBucket(bucketName+".withperiod", c); err != nil { | ||
592 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
593 | return | ||
594 | } | ||
595 | successLogger(testName, function, args, startTime).Info() | ||
596 | } | ||
597 | |||
598 | // Test PutObject using large data to trigger the multipart ReadAt path | ||
599 | func testPutObjectReadAt() { | ||
600 | // initialize logging params | ||
601 | startTime := time.Now() | ||
602 | testName := getFuncName() | ||
603 | function := "PutObject(bucketName, objectName, reader, opts)" | ||
604 | args := map[string]interface{}{ | ||
605 | "bucketName": "", | ||
606 | "objectName": "", | ||
607 | "opts": "objectContentType", | ||
608 | } | ||
609 | |||
610 | // Seed random based on current time. | ||
611 | rand.Seed(time.Now().Unix()) | ||
612 | |||
613 | // Instantiate new minio client object. | ||
614 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
615 | &minio.Options{ | ||
616 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
617 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
618 | }) | ||
619 | if err != nil { | ||
620 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
621 | return | ||
622 | } | ||
623 | |||
624 | // Enable tracing, write to stderr. | ||
625 | // c.TraceOn(os.Stderr) | ||
626 | |||
627 | // Set user agent. | ||
628 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
629 | |||
630 | // Generate a new random bucket name. | ||
631 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
632 | args["bucketName"] = bucketName | ||
633 | |||
634 | // Make a new bucket. | ||
635 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
636 | if err != nil { | ||
637 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
638 | return | ||
639 | } | ||
640 | |||
641 | defer cleanupBucket(bucketName, c) | ||
642 | |||
643 | bufSize := dataFileMap["datafile-129-MB"] | ||
644 | reader := getDataReader("datafile-129-MB") | ||
645 | defer reader.Close() | ||
646 | |||
647 | // Save the data | ||
648 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
649 | args["objectName"] = objectName | ||
650 | |||
651 | // Object content type | ||
652 | objectContentType := "binary/octet-stream" | ||
653 | args["objectContentType"] = objectContentType | ||
654 | |||
655 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) | ||
656 | if err != nil { | ||
657 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
658 | return | ||
659 | } | ||
660 | |||
661 | // Read the data back | ||
662 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
663 | if err != nil { | ||
664 | logError(testName, function, args, startTime, "", "Get Object failed", err) | ||
665 | return | ||
666 | } | ||
667 | |||
668 | st, err := r.Stat() | ||
669 | if err != nil { | ||
670 | logError(testName, function, args, startTime, "", "Stat Object failed", err) | ||
671 | return | ||
672 | } | ||
673 | if st.Size != int64(bufSize) { | ||
674 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) | ||
675 | return | ||
676 | } | ||
677 | if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { | ||
678 | logError(testName, function, args, startTime, "", "Content types don't match", err) | ||
679 | return | ||
680 | } | ||
681 | if err := crcMatchesName(r, "datafile-129-MB"); err != nil { | ||
682 | logError(testName, function, args, startTime, "", "data CRC check failed", err) | ||
683 | return | ||
684 | } | ||
685 | if err := r.Close(); err != nil { | ||
686 | logError(testName, function, args, startTime, "", "Object Close failed", err) | ||
687 | return | ||
688 | } | ||
689 | if err := r.Close(); err == nil { | ||
690 | logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) | ||
691 | return | ||
692 | } | ||
693 | |||
694 | successLogger(testName, function, args, startTime).Info() | ||
695 | } | ||
696 | |||
697 | func testListObjectVersions() { | ||
698 | // initialize logging params | ||
699 | startTime := time.Now() | ||
700 | testName := getFuncName() | ||
701 | function := "ListObjectVersions(bucketName, prefix, recursive)" | ||
702 | args := map[string]interface{}{ | ||
703 | "bucketName": "", | ||
704 | "prefix": "", | ||
705 | "recursive": "", | ||
706 | } | ||
707 | |||
708 | // Seed random based on current time. | ||
709 | rand.Seed(time.Now().Unix()) | ||
710 | |||
711 | // Instantiate new minio client object. | ||
712 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
713 | &minio.Options{ | ||
714 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
715 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
716 | }) | ||
717 | if err != nil { | ||
718 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
719 | return | ||
720 | } | ||
721 | |||
722 | // Enable tracing, write to stderr. | ||
723 | // c.TraceOn(os.Stderr) | ||
724 | |||
725 | // Set user agent. | ||
726 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
727 | |||
728 | // Generate a new random bucket name. | ||
729 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
730 | args["bucketName"] = bucketName | ||
731 | |||
732 | // Make a new bucket. | ||
733 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
734 | if err != nil { | ||
735 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
736 | return | ||
737 | } | ||
738 | |||
739 | err = c.EnableVersioning(context.Background(), bucketName) | ||
740 | if err != nil { | ||
741 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
742 | return | ||
743 | } | ||
744 | |||
745 | // Save the data | ||
746 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
747 | args["objectName"] = objectName | ||
748 | |||
749 | bufSize := dataFileMap["datafile-10-kB"] | ||
750 | reader := getDataReader("datafile-10-kB") | ||
751 | |||
752 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) | ||
753 | if err != nil { | ||
754 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
755 | return | ||
756 | } | ||
757 | reader.Close() | ||
758 | |||
759 | bufSize = dataFileMap["datafile-1-b"] | ||
760 | reader = getDataReader("datafile-1-b") | ||
761 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) | ||
762 | if err != nil { | ||
763 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
764 | return | ||
765 | } | ||
766 | reader.Close() | ||
767 | |||
768 | err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) | ||
769 | if err != nil { | ||
770 | logError(testName, function, args, startTime, "", "Unexpected object deletion", err) | ||
771 | return | ||
772 | } | ||
773 | |||
774 | var deleteMarkers, versions int | ||
775 | |||
776 | objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
777 | for info := range objectsInfo { | ||
778 | if info.Err != nil { | ||
779 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) | ||
780 | return | ||
781 | } | ||
782 | if info.Key != objectName { | ||
783 | logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil) | ||
784 | return | ||
785 | } | ||
786 | if info.VersionID == "" { | ||
787 | logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil) | ||
788 | return | ||
789 | } | ||
790 | if info.IsDeleteMarker { | ||
791 | deleteMarkers++ | ||
792 | if !info.IsLatest { | ||
793 | logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil) | ||
794 | return | ||
795 | } | ||
796 | } else { | ||
797 | versions++ | ||
798 | } | ||
799 | } | ||
800 | |||
801 | if deleteMarkers != 1 { | ||
802 | logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil) | ||
803 | return | ||
804 | } | ||
805 | |||
806 | if versions != 2 { | ||
807 | logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) | ||
808 | return | ||
809 | } | ||
810 | |||
811 | // Delete all objects and their versions, as well as the bucket itself | ||
812 | if err = cleanupVersionedBucket(bucketName, c); err != nil { | ||
813 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
814 | return | ||
815 | } | ||
816 | |||
817 | successLogger(testName, function, args, startTime).Info() | ||
818 | } | ||
819 | |||
820 | func testStatObjectWithVersioning() { | ||
821 | // initialize logging params | ||
822 | startTime := time.Now() | ||
823 | testName := getFuncName() | ||
824 | function := "StatObject" | ||
825 | args := map[string]interface{}{} | ||
826 | |||
827 | // Seed random based on current time. | ||
828 | rand.Seed(time.Now().Unix()) | ||
829 | |||
830 | // Instantiate new minio client object. | ||
831 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
832 | &minio.Options{ | ||
833 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
834 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
835 | }) | ||
836 | if err != nil { | ||
837 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
838 | return | ||
839 | } | ||
840 | |||
841 | // Enable tracing, write to stderr. | ||
842 | // c.TraceOn(os.Stderr) | ||
843 | |||
844 | // Set user agent. | ||
845 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
846 | |||
847 | // Generate a new random bucket name. | ||
848 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
849 | args["bucketName"] = bucketName | ||
850 | |||
851 | // Make a new bucket. | ||
852 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
853 | if err != nil { | ||
854 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
855 | return | ||
856 | } | ||
857 | |||
858 | err = c.EnableVersioning(context.Background(), bucketName) | ||
859 | if err != nil { | ||
860 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
861 | return | ||
862 | } | ||
863 | |||
864 | // Save the data | ||
865 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
866 | args["objectName"] = objectName | ||
867 | |||
868 | bufSize := dataFileMap["datafile-10-kB"] | ||
869 | reader := getDataReader("datafile-10-kB") | ||
870 | |||
871 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) | ||
872 | if err != nil { | ||
873 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
874 | return | ||
875 | } | ||
876 | reader.Close() | ||
877 | |||
878 | bufSize = dataFileMap["datafile-1-b"] | ||
879 | reader = getDataReader("datafile-1-b") | ||
880 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) | ||
881 | if err != nil { | ||
882 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
883 | return | ||
884 | } | ||
885 | reader.Close() | ||
886 | |||
887 | objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
888 | |||
889 | var results []minio.ObjectInfo | ||
890 | for info := range objectsInfo { | ||
891 | if info.Err != nil { | ||
892 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) | ||
893 | return | ||
894 | } | ||
895 | results = append(results, info) | ||
896 | } | ||
897 | |||
898 | if len(results) != 2 { | ||
899 | logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) | ||
900 | return | ||
901 | } | ||
902 | |||
903 | for i := 0; i < len(results); i++ { | ||
904 | opts := minio.StatObjectOptions{VersionID: results[i].VersionID} | ||
905 | statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts) | ||
906 | if err != nil { | ||
907 | logError(testName, function, args, startTime, "", "error during HEAD object", err) | ||
908 | return | ||
909 | } | ||
910 | if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID { | ||
911 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err) | ||
912 | return | ||
913 | } | ||
914 | if statInfo.ETag != results[i].ETag { | ||
915 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) | ||
916 | return | ||
917 | } | ||
918 | if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { | ||
919 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) | ||
920 | return | ||
921 | } | ||
922 | if statInfo.Size != results[i].Size { | ||
923 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) | ||
924 | return | ||
925 | } | ||
926 | } | ||
927 | |||
928 | // Delete all objects and their versions, as well as the bucket itself | ||
929 | if err = cleanupVersionedBucket(bucketName, c); err != nil { | ||
930 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
931 | return | ||
932 | } | ||
933 | |||
934 | successLogger(testName, function, args, startTime).Info() | ||
935 | } | ||
936 | |||
937 | func testGetObjectWithVersioning() { | ||
938 | // initialize logging params | ||
939 | startTime := time.Now() | ||
940 | testName := getFuncName() | ||
941 | function := "GetObject()" | ||
942 | args := map[string]interface{}{} | ||
943 | |||
944 | // Seed random based on current time. | ||
945 | rand.Seed(time.Now().Unix()) | ||
946 | |||
947 | // Instantiate new minio client object. | ||
948 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
949 | &minio.Options{ | ||
950 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
951 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
952 | }) | ||
953 | if err != nil { | ||
954 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
955 | return | ||
956 | } | ||
957 | |||
958 | // Enable tracing, write to stderr. | ||
959 | // c.TraceOn(os.Stderr) | ||
960 | |||
961 | // Set user agent. | ||
962 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
963 | |||
964 | // Generate a new random bucket name. | ||
965 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
966 | args["bucketName"] = bucketName | ||
967 | |||
968 | // Make a new bucket. | ||
969 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
970 | if err != nil { | ||
971 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
972 | return | ||
973 | } | ||
974 | |||
975 | err = c.EnableVersioning(context.Background(), bucketName) | ||
976 | if err != nil { | ||
977 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
978 | return | ||
979 | } | ||
980 | |||
981 | // Save the data | ||
982 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
983 | args["objectName"] = objectName | ||
984 | |||
985 | // Save the contents of datafiles to check with GetObject() reader output later | ||
986 | var buffers [][]byte | ||
987 | testFiles := []string{"datafile-1-b", "datafile-10-kB"} | ||
988 | |||
989 | for _, testFile := range testFiles { | ||
990 | r := getDataReader(testFile) | ||
991 | buf, err := io.ReadAll(r) | ||
992 | if err != nil { | ||
993 | logError(testName, function, args, startTime, "", "unexpected failure", err) | ||
994 | return | ||
995 | } | ||
996 | r.Close() | ||
997 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) | ||
998 | if err != nil { | ||
999 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
1000 | return | ||
1001 | } | ||
1002 | buffers = append(buffers, buf) | ||
1003 | } | ||
1004 | |||
1005 | objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1006 | |||
1007 | var results []minio.ObjectInfo | ||
1008 | for info := range objectsInfo { | ||
1009 | if info.Err != nil { | ||
1010 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) | ||
1011 | return | ||
1012 | } | ||
1013 | results = append(results, info) | ||
1014 | } | ||
1015 | |||
1016 | if len(results) != 2 { | ||
1017 | logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) | ||
1018 | return | ||
1019 | } | ||
1020 | |||
1021 | sort.SliceStable(results, func(i, j int) bool { | ||
1022 | return results[i].Size < results[j].Size | ||
1023 | }) | ||
1024 | |||
1025 | sort.SliceStable(buffers, func(i, j int) bool { | ||
1026 | return len(buffers[i]) < len(buffers[j]) | ||
1027 | }) | ||
1028 | |||
1029 | for i := 0; i < len(results); i++ { | ||
1030 | opts := minio.GetObjectOptions{VersionID: results[i].VersionID} | ||
1031 | reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) | ||
1032 | if err != nil { | ||
1033 | logError(testName, function, args, startTime, "", "error during GET object", err) | ||
1034 | return | ||
1035 | } | ||
1036 | statInfo, err := reader.Stat() | ||
1037 | if err != nil { | ||
1038 | logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) | ||
1039 | return | ||
1040 | } | ||
1041 | if statInfo.ETag != results[i].ETag { | ||
1042 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) | ||
1043 | return | ||
1044 | } | ||
1045 | if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { | ||
1046 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) | ||
1047 | return | ||
1048 | } | ||
1049 | if statInfo.Size != results[i].Size { | ||
1050 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) | ||
1051 | return | ||
1052 | } | ||
1053 | |||
1054 | tmpBuffer := bytes.NewBuffer([]byte{}) | ||
1055 | _, err = io.Copy(tmpBuffer, reader) | ||
1056 | if err != nil { | ||
1057 | logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) | ||
1058 | return | ||
1059 | } | ||
1060 | |||
1061 | if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { | ||
1062 | logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) | ||
1063 | return | ||
1064 | } | ||
1065 | } | ||
1066 | |||
1067 | // Delete all objects and their versions, as well as the bucket itself | ||
1068 | if err = cleanupVersionedBucket(bucketName, c); err != nil { | ||
1069 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
1070 | return | ||
1071 | } | ||
1072 | |||
1073 | successLogger(testName, function, args, startTime).Info() | ||
1074 | } | ||
1075 | |||
1076 | func testPutObjectWithVersioning() { | ||
1077 | // initialize logging params | ||
1078 | startTime := time.Now() | ||
1079 | testName := getFuncName() | ||
1080 | function := "PutObject()" | ||
1081 | args := map[string]interface{}{} | ||
1082 | |||
1083 | // Seed random based on current time. | ||
1084 | rand.Seed(time.Now().Unix()) | ||
1085 | |||
1086 | // Instantiate new minio client object. | ||
1087 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
1088 | &minio.Options{ | ||
1089 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
1090 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
1091 | }) | ||
1092 | if err != nil { | ||
1093 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
1094 | return | ||
1095 | } | ||
1096 | |||
1097 | // Enable tracing, write to stderr. | ||
1098 | // c.TraceOn(os.Stderr) | ||
1099 | |||
1100 | // Set user agent. | ||
1101 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
1102 | |||
1103 | // Generate a new random bucket name. | ||
1104 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
1105 | args["bucketName"] = bucketName | ||
1106 | |||
1107 | // Make a new bucket. | ||
1108 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
1109 | if err != nil { | ||
1110 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
1111 | return | ||
1112 | } | ||
1113 | |||
1114 | err = c.EnableVersioning(context.Background(), bucketName) | ||
1115 | if err != nil { | ||
1116 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
1117 | return | ||
1118 | } | ||
1119 | |||
1120 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
1121 | args["objectName"] = objectName | ||
1122 | |||
1123 | const n = 10 | ||
1124 | // Read input... | ||
1125 | |||
1126 | // Save the data concurrently. | ||
1127 | var wg sync.WaitGroup | ||
1128 | wg.Add(n) | ||
1129 | buffers := make([][]byte, n) | ||
1130 | var errs [n]error | ||
1131 | for i := 0; i < n; i++ { | ||
1132 | r := newRandomReader(int64((1<<20)*i+i), int64(i)) | ||
1133 | buf, err := io.ReadAll(r) | ||
1134 | if err != nil { | ||
1135 | logError(testName, function, args, startTime, "", "unexpected failure", err) | ||
1136 | return | ||
1137 | } | ||
1138 | buffers[i] = buf | ||
1139 | |||
1140 | go func(i int) { | ||
1141 | defer wg.Done() | ||
1142 | _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20}) | ||
1143 | }(i) | ||
1144 | } | ||
1145 | wg.Wait() | ||
1146 | for _, err := range errs { | ||
1147 | if err != nil { | ||
1148 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
1149 | return | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1154 | var results []minio.ObjectInfo | ||
1155 | for info := range objectsInfo { | ||
1156 | if info.Err != nil { | ||
1157 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) | ||
1158 | return | ||
1159 | } | ||
1160 | results = append(results, info) | ||
1161 | } | ||
1162 | |||
1163 | if len(results) != n { | ||
1164 | logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) | ||
1165 | return | ||
1166 | } | ||
1167 | |||
1168 | sort.Slice(results, func(i, j int) bool { | ||
1169 | return results[i].Size < results[j].Size | ||
1170 | }) | ||
1171 | |||
1172 | sort.Slice(buffers, func(i, j int) bool { | ||
1173 | return len(buffers[i]) < len(buffers[j]) | ||
1174 | }) | ||
1175 | |||
1176 | for i := 0; i < len(results); i++ { | ||
1177 | opts := minio.GetObjectOptions{VersionID: results[i].VersionID} | ||
1178 | reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) | ||
1179 | if err != nil { | ||
1180 | logError(testName, function, args, startTime, "", "error during GET object", err) | ||
1181 | return | ||
1182 | } | ||
1183 | statInfo, err := reader.Stat() | ||
1184 | if err != nil { | ||
1185 | logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) | ||
1186 | return | ||
1187 | } | ||
1188 | if statInfo.ETag != results[i].ETag { | ||
1189 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) | ||
1190 | return | ||
1191 | } | ||
1192 | if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { | ||
1193 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) | ||
1194 | return | ||
1195 | } | ||
1196 | if statInfo.Size != results[i].Size { | ||
1197 | logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) | ||
1198 | return | ||
1199 | } | ||
1200 | |||
1201 | tmpBuffer := bytes.NewBuffer([]byte{}) | ||
1202 | _, err = io.Copy(tmpBuffer, reader) | ||
1203 | if err != nil { | ||
1204 | logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) | ||
1205 | return | ||
1206 | } | ||
1207 | |||
1208 | if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { | ||
1209 | logError(testName, function, args, startTime, "", "Unexpected content of GetObject()", nil) | ||
1210 | return | ||
1211 | } | ||
1212 | } | ||
1213 | |||
1214 | // Delete all objects and their versions, as well as the bucket itself. | ||
1215 | if err = cleanupVersionedBucket(bucketName, c); err != nil { | ||
1216 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
1217 | return | ||
1218 | } | ||
1219 | |||
1220 | successLogger(testName, function, args, startTime).Info() | ||
1221 | } | ||
1222 | |||
1223 | func testCopyObjectWithVersioning() { | ||
1224 | // initialize logging params | ||
1225 | startTime := time.Now() | ||
1226 | testName := getFuncName() | ||
1227 | function := "CopyObject()" | ||
1228 | args := map[string]interface{}{} | ||
1229 | |||
1230 | // Seed random based on current time. | ||
1231 | rand.Seed(time.Now().Unix()) | ||
1232 | |||
1233 | // Instantiate new minio client object. | ||
1234 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
1235 | &minio.Options{ | ||
1236 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
1237 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
1238 | }) | ||
1239 | if err != nil { | ||
1240 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
1241 | return | ||
1242 | } | ||
1243 | |||
1244 | // Enable tracing, write to stderr. | ||
1245 | // c.TraceOn(os.Stderr) | ||
1246 | |||
1247 | // Set user agent. | ||
1248 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
1249 | |||
1250 | // Generate a new random bucket name. | ||
1251 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
1252 | args["bucketName"] = bucketName | ||
1253 | |||
1254 | // Make a new bucket. | ||
1255 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
1256 | if err != nil { | ||
1257 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
1258 | return | ||
1259 | } | ||
1260 | |||
1261 | err = c.EnableVersioning(context.Background(), bucketName) | ||
1262 | if err != nil { | ||
1263 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
1264 | return | ||
1265 | } | ||
1266 | |||
1267 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
1268 | args["objectName"] = objectName | ||
1269 | |||
1270 | testFiles := []string{"datafile-1-b", "datafile-10-kB"} | ||
1271 | for _, testFile := range testFiles { | ||
1272 | r := getDataReader(testFile) | ||
1273 | buf, err := io.ReadAll(r) | ||
1274 | if err != nil { | ||
1275 | logError(testName, function, args, startTime, "", "unexpected failure", err) | ||
1276 | return | ||
1277 | } | ||
1278 | r.Close() | ||
1279 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) | ||
1280 | if err != nil { | ||
1281 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
1282 | return | ||
1283 | } | ||
1284 | } | ||
1285 | |||
1286 | objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1287 | var infos []minio.ObjectInfo | ||
1288 | for info := range objectsInfo { | ||
1289 | if info.Err != nil { | ||
1290 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err) | ||
1291 | return | ||
1292 | } | ||
1293 | infos = append(infos, info) | ||
1294 | } | ||
1295 | |||
1296 | sort.Slice(infos, func(i, j int) bool { | ||
1297 | return infos[i].Size < infos[j].Size | ||
1298 | }) | ||
1299 | |||
1300 | reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) | ||
1301 | if err != nil { | ||
1302 | logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) | ||
1303 | return | ||
1304 | } | ||
1305 | |||
1306 | oldestContent, err := io.ReadAll(reader) | ||
1307 | if err != nil { | ||
1308 | logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) | ||
1309 | return | ||
1310 | } | ||
1311 | |||
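// Setting VersionID in CopySrcOptions pins the copy to the oldest version,
// so the destination object must end up with the oldest content even though
// a newer version of the source key exists.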
1312 | // Copy Source | ||
1313 | srcOpts := minio.CopySrcOptions{ | ||
1314 | Bucket: bucketName, | ||
1315 | Object: objectName, | ||
1316 | VersionID: infos[0].VersionID, | ||
1317 | } | ||
1318 | args["src"] = srcOpts | ||
1319 | |||
1320 | dstOpts := minio.CopyDestOptions{ | ||
1321 | Bucket: bucketName, | ||
1322 | Object: objectName + "-copy", | ||
1323 | } | ||
1324 | args["dst"] = dstOpts | ||
1325 | |||
1326 | // Perform the Copy | ||
1327 | if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { | ||
1328 | logError(testName, function, args, startTime, "", "CopyObject failed", err) | ||
1329 | return | ||
1330 | } | ||
1331 | |||
1332 | // Destination object | ||
1333 | readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) | ||
1334 | if err != nil { | ||
1335 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
1336 | return | ||
1337 | } | ||
1338 | defer readerCopy.Close() | ||
1339 | |||
1340 | newestContent, err := io.ReadAll(readerCopy) | ||
1341 | if err != nil { | ||
1342 | logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) | ||
1343 | return | ||
1344 | } | ||
1345 | |||
1346 | if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { | ||
1347 | logError(testName, function, args, startTime, "", "Unexpected destination object content", nil) | ||
1348 | return | ||
1349 | } | ||
1350 | |||
1351 | // Delete all objects and their versions, as well as the bucket itself. | ||
1352 | if err = cleanupVersionedBucket(bucketName, c); err != nil { | ||
1353 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
1354 | return | ||
1355 | } | ||
1356 | |||
1357 | successLogger(testName, function, args, startTime).Info() | ||
1358 | } | ||
1359 | |||
1360 | func testConcurrentCopyObjectWithVersioning() { | ||
1361 | // initialize logging params | ||
1362 | startTime := time.Now() | ||
1363 | testName := getFuncName() | ||
1364 | function := "CopyObject()" | ||
1365 | args := map[string]interface{}{} | ||
1366 | |||
1367 | // Seed random based on current time. | ||
1368 | rand.Seed(time.Now().Unix()) | ||
1369 | |||
1370 | // Instantiate new minio client object. | ||
1371 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
1372 | &minio.Options{ | ||
1373 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
1374 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
1375 | }) | ||
1376 | if err != nil { | ||
1377 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
1378 | return | ||
1379 | } | ||
1380 | |||
1381 | // Enable tracing, write to stderr. | ||
1382 | // c.TraceOn(os.Stderr) | ||
1383 | |||
1384 | // Set user agent. | ||
1385 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
1386 | |||
1387 | // Generate a new random bucket name. | ||
1388 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
1389 | args["bucketName"] = bucketName | ||
1390 | |||
1391 | // Make a new bucket. | ||
1392 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
1393 | if err != nil { | ||
1394 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
1395 | return | ||
1396 | } | ||
1397 | |||
1398 | err = c.EnableVersioning(context.Background(), bucketName) | ||
1399 | if err != nil { | ||
1400 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
1401 | return | ||
1402 | } | ||
1403 | |||
1404 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
1405 | args["objectName"] = objectName | ||
1406 | |||
1407 | testFiles := []string{"datafile-10-kB"} | ||
1408 | for _, testFile := range testFiles { | ||
1409 | r := getDataReader(testFile) | ||
1410 | buf, err := io.ReadAll(r) | ||
1411 | if err != nil { | ||
1412 | logError(testName, function, args, startTime, "", "unexpected failure", err) | ||
1413 | return | ||
1414 | } | ||
1415 | r.Close() | ||
1416 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) | ||
1417 | if err != nil { | ||
1418 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
1419 | return | ||
1420 | } | ||
1421 | } | ||
1422 | |||
1423 | objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1424 | var infos []minio.ObjectInfo | ||
1425 | for info := range objectsInfo { | ||
1426 | if info.Err != nil { | ||
1427 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err) | ||
1428 | return | ||
1429 | } | ||
1430 | infos = append(infos, info) | ||
1431 | } | ||
1432 | |||
1433 | sort.Slice(infos, func(i, j int) bool { | ||
1434 | return infos[i].Size < infos[j].Size | ||
1435 | }) | ||
1436 | |||
1437 | reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) | ||
1438 | if err != nil { | ||
1439 | logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) | ||
1440 | return | ||
1441 | } | ||
1442 | |||
1443 | oldestContent, err := io.ReadAll(reader) | ||
1444 | if err != nil { | ||
1445 | logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) | ||
1446 | return | ||
1447 | } | ||
1448 | |||
1449 | // Copy Source | ||
1450 | srcOpts := minio.CopySrcOptions{ | ||
1451 | Bucket: bucketName, | ||
1452 | Object: objectName, | ||
1453 | VersionID: infos[0].VersionID, | ||
1454 | } | ||
1455 | args["src"] = srcOpts | ||
1456 | |||
1457 | dstOpts := minio.CopyDestOptions{ | ||
1458 | Bucket: bucketName, | ||
1459 | Object: objectName + "-copy", | ||
1460 | } | ||
1461 | args["dst"] = dstOpts | ||
1462 | |||
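// On a versioned bucket, each concurrent copy to the same destination key
// creates a new version rather than overwriting, so all n copies should
// succeed and the listing below should report exactly n versions.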
1463 | // Perform the Copy concurrently | ||
1464 | const n = 10 | ||
1465 | var wg sync.WaitGroup | ||
1466 | wg.Add(n) | ||
1467 | var errs [n]error | ||
1468 | for i := 0; i < n; i++ { | ||
1469 | go func(i int) { | ||
1470 | defer wg.Done() | ||
1471 | _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts) | ||
1472 | }(i) | ||
1473 | } | ||
1474 | wg.Wait() | ||
1475 | for _, err := range errs { | ||
1476 | if err != nil { | ||
1477 | logError(testName, function, args, startTime, "", "CopyObject failed", err) | ||
1478 | return | ||
1479 | } | ||
1480 | } | ||
1481 | |||
1482 | objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object}) | ||
1483 | infos = []minio.ObjectInfo{} | ||
1484 | for info := range objectsInfo { | ||
1485 | // Destination object | ||
1486 | readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID}) | ||
1487 | if err != nil { | ||
1488 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
1489 | return | ||
1490 | } | ||
1491 | defer readerCopy.Close() | ||
1492 | |||
1493 | newestContent, err := io.ReadAll(readerCopy) | ||
1494 | if err != nil { | ||
1495 | logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) | ||
1496 | return | ||
1497 | } | ||
1498 | |||
1499 | if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { | ||
1500 | logError(testName, function, args, startTime, "", "Unexpected destination object content", nil) | ||
1501 | return | ||
1502 | } | ||
1503 | infos = append(infos, info) | ||
1504 | } | ||
1505 | |||
1506 | if len(infos) != n { | ||
1507 | logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) | ||
1508 | return | ||
1509 | } | ||
1510 | |||
1511 | // Delete all objects and their versions, as well as the bucket itself. | ||
1512 | if err = cleanupVersionedBucket(bucketName, c); err != nil { | ||
1513 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
1514 | return | ||
1515 | } | ||
1516 | |||
1517 | successLogger(testName, function, args, startTime).Info() | ||
1518 | } | ||
1519 | |||
1520 | func testComposeObjectWithVersioning() { | ||
1521 | // initialize logging params | ||
1522 | startTime := time.Now() | ||
1523 | testName := getFuncName() | ||
1524 | function := "ComposeObject()" | ||
1525 | args := map[string]interface{}{} | ||
1526 | |||
1527 | // Seed random based on current time. | ||
1528 | rand.Seed(time.Now().Unix()) | ||
1529 | |||
1530 | // Instantiate new minio client object. | ||
1531 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
1532 | &minio.Options{ | ||
1533 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
1534 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
1535 | }) | ||
1536 | if err != nil { | ||
1537 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
1538 | return | ||
1539 | } | ||
1540 | |||
1541 | // Enable tracing, write to stderr. | ||
1542 | // c.TraceOn(os.Stderr) | ||
1543 | |||
1544 | // Set user agent. | ||
1545 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
1546 | |||
1547 | // Generate a new random bucket name. | ||
1548 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
1549 | args["bucketName"] = bucketName | ||
1550 | |||
1551 | // Make a new bucket. | ||
1552 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
1553 | if err != nil { | ||
1554 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
1555 | return | ||
1556 | } | ||
1557 | |||
1558 | err = c.EnableVersioning(context.Background(), bucketName) | ||
1559 | if err != nil { | ||
1560 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
1561 | return | ||
1562 | } | ||
1563 | |||
1564 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
1565 | args["objectName"] = objectName | ||
1566 | |||
1568 | testFiles := []string{"datafile-5-MB", "datafile-10-kB"} | ||
1569 | var testFilesBytes [][]byte | ||
1570 | |||
1571 | for _, testFile := range testFiles { | ||
1572 | r := getDataReader(testFile) | ||
1573 | buf, err := io.ReadAll(r) | ||
1574 | if err != nil { | ||
1575 | logError(testName, function, args, startTime, "", "unexpected failure", err) | ||
1576 | return | ||
1577 | } | ||
1578 | r.Close() | ||
1579 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) | ||
1580 | if err != nil { | ||
1581 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
1582 | return | ||
1583 | } | ||
1584 | testFilesBytes = append(testFilesBytes, buf) | ||
1585 | } | ||
1586 | |||
1587 | objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1588 | |||
1589 | var results []minio.ObjectInfo | ||
1590 | for info := range objectsInfo { | ||
1591 | if info.Err != nil { | ||
1592 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err) | ||
1593 | return | ||
1594 | } | ||
1595 | results = append(results, info) | ||
1596 | } | ||
1597 | |||
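// Sort the versions largest-first: server-side compose follows multipart
// rules, so every source except the last must be at least 5 MiB. results[0]
// is then the 5 MB version and results[1] the 10 kB one, matching the order
// of testFilesBytes for the concatenation check further down.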
1598 | sort.SliceStable(results, func(i, j int) bool { | ||
1599 | return results[i].Size > results[j].Size | ||
1600 | }) | ||
1601 | |||
1602 | // Source objects to concatenate. Each source pins a specific | ||
1603 | // version of the same object. | ||
1604 | src1 := minio.CopySrcOptions{ | ||
1605 | Bucket: bucketName, | ||
1606 | Object: objectName, | ||
1607 | VersionID: results[0].VersionID, | ||
1608 | } | ||
1609 | |||
1610 | src2 := minio.CopySrcOptions{ | ||
1611 | Bucket: bucketName, | ||
1612 | Object: objectName, | ||
1613 | VersionID: results[1].VersionID, | ||
1614 | } | ||
1615 | |||
1616 | dst := minio.CopyDestOptions{ | ||
1617 | Bucket: bucketName, | ||
1618 | Object: objectName + "-copy", | ||
1619 | } | ||
1620 | |||
1621 | _, err = c.ComposeObject(context.Background(), dst, src1, src2) | ||
1622 | if err != nil { | ||
1623 | logError(testName, function, args, startTime, "", "ComposeObject failed", err) | ||
1624 | return | ||
1625 | } | ||
1626 | |||
1627 | // Destination object | ||
1628 | readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) | ||
1629 | if err != nil { | ||
1630 | logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err) | ||
1631 | return | ||
1632 | } | ||
1633 | defer readerCopy.Close() | ||
1634 | |||
1635 | copyContentBytes, err := io.ReadAll(readerCopy) | ||
1636 | if err != nil { | ||
1637 | logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) | ||
1638 | return | ||
1639 | } | ||
1640 | |||
1641 | var expectedContent []byte | ||
1642 | for _, fileBytes := range testFilesBytes { | ||
1643 | expectedContent = append(expectedContent, fileBytes...) | ||
1644 | } | ||
1645 | |||
1646 | if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) { | ||
1647 | logError(testName, function, args, startTime, "", "Unexpected destination object content", nil) | ||
1648 | return | ||
1649 | } | ||
1650 | |||
1651 | // Delete all objects and their versions, as well as the bucket itself. | ||
1652 | if err = cleanupVersionedBucket(bucketName, c); err != nil { | ||
1653 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
1654 | return | ||
1655 | } | ||
1656 | |||
1657 | successLogger(testName, function, args, startTime).Info() | ||
1658 | } | ||
1659 | |||
1660 | func testRemoveObjectWithVersioning() { | ||
1661 | // initialize logging params | ||
1662 | startTime := time.Now() | ||
1663 | testName := getFuncName() | ||
1664 | function := "DeleteObject()" | ||
1665 | args := map[string]interface{}{} | ||
1666 | |||
1667 | // Seed random based on current time. | ||
1668 | rand.Seed(time.Now().Unix()) | ||
1669 | |||
1670 | // Instantiate new minio client object. | ||
1671 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
1672 | &minio.Options{ | ||
1673 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
1674 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
1675 | }) | ||
1676 | if err != nil { | ||
1677 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
1678 | return | ||
1679 | } | ||
1680 | |||
1681 | // Enable tracing, write to stderr. | ||
1682 | // c.TraceOn(os.Stderr) | ||
1683 | |||
1684 | // Set user agent. | ||
1685 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
1686 | |||
1687 | // Generate a new random bucket name. | ||
1688 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
1689 | args["bucketName"] = bucketName | ||
1690 | |||
1691 | // Make a new bucket. | ||
1692 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
1693 | if err != nil { | ||
1694 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
1695 | return | ||
1696 | } | ||
1697 | |||
1698 | err = c.EnableVersioning(context.Background(), bucketName) | ||
1699 | if err != nil { | ||
1700 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
1701 | return | ||
1702 | } | ||
1703 | |||
1704 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
1705 | args["objectName"] = objectName | ||
1706 | |||
1707 | _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) | ||
1708 | if err != nil { | ||
1709 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
1710 | return | ||
1711 | } | ||
1712 | |||
1713 | objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1714 | var version minio.ObjectInfo | ||
1715 | for info := range objectsInfo { | ||
1716 | if info.Err != nil { | ||
1717 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err) | ||
1718 | return | ||
1719 | } | ||
1720 | version = info | ||
1721 | break | ||
1722 | } | ||
1723 | |||
1724 | err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID}) | ||
1725 | if err != nil { | ||
1726 | logError(testName, function, args, startTime, "", "DeleteObject failed", err) | ||
1727 | return | ||
1728 | } | ||
1729 | |||
1730 | objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1731 | for range objectsInfo { | ||
1732 | logError(testName, function, args, startTime, "", "Unexpected version listed, the bucket should be empty", nil) | ||
1733 | return | ||
1734 | } | ||
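// Removing the only version by its VersionID deletes it permanently, so the
// listing above must yield nothing. Deleting without a VersionID, as done
// next, instead places a delete marker on top of the version stack.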
1735 | // Test that the delete marker's version ID is non-empty. | ||
1736 | _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) | ||
1737 | if err != nil { | ||
1738 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
1739 | return | ||
1740 | } | ||
1741 | // create delete marker | ||
1742 | err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) | ||
1743 | if err != nil { | ||
1744 | logError(testName, function, args, startTime, "", "DeleteObject failed", err) | ||
1745 | return | ||
1746 | } | ||
1747 | objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1748 | idx := 0 | ||
1749 | for info := range objectsInfo { | ||
1750 | if info.Err != nil { | ||
1751 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) | ||
1752 | return | ||
1753 | } | ||
1754 | if idx == 0 { | ||
1755 | if !info.IsDeleteMarker { | ||
1756 | logError(testName, function, args, startTime, "", "Expected a delete marker to have been created", nil) | ||
1757 | return | ||
1758 | } | ||
1759 | if info.VersionID == "" { | ||
1760 | logError(testName, function, args, startTime, "", "Expected the delete marker to carry a version ID", nil) | ||
1761 | return | ||
1762 | } | ||
1763 | } | ||
1764 | idx++ | ||
1765 | } | ||
1766 | |||
1767 | defer cleanupBucket(bucketName, c) | ||
1768 | |||
1769 | successLogger(testName, function, args, startTime).Info() | ||
1770 | } | ||
1771 | |||
1772 | func testRemoveObjectsWithVersioning() { | ||
1773 | // initialize logging params | ||
1774 | startTime := time.Now() | ||
1775 | testName := getFuncName() | ||
1776 | function := "DeleteObjects()" | ||
1777 | args := map[string]interface{}{} | ||
1778 | |||
1779 | // Seed random based on current time. | ||
1780 | rand.Seed(time.Now().Unix()) | ||
1781 | |||
1782 | // Instantiate new minio client object. | ||
1783 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
1784 | &minio.Options{ | ||
1785 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
1786 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
1787 | }) | ||
1788 | if err != nil { | ||
1789 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
1790 | return | ||
1791 | } | ||
1792 | |||
1793 | // Enable tracing, write to stderr. | ||
1794 | // c.TraceOn(os.Stderr) | ||
1795 | |||
1796 | // Set user agent. | ||
1797 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
1798 | |||
1799 | // Generate a new random bucket name. | ||
1800 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
1801 | args["bucketName"] = bucketName | ||
1802 | |||
1803 | // Make a new bucket. | ||
1804 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
1805 | if err != nil { | ||
1806 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
1807 | return | ||
1808 | } | ||
1809 | |||
1810 | err = c.EnableVersioning(context.Background(), bucketName) | ||
1811 | if err != nil { | ||
1812 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
1813 | return | ||
1814 | } | ||
1815 | |||
1816 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
1817 | args["objectName"] = objectName | ||
1818 | |||
1819 | _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) | ||
1820 | if err != nil { | ||
1821 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
1822 | return | ||
1823 | } | ||
1824 | |||
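// RemoveObjects consumes ObjectInfo values from a channel. Stream the listed
// versions into it from a goroutine and close the channel (deferred) when the
// goroutine exits, so RemoveObjects sees end of input even if listing fails.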
1825 | objectsVersions := make(chan minio.ObjectInfo) | ||
1826 | go func() { | ||
1827 | defer close(objectsVersions) | ||
1828 | objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, | ||
1829 | minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1830 | for info := range objectsVersionsInfo { | ||
1831 | if info.Err != nil { | ||
1832 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err) | ||
1833 | return | ||
1834 | } | ||
1835 | objectsVersions <- info | ||
1836 | } | ||
1837 | }() | ||
1838 | |||
1839 | removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{}) | ||
1840 | // RemoveObjects reports per-object failures on the returned channel, which is drained below. | ||
1844 | |||
1845 | for e := range removeErrors { | ||
1846 | if e.Err != nil { | ||
1847 | logError(testName, function, args, startTime, "", "Single delete operation failed", e.Err) | ||
1848 | return | ||
1849 | } | ||
1850 | } | ||
1851 | |||
1852 | objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1853 | for range objectsVersionsInfo { | ||
1854 | logError(testName, function, args, startTime, "", "Unexpected version listed, all versions should have been removed", nil) | ||
1855 | return | ||
1856 | } | ||
1857 | |||
1858 | err = c.RemoveBucket(context.Background(), bucketName) | ||
1859 | if err != nil { | ||
1860 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
1861 | return | ||
1862 | } | ||
1863 | |||
1864 | successLogger(testName, function, args, startTime).Info() | ||
1865 | } | ||
1866 | |||
1867 | func testObjectTaggingWithVersioning() { | ||
1868 | // initialize logging params | ||
1869 | startTime := time.Now() | ||
1870 | testName := getFuncName() | ||
1871 | function := "{Get,Set,Remove}ObjectTagging()" | ||
1872 | args := map[string]interface{}{} | ||
1873 | |||
1874 | // Seed random based on current time. | ||
1875 | rand.Seed(time.Now().Unix()) | ||
1876 | |||
1877 | // Instantiate new minio client object. | ||
1878 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
1879 | &minio.Options{ | ||
1880 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
1881 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
1882 | }) | ||
1883 | if err != nil { | ||
1884 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
1885 | return | ||
1886 | } | ||
1887 | |||
1888 | // Enable tracing, write to stderr. | ||
1889 | // c.TraceOn(os.Stderr) | ||
1890 | |||
1891 | // Set user agent. | ||
1892 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
1893 | |||
1894 | // Generate a new random bucket name. | ||
1895 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
1896 | args["bucketName"] = bucketName | ||
1897 | |||
1898 | // Make a new bucket. | ||
1899 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
1900 | if err != nil { | ||
1901 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
1902 | return | ||
1903 | } | ||
1904 | |||
1905 | err = c.EnableVersioning(context.Background(), bucketName) | ||
1906 | if err != nil { | ||
1907 | logError(testName, function, args, startTime, "", "Enable versioning failed", err) | ||
1908 | return | ||
1909 | } | ||
1910 | |||
1911 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
1912 | args["objectName"] = objectName | ||
1913 | |||
1914 | for _, file := range []string{"datafile-1-b", "datafile-10-kB"} { | ||
1915 | _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{}) | ||
1916 | if err != nil { | ||
1917 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
1918 | return | ||
1919 | } | ||
1920 | } | ||
1921 | |||
1922 | versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) | ||
1923 | |||
1924 | var versions []minio.ObjectInfo | ||
1925 | for info := range versionsInfo { | ||
1926 | if info.Err != nil { | ||
1927 | logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err) | ||
1928 | return | ||
1929 | } | ||
1930 | versions = append(versions, info) | ||
1931 | } | ||
1932 | |||
1933 | sort.SliceStable(versions, func(i, j int) bool { | ||
1934 | return versions[i].Size < versions[j].Size | ||
1935 | }) | ||
1936 | |||
1937 | tagsV1 := map[string]string{"key1": "val1"} | ||
1938 | t1, err := tags.MapToObjectTags(tagsV1) | ||
1939 | if err != nil { | ||
1940 | logError(testName, function, args, startTime, "", "MapToObjectTags (1) failed", err) | ||
1941 | return | ||
1942 | } | ||
1943 | |||
1944 | err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID}) | ||
1945 | if err != nil { | ||
1946 | logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) | ||
1947 | return | ||
1948 | } | ||
1949 | |||
1950 | tagsV2 := map[string]string{"key2": "val2"} | ||
1951 | t2, err := tags.MapToObjectTags(tagsV2) | ||
1952 | if err != nil { | ||
1953 | logError(testName, function, args, startTime, "", "MapToObjectTags (2) failed", err) | ||
1954 | return | ||
1955 | } | ||
1956 | |||
1957 | err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID}) | ||
1958 | if err != nil { | ||
1959 | logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) | ||
1960 | return | ||
1961 | } | ||
1962 | |||
1963 | tagsEqual := func(tags1, tags2 map[string]string) bool { | ||
1964 | if len(tags1) != len(tags2) { | ||
1965 | return false | ||
1966 | } | ||
1967 | for k1, v1 := range tags1 { | ||
1968 | if v2, found := tags2[k1]; !found || v1 != v2 { | ||
1969 | return false | ||
1970 | } | ||
1971 | } | ||
1972 | return true | ||
1973 | } | ||
1974 | |||
1975 | gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) | ||
1976 | if err != nil { | ||
1977 | logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) | ||
1978 | return | ||
1979 | } | ||
1980 | |||
1981 | if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) { | ||
1982 | logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err) | ||
1983 | return | ||
1984 | } | ||
1985 | |||
1986 | gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{}) | ||
1987 | if err != nil { | ||
1988 | logError(testName, function, args, startTime, "", "GetObjectTaggingContext failed", err) | ||
1989 | return | ||
1990 | } | ||
1991 | |||
1992 | if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) { | ||
1993 | logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) | ||
1994 | return | ||
1995 | } | ||
1996 | |||
1997 | err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID}) | ||
1998 | if err != nil { | ||
1999 | logError(testName, function, args, startTime, "", "RemoveObjectTagging failed", err) | ||
2000 | return | ||
2001 | } | ||
2002 | |||
2003 | emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName, | ||
2004 | minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) | ||
2005 | if err != nil { | ||
2006 | logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) | ||
2007 | return | ||
2008 | } | ||
2009 | |||
2010 | if len(emptyTags.ToMap()) != 0 { | ||
2011 | logError(testName, function, args, startTime, "", "Expected no tags to remain after RemoveObjectTagging", nil) | ||
2012 | return | ||
2013 | } | ||
2014 | |||
2015 | // Delete all objects and their versions, as well as the bucket itself. | ||
2016 | if err = cleanupVersionedBucket(bucketName, c); err != nil { | ||
2017 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
2018 | return | ||
2019 | } | ||
2020 | |||
2021 | successLogger(testName, function, args, startTime).Info() | ||
2022 | } | ||
2023 | |||
2024 | // Test PutObject with custom checksums. | ||
2025 | func testPutObjectWithChecksums() { | ||
2026 | // initialize logging params | ||
2027 | startTime := time.Now() | ||
2028 | testName := getFuncName() | ||
2029 | function := "PutObject(bucketName, objectName, reader, size, opts)" | ||
2030 | args := map[string]interface{}{ | ||
2031 | "bucketName": "", | ||
2032 | "objectName": "", | ||
2033 | "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", | ||
2034 | } | ||
2035 | |||
2036 | if !isFullMode() { | ||
2037 | ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() | ||
2038 | return | ||
2039 | } | ||
2040 | |||
2041 | // Seed random based on current time. | ||
2042 | rand.Seed(time.Now().Unix()) | ||
2043 | |||
2044 | // Instantiate new minio client object. | ||
2045 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
2046 | &minio.Options{ | ||
2047 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
2048 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
2049 | }) | ||
2050 | if err != nil { | ||
2051 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
2052 | return | ||
2053 | } | ||
2054 | |||
2055 | // Enable tracing, write to stderr. | ||
2056 | // c.TraceOn(os.Stderr) | ||
2057 | |||
2058 | // Set user agent. | ||
2059 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
2060 | |||
2061 | // Generate a new random bucket name. | ||
2062 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
2063 | args["bucketName"] = bucketName | ||
2064 | |||
2065 | // Make a new bucket. | ||
2066 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
2067 | if err != nil { | ||
2068 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
2069 | return | ||
2070 | } | ||
2071 | |||
2072 | defer cleanupBucket(bucketName, c) | ||
2073 | tests := []struct { | ||
2074 | header string | ||
2075 | hasher hash.Hash | ||
2076 | |||
2077 | // Checksum values | ||
2078 | ChecksumCRC32 string | ||
2079 | ChecksumCRC32C string | ||
2080 | ChecksumSHA1 string | ||
2081 | ChecksumSHA256 string | ||
2082 | }{ | ||
2083 | {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()}, | ||
2084 | {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, | ||
2085 | {header: "x-amz-checksum-sha1", hasher: sha1.New()}, | ||
2086 | {header: "x-amz-checksum-sha256", hasher: sha256.New()}, | ||
2087 | } | ||
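// Each case pairs an x-amz-checksum-* header with its matching hasher. The
// loop below first uploads with a deliberately wrong digest (the hash of no
// input) and expects the server to reject it, then retries with the correct
// digest and verifies the checksum echoed back by PutObject, Stat, and GET.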
2088 | |||
2089 | for i, test := range tests { | ||
2090 | bufSize := dataFileMap["datafile-10-kB"] | ||
2091 | |||
2092 | // Save the data | ||
2093 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
2094 | args["objectName"] = objectName | ||
2095 | |||
2096 | cmpChecksum := func(got, want string) { | ||
2097 | if want != got { | ||
2098 | logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) | ||
2099 | return | ||
2100 | } | ||
2101 | } | ||
2102 | |||
2103 | meta := map[string]string{} | ||
2104 | reader := getDataReader("datafile-10-kB") | ||
2105 | b, err := io.ReadAll(reader) | ||
2106 | if err != nil { | ||
2107 | logError(testName, function, args, startTime, "", "Read failed", err) | ||
2108 | return | ||
2109 | } | ||
2110 | h := test.hasher | ||
2111 | h.Reset() | ||
2112 | // Deliberately wrong checksum: the digest of zero input bytes. | ||
2113 | meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) | ||
2114 | args["metadata"] = meta | ||
2115 | args["range"] = "false" | ||
2116 | |||
2117 | resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ | ||
2118 | DisableMultipart: true, | ||
2119 | UserMetadata: meta, | ||
2120 | }) | ||
2121 | if err == nil { | ||
2122 | if i == 0 && resp.ChecksumCRC32 == "" { | ||
2123 | ignoredLog(testName, function, args, startTime, "Checksums do not appear to be supported by the backend").Info() | ||
2124 | return | ||
2125 | } | ||
2126 | logError(testName, function, args, startTime, "", "PutObject with a wrong checksum should have failed", nil) | ||
2127 | return | ||
2128 | } | ||
2129 | |||
2130 | // Set correct CRC. | ||
2131 | h.Write(b) | ||
2132 | meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) | ||
2133 | reader.Close() | ||
2134 | |||
2135 | resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ | ||
2136 | DisableMultipart: true, | ||
2137 | DisableContentSha256: true, | ||
2138 | UserMetadata: meta, | ||
2139 | }) | ||
2140 | if err != nil { | ||
2141 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
2142 | return | ||
2143 | } | ||
2144 | cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) | ||
2145 | cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) | ||
2146 | cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) | ||
2147 | cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) | ||
2148 | |||
2149 | // Read the data back | ||
2150 | gopts := minio.GetObjectOptions{Checksum: true} | ||
2151 | |||
2152 | r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) | ||
2153 | if err != nil { | ||
2154 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
2155 | return | ||
2156 | } | ||
2157 | |||
2158 | st, err := r.Stat() | ||
2159 | if err != nil { | ||
2160 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
2161 | return | ||
2162 | } | ||
2163 | cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) | ||
2164 | cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) | ||
2165 | cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) | ||
2166 | cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) | ||
2167 | |||
2168 | if st.Size != int64(bufSize) { | ||
2169 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), nil) | ||
2170 | return | ||
2171 | } | ||
2172 | |||
2173 | if err := r.Close(); err != nil { | ||
2174 | logError(testName, function, args, startTime, "", "Object Close failed", err) | ||
2175 | return | ||
2176 | } | ||
2177 | if err := r.Close(); err == nil { | ||
2178 | logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) | ||
2179 | return | ||
2180 | } | ||
2181 | |||
2182 | args["range"] = "true" | ||
2183 | err = gopts.SetRange(100, 1000) | ||
2184 | if err != nil { | ||
2185 | logError(testName, function, args, startTime, "", "SetRange failed", err) | ||
2186 | return | ||
2187 | } | ||
2188 | r, err = c.GetObject(context.Background(), bucketName, objectName, gopts) | ||
2189 | if err != nil { | ||
2190 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
2191 | return | ||
2192 | } | ||
2193 | |||
2194 | b, err = io.ReadAll(r) | ||
2195 | if err != nil { | ||
2196 | logError(testName, function, args, startTime, "", "Read failed", err) | ||
2197 | return | ||
2198 | } | ||
2199 | st, err = r.Stat() | ||
2200 | if err != nil { | ||
2201 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
2202 | return | ||
2203 | } | ||
2204 | |||
2205 | // Range requests should return empty checksums... | ||
2206 | cmpChecksum(st.ChecksumSHA256, "") | ||
2207 | cmpChecksum(st.ChecksumSHA1, "") | ||
2208 | cmpChecksum(st.ChecksumCRC32, "") | ||
2209 | cmpChecksum(st.ChecksumCRC32C, "") | ||
2210 | |||
2211 | delete(args, "range") | ||
2212 | delete(args, "metadata") | ||
2213 | } | ||
2214 | |||
2215 | successLogger(testName, function, args, startTime).Info() | ||
2216 | } | ||
2217 | |||
2218 | // Test multipart PutObject with custom checksums. | ||
2219 | func testPutMultipartObjectWithChecksums() { | ||
2220 | // initialize logging params | ||
2221 | startTime := time.Now() | ||
2222 | testName := getFuncName() | ||
2223 | function := "PutObject(bucketName, objectName, reader, size, opts)" | ||
2224 | args := map[string]interface{}{ | ||
2225 | "bucketName": "", | ||
2226 | "objectName": "", | ||
2227 | "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", | ||
2228 | } | ||
2229 | |||
2230 | if !isFullMode() { | ||
2231 | ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() | ||
2232 | return | ||
2233 | } | ||
2234 | |||
2235 | // Seed random based on current time. | ||
2236 | rand.Seed(time.Now().Unix()) | ||
2237 | |||
2238 | // Instantiate new minio client object. | ||
2239 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
2240 | &minio.Options{ | ||
2241 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
2242 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
2243 | }) | ||
2244 | if err != nil { | ||
2245 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
2246 | return | ||
2247 | } | ||
2248 | |||
2249 | // Enable tracing, write to stderr. | ||
2250 | // c.TraceOn(os.Stderr) | ||
2251 | |||
2252 | // Set user agent. | ||
2253 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
2254 | |||
2255 | // Generate a new random bucket name. | ||
2256 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
2257 | args["bucketName"] = bucketName | ||
2258 | |||
2259 | // Make a new bucket. | ||
2260 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
2261 | if err != nil { | ||
2262 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
2263 | return | ||
2264 | } | ||
2265 | |||
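// hashMultiPart reproduces the S3 composite checksum for multipart uploads:
// hash each part, hash the concatenation of the part digests, and append a
// "-<parts>" suffix, e.g. base64(H(H(p1)||H(p2)||...)) + "-13" for 13 parts.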
2266 | hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string { | ||
2267 | r := bytes.NewReader(b) | ||
2268 | tmp := make([]byte, partSize) | ||
2269 | parts := 0 | ||
2270 | var all []byte | ||
2271 | for { | ||
2272 | n, err := io.ReadFull(r, tmp) | ||
2273 | if err != nil && err != io.ErrUnexpectedEOF { | ||
2274 | logError(testName, function, args, startTime, "", "Calc crc failed", err) | ||
2275 | } | ||
2276 | if n == 0 { | ||
2277 | break | ||
2278 | } | ||
2279 | parts++ | ||
2280 | hasher.Reset() | ||
2281 | hasher.Write(tmp[:n]) | ||
2282 | all = append(all, hasher.Sum(nil)...) | ||
2283 | if err != nil { | ||
2284 | break | ||
2285 | } | ||
2286 | } | ||
2287 | hasher.Reset() | ||
2288 | hasher.Write(all) | ||
2289 | return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts) | ||
2290 | } | ||
2291 | defer cleanupBucket(bucketName, c) | ||
2292 | tests := []struct { | ||
2293 | header string | ||
2294 | hasher hash.Hash | ||
2295 | |||
2296 | // Checksum values | ||
2297 | ChecksumCRC32 string | ||
2298 | ChecksumCRC32C string | ||
2299 | ChecksumSHA1 string | ||
2300 | ChecksumSHA256 string | ||
2301 | }{ | ||
2302 | // Currently there is no way to override the checksum type. | ||
2303 | {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "OpEx0Q==-13"}, | ||
2304 | } | ||
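// The literal "OpEx0Q==-13" documents the expected composite CRC32C:
// datafile-129-MB uploaded in 10 MiB parts yields 13 parts, hence "-13".
// The value is recomputed from the actual data via hashMultiPart below.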
2305 | |||
2306 | for _, test := range tests { | ||
2307 | bufSize := dataFileMap["datafile-129-MB"] | ||
2308 | |||
2309 | // Save the data | ||
2310 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
2311 | args["objectName"] = objectName | ||
2312 | |||
2313 | cmpChecksum := func(got, want string) { | ||
2314 | if want != got { | ||
2315 | // Deliberately non-fatal here: report the mismatch without aborting the test. | ||
2316 | fmt.Printf("want %s, got %s\n", want, got) | ||
2317 | return | ||
2318 | } | ||
2319 | } | ||
2320 | |||
2321 | const partSize = 10 << 20 | ||
2322 | reader := getDataReader("datafile-129-MB") | ||
2323 | b, err := io.ReadAll(reader) | ||
2324 | if err != nil { | ||
2325 | logError(testName, function, args, startTime, "", "Read failed", err) | ||
2326 | return | ||
2327 | } | ||
2328 | reader.Close() | ||
2329 | h := test.hasher | ||
2330 | h.Reset() | ||
2331 | test.ChecksumCRC32C = hashMultiPart(b, partSize, test.hasher) | ||
2332 | |||
2333 | // Set correct CRC. | ||
2334 | |||
2335 | resp, err := c.PutObject(context.Background(), bucketName, objectName, io.NopCloser(bytes.NewReader(b)), int64(bufSize), minio.PutObjectOptions{ | ||
2336 | DisableContentSha256: true, | ||
2337 | DisableMultipart: false, | ||
2338 | UserMetadata: nil, | ||
2339 | PartSize: partSize, | ||
2340 | }) | ||
2341 | if err != nil { | ||
2342 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
2343 | return | ||
2344 | } | ||
2345 | cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) | ||
2346 | cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) | ||
2347 | cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) | ||
2348 | cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) | ||
2349 | |||
2350 | // Read the data back | ||
2351 | gopts := minio.GetObjectOptions{Checksum: true} | ||
2352 | gopts.PartNumber = 2 | ||
2353 | |||
2354 | // We cannot use StatObject, since it ignores the part number. | ||
2355 | r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) | ||
2356 | if err != nil { | ||
2357 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
2358 | return | ||
2359 | } | ||
2360 | io.Copy(io.Discard, r) | ||
2361 | st, err := r.Stat() | ||
2362 | if err != nil { | ||
2363 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
2364 | return | ||
2365 | } | ||
2366 | |||
2367 | // Test part 2 checksum... | ||
2368 | h.Reset() | ||
2369 | h.Write(b[partSize : 2*partSize]) | ||
2370 | got := base64.StdEncoding.EncodeToString(h.Sum(nil)) | ||
2371 | if test.ChecksumSHA256 != "" { | ||
2372 | cmpChecksum(st.ChecksumSHA256, got) | ||
2373 | } | ||
2374 | if test.ChecksumSHA1 != "" { | ||
2375 | cmpChecksum(st.ChecksumSHA1, got) | ||
2376 | } | ||
2377 | if test.ChecksumCRC32 != "" { | ||
2378 | cmpChecksum(st.ChecksumCRC32, got) | ||
2379 | } | ||
2380 | if test.ChecksumCRC32C != "" { | ||
2381 | cmpChecksum(st.ChecksumCRC32C, got) | ||
2382 | } | ||
2383 | |||
2384 | delete(args, "metadata") | ||
2385 | } | ||
2386 | |||
2387 | successLogger(testName, function, args, startTime).Info() | ||
2388 | } | ||
2389 | |||
2390 | // Test PutObject with trailing checksums. | ||
2391 | func testTrailingChecksums() { | ||
2392 | // initialize logging params | ||
2393 | startTime := time.Now() | ||
2394 | testName := getFuncName() | ||
2395 | function := "PutObject(bucketName, objectName, reader, size, opts)" | ||
2396 | args := map[string]interface{}{ | ||
2397 | "bucketName": "", | ||
2398 | "objectName": "", | ||
2399 | "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", | ||
2400 | } | ||
2401 | |||
2402 | if !isFullMode() { | ||
2403 | ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() | ||
2404 | return | ||
2405 | } | ||
2406 | |||
2407 | // Instantiate new minio client object. | ||
2408 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
2409 | &minio.Options{ | ||
2410 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
2411 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
2412 | TrailingHeaders: true, | ||
2413 | }) | ||
2414 | if err != nil { | ||
2415 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
2416 | return | ||
2417 | } | ||
2418 | |||
2419 | // Enable tracing, write to stderr. | ||
2420 | // c.TraceOn(os.Stderr) | ||
2421 | |||
2422 | // Set user agent. | ||
2423 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
2424 | |||
2425 | // Generate a new random bucket name. | ||
2426 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
2427 | args["bucketName"] = bucketName | ||
2428 | |||
2429 | // Make a new bucket. | ||
2430 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
2431 | if err != nil { | ||
2432 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
2433 | return | ||
2434 | } | ||
2435 | |||
2436 | hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string { | ||
2437 | r := bytes.NewReader(b) | ||
2438 | tmp := make([]byte, partSize) | ||
2439 | parts := 0 | ||
2440 | var all []byte | ||
2441 | for { | ||
2442 | n, err := io.ReadFull(r, tmp) | ||
2443 | if err != nil && err != io.ErrUnexpectedEOF { | ||
2444 | logError(testName, function, args, startTime, "", "Calc crc failed", err) | ||
2445 | } | ||
2446 | if n == 0 { | ||
2447 | break | ||
2448 | } | ||
2449 | parts++ | ||
2450 | hasher.Reset() | ||
2451 | hasher.Write(tmp[:n]) | ||
2452 | all = append(all, hasher.Sum(nil)...) | ||
2453 | if err != nil { | ||
2454 | break | ||
2455 | } | ||
2456 | } | ||
2457 | hasher.Reset() | ||
2458 | hasher.Write(all) | ||
2459 | return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts) | ||
2460 | } | ||
2461 | defer cleanupBucket(bucketName, c) | ||
2462 | tests := []struct { | ||
2463 | header string | ||
2464 | hasher hash.Hash | ||
2465 | |||
2466 | // Checksum values | ||
2467 | ChecksumCRC32 string | ||
2468 | ChecksumCRC32C string | ||
2469 | ChecksumSHA1 string | ||
2470 | ChecksumSHA256 string | ||
2471 | PO minio.PutObjectOptions | ||
2472 | }{ | ||
2473 | // Currently there is no way to override the checksum type. | ||
2474 | { | ||
2475 | header: "x-amz-checksum-crc32c", | ||
2476 | hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), | ||
2477 | ChecksumCRC32C: "set", | ||
2478 | PO: minio.PutObjectOptions{ | ||
2479 | DisableContentSha256: true, | ||
2480 | DisableMultipart: false, | ||
2481 | UserMetadata: nil, | ||
2482 | PartSize: 5 << 20, | ||
2483 | }, | ||
2484 | }, | ||
2485 | { | ||
2486 | header: "x-amz-checksum-crc32c", | ||
2487 | hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), | ||
2488 | ChecksumCRC32C: "set", | ||
2489 | PO: minio.PutObjectOptions{ | ||
2490 | DisableContentSha256: true, | ||
2491 | DisableMultipart: false, | ||
2492 | UserMetadata: nil, | ||
2493 | PartSize: 6_645_654, // Rather arbitrary size | ||
2494 | }, | ||
2495 | }, | ||
2496 | { | ||
2497 | header: "x-amz-checksum-crc32c", | ||
2498 | hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), | ||
2499 | ChecksumCRC32C: "set", | ||
2500 | PO: minio.PutObjectOptions{ | ||
2501 | DisableContentSha256: false, | ||
2502 | DisableMultipart: false, | ||
2503 | UserMetadata: nil, | ||
2504 | PartSize: 5 << 20, | ||
2505 | }, | ||
2506 | }, | ||
2507 | { | ||
2508 | header: "x-amz-checksum-crc32c", | ||
2509 | hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), | ||
2510 | ChecksumCRC32C: "set", | ||
2511 | PO: minio.PutObjectOptions{ | ||
2512 | DisableContentSha256: false, | ||
2513 | DisableMultipart: false, | ||
2514 | UserMetadata: nil, | ||
2515 | PartSize: 6_645_654, // Rather arbitrary size | ||
2516 | }, | ||
2517 | }, | ||
2518 | } | ||
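// ChecksumCRC32C is initialized to the placeholder "set" only to flag that a
// CRC32C value is expected; the real expected value is computed per test by
// hashMultiPart once the part size for that case is known.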
2519 | |||
2520 | for _, test := range tests { | ||
2521 | bufSize := dataFileMap["datafile-11-MB"] | ||
2522 | |||
2523 | // Save the data | ||
2524 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
2525 | args["objectName"] = objectName | ||
2526 | |||
2527 | cmpChecksum := func(got, want string) { | ||
2528 | if want != got { | ||
2529 | logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %q, got %q", want, got)) | ||
2530 | return | ||
2531 | } | ||
2532 | } | ||
2533 | |||
2534 | reader := getDataReader("datafile-11-MB") | ||
2535 | b, err := io.ReadAll(reader) | ||
2536 | if err != nil { | ||
2537 | logError(testName, function, args, startTime, "", "Read failed", err) | ||
2538 | return | ||
2539 | } | ||
2540 | reader.Close() | ||
2541 | h := test.hasher | ||
2542 | h.Reset() | ||
2543 | test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher) | ||
2544 | |||
2545 | // Set correct CRC. | ||
2546 | // c.TraceOn(os.Stderr) | ||
2547 | resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO) | ||
2548 | if err != nil { | ||
2549 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
2550 | return | ||
2551 | } | ||
2552 | // c.TraceOff() | ||
2553 | cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) | ||
2554 | cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) | ||
2555 | cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) | ||
2556 | cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) | ||
2557 | |||
2558 | // Read the data back | ||
2559 | gopts := minio.GetObjectOptions{Checksum: true} | ||
2560 | gopts.PartNumber = 2 | ||
2561 | |||
2562 | // We cannot use StatObject, since it ignores the part number. | ||
2563 | r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) | ||
2564 | if err != nil { | ||
2565 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
2566 | return | ||
2567 | } | ||
2568 | io.Copy(io.Discard, r) | ||
2569 | st, err := r.Stat() | ||
2570 | if err != nil { | ||
2571 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
2572 | return | ||
2573 | } | ||
2574 | |||
2575 | // Test part 2 checksum... | ||
2576 | h.Reset() | ||
2577 | p2 := b[test.PO.PartSize:] | ||
2578 | if len(p2) > int(test.PO.PartSize) { | ||
2579 | p2 = p2[:test.PO.PartSize] | ||
2580 | } | ||
2581 | h.Write(p2) | ||
2582 | got := base64.StdEncoding.EncodeToString(h.Sum(nil)) | ||
2583 | if test.ChecksumSHA256 != "" { | ||
2584 | cmpChecksum(st.ChecksumSHA256, got) | ||
2585 | } | ||
2586 | if test.ChecksumSHA1 != "" { | ||
2587 | cmpChecksum(st.ChecksumSHA1, got) | ||
2588 | } | ||
2589 | if test.ChecksumCRC32 != "" { | ||
2590 | cmpChecksum(st.ChecksumCRC32, got) | ||
2591 | } | ||
2592 | if test.ChecksumCRC32C != "" { | ||
2593 | cmpChecksum(st.ChecksumCRC32C, got) | ||
2594 | } | ||
2595 | |||
2596 | delete(args, "metadata") | ||
2597 | } | ||
2598 | } | ||
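
// The loop above predicts multipart checksums via hashMultiPart. A minimal
// sketch of the composite "checksum of checksums" idea it relies on, assuming
// the AWS-style scheme: hash each part, concatenate the raw part digests, and
// hash that concatenation. exampleMultipartChecksum is illustrative only and
// not part of this test suite.
func exampleMultipartChecksum(b []byte, partSize int, h hash.Hash) string {
	if partSize <= 0 {
		return ""
	}
	var digests []byte
	for len(b) > 0 {
		n := partSize
		if n > len(b) {
			n = len(b)
		}
		h.Reset()
		h.Write(b[:n])
		digests = append(digests, h.Sum(nil)...)
		b = b[n:]
	}
	h.Reset()
	h.Write(digests)
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}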
2599 | |||
2600 | // Test PutObject with automatic checksums. | ||
2601 | func testPutObjectWithAutomaticChecksums() { | ||
2602 | // initialize logging params | ||
2603 | startTime := time.Now() | ||
2604 | testName := getFuncName() | ||
2605 | function := "PutObject(bucketName, objectName, reader,size, opts)" | ||
2606 | args := map[string]interface{}{ | ||
2607 | "bucketName": "", | ||
2608 | "objectName": "", | ||
2609 | "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", | ||
2610 | } | ||
2611 | |||
2612 | if !isFullMode() { | ||
2613 | ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() | ||
2614 | return | ||
2615 | } | ||
2616 | |||
2617 | // Seed random based on current time. | ||
2618 | rand.Seed(time.Now().Unix()) | ||
2619 | |||
2620 | // Instantiate new minio client object. | ||
2621 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
2622 | &minio.Options{ | ||
2623 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
2624 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
2625 | TrailingHeaders: true, | ||
2626 | }) | ||
2627 | if err != nil { | ||
2628 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
2629 | return | ||
2630 | } | ||
2631 | |||
2632 | // Set user agent. | ||
2633 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
2634 | |||
2635 | // Generate a new random bucket name. | ||
2636 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
2637 | args["bucketName"] = bucketName | ||
2638 | |||
2639 | // Make a new bucket. | ||
2640 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
2641 | if err != nil { | ||
2642 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
2643 | return | ||
2644 | } | ||
2645 | |||
2646 | defer cleanupBucket(bucketName, c) | ||
2647 | tests := []struct { | ||
2648 | header string | ||
2649 | hasher hash.Hash | ||
2650 | |||
2651 | // Checksum values | ||
2652 | ChecksumCRC32 string | ||
2653 | ChecksumCRC32C string | ||
2654 | ChecksumSHA1 string | ||
2655 | ChecksumSHA256 string | ||
2656 | }{ | ||
2657 | // The built-in checksum is only added as CRC32C when neither MD5 nor SHA256 is sent. | ||
2658 | {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, | ||
2659 | } | ||
2660 | |||
2661 | // Enable tracing, write to stderr. | ||
2662 | // c.TraceOn(os.Stderr) | ||
2663 | // defer c.TraceOff() | ||
2664 | |||
2665 | for i, test := range tests { | ||
2666 | bufSize := dataFileMap["datafile-10-kB"] | ||
2667 | |||
2668 | // Save the data | ||
2669 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
2670 | args["objectName"] = objectName | ||
2671 | |||
2672 | cmpChecksum := func(got, want string) { | ||
2673 | if want != got { | ||
2674 | logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) | ||
2675 | return | ||
2676 | } | ||
2677 | } | ||
2678 | |||
2679 | meta := map[string]string{} | ||
2680 | reader := getDataReader("datafile-10-kB") | ||
2681 | b, err := io.ReadAll(reader) | ||
2682 | if err != nil { | ||
2683 | logError(testName, function, args, startTime, "", "Read failed", err) | ||
2684 | return | ||
2685 | } | ||
2686 | |||
2687 | h := test.hasher | ||
2688 | h.Reset() | ||
2689 | h.Write(b) | ||
2690 | meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) | ||
2691 | args["metadata"] = meta | ||
2692 | |||
2693 | resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ | ||
2694 | DisableMultipart: true, | ||
2695 | UserMetadata: nil, | ||
2696 | DisableContentSha256: true, | ||
2697 | SendContentMd5: false, | ||
2698 | }) | ||
2699 | if err == nil { | ||
2700 | if i == 0 && resp.ChecksumCRC32C == "" { | ||
2701 | ignoredLog(testName, function, args, startTime, "Checksums does not appear to be supported by backend").Info() | ||
2702 | return | ||
2703 | } | ||
2704 | } else { | ||
2705 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
2706 | return | ||
2707 | } | ||
2708 | cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) | ||
2709 | cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) | ||
2710 | cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) | ||
2711 | cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) | ||
2712 | |||
2713 | // Usually this will be the same as above, since we skip automatic checksum when SHA256 content is sent. | ||
2714 | // When/if we add a checksum control to PutObjectOptions this will make more sense. | ||
2715 | resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ | ||
2716 | DisableMultipart: true, | ||
2717 | UserMetadata: nil, | ||
2718 | DisableContentSha256: false, | ||
2719 | SendContentMd5: false, | ||
2720 | }) | ||
2721 | if err != nil { | ||
2722 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
2723 | return | ||
2724 | } | ||
2725 | // The checksum is not enabled on plain HTTP, since streaming signatures already use SHA256 blocks. | ||
2726 | if mustParseBool(os.Getenv(enableHTTPS)) { | ||
2727 | cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) | ||
2728 | cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) | ||
2729 | cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) | ||
2730 | cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) | ||
2731 | } | ||
2732 | |||
2733 | // Set SHA256 header manually | ||
2734 | sh256 := sha256.Sum256(b) | ||
2735 | meta = map[string]string{"x-amz-checksum-sha256": base64.StdEncoding.EncodeToString(sh256[:])} | ||
2736 | args["metadata"] = meta | ||
2737 | resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ | ||
2738 | DisableMultipart: true, | ||
2739 | UserMetadata: meta, | ||
2740 | DisableContentSha256: true, | ||
2741 | SendContentMd5: false, | ||
2742 | }) | ||
2743 | if err != nil { | ||
2744 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
2745 | return | ||
2746 | } | ||
2747 | cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) | ||
2748 | cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) | ||
2749 | cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) | ||
2750 | cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) | ||
2751 | delete(args, "metadata") | ||
2752 | } | ||
2753 | |||
2754 | successLogger(testName, function, args, startTime).Info() | ||
2755 | } | ||
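
// For reference, the base64 checksum strings compared throughout these tests
// are simply the raw digest bytes in standard base64 encoding. A minimal
// sketch using only packages this file already imports.
func exampleCRC32C(data []byte) string {
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	h.Write(data) // hash.Hash writes never return an error
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}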
2756 | |||
2757 | // Test PutObject with large data to trigger a multipart upload via ReadAt. | ||
2758 | func testPutObjectWithMetadata() { | ||
2759 | // initialize logging params | ||
2760 | startTime := time.Now() | ||
2761 | testName := getFuncName() | ||
2762 | function := "PutObject(bucketName, objectName, reader,size, opts)" | ||
2763 | args := map[string]interface{}{ | ||
2764 | "bucketName": "", | ||
2765 | "objectName": "", | ||
2766 | "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", | ||
2767 | } | ||
2768 | |||
2769 | if !isFullMode() { | ||
2770 | ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() | ||
2771 | return | ||
2772 | } | ||
2773 | |||
2774 | // Seed random based on current time. | ||
2775 | rand.Seed(time.Now().Unix()) | ||
2776 | |||
2777 | // Instantiate new minio client object. | ||
2778 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
2779 | &minio.Options{ | ||
2780 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
2781 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
2782 | }) | ||
2783 | if err != nil { | ||
2784 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
2785 | return | ||
2786 | } | ||
2787 | |||
2788 | // Enable tracing, write to stderr. | ||
2789 | // c.TraceOn(os.Stderr) | ||
2790 | |||
2791 | // Set user agent. | ||
2792 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
2793 | |||
2794 | // Generate a new random bucket name. | ||
2795 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
2796 | args["bucketName"] = bucketName | ||
2797 | |||
2798 | // Make a new bucket. | ||
2799 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
2800 | if err != nil { | ||
2801 | logError(testName, function, args, startTime, "", "Make bucket failed", err) | ||
2802 | return | ||
2803 | } | ||
2804 | |||
2805 | defer cleanupBucket(bucketName, c) | ||
2806 | |||
2807 | bufSize := dataFileMap["datafile-129-MB"] | ||
2808 | reader := getDataReader("datafile-129-MB") | ||
2809 | defer reader.Close() | ||
2810 | |||
2811 | // Save the data | ||
2812 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
2813 | args["objectName"] = objectName | ||
2814 | |||
2815 | // Object custom metadata | ||
2816 | customContentType := "custom/contenttype" | ||
2817 | |||
2818 | args["metadata"] = map[string][]string{ | ||
2819 | "Content-Type": {customContentType}, | ||
2820 | "X-Amz-Meta-CustomKey": {"extra spaces in value"}, | ||
2821 | } | ||
2822 | |||
2823 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ | ||
2824 | ContentType: customContentType, | ||
2825 | }) | ||
2826 | if err != nil { | ||
2827 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
2828 | return | ||
2829 | } | ||
2830 | |||
2831 | // Read the data back | ||
2832 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
2833 | if err != nil { | ||
2834 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
2835 | return | ||
2836 | } | ||
2837 | |||
2838 | st, err := r.Stat() | ||
2839 | if err != nil { | ||
2840 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
2841 | return | ||
2842 | } | ||
2843 | if st.Size != int64(bufSize) { | ||
2844 | logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) | ||
2845 | return | ||
2846 | } | ||
2847 | if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { | ||
2848 | logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) | ||
2849 | return | ||
2850 | } | ||
2851 | if err := crcMatchesName(r, "datafile-129-MB"); err != nil { | ||
2852 | logError(testName, function, args, startTime, "", "data CRC check failed", err) | ||
2853 | return | ||
2854 | } | ||
2855 | if err := r.Close(); err != nil { | ||
2856 | logError(testName, function, args, startTime, "", "Object Close failed", err) | ||
2857 | return | ||
2858 | } | ||
2859 | if err := r.Close(); err == nil { | ||
2860 | logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) | ||
2861 | return | ||
2862 | } | ||
2863 | |||
2864 | successLogger(testName, function, args, startTime).Info() | ||
2865 | } | ||
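
// The test above only sets a custom content type; user-defined metadata rides
// along the same way. A hedged sketch: the client sends UserMetadata keys as
// X-Amz-Meta-* headers, and the bucket/object names here are placeholders.
func examplePutWithMetadata(c *minio.Client, bucket, object string, payload []byte) error {
	_, err := c.PutObject(context.Background(), bucket, object,
		bytes.NewReader(payload), int64(len(payload)),
		minio.PutObjectOptions{
			ContentType:  "custom/contenttype",
			UserMetadata: map[string]string{"CustomKey": "extra spaces in value"},
		})
	return err
}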
2866 | |||
2867 | func testPutObjectWithContentLanguage() { | ||
2868 | // initialize logging params | ||
2869 | objectName := "test-object" | ||
2870 | startTime := time.Now() | ||
2871 | testName := getFuncName() | ||
2872 | function := "PutObject(bucketName, objectName, reader, size, opts)" | ||
2873 | args := map[string]interface{}{ | ||
2874 | "bucketName": "", | ||
2875 | "objectName": objectName, | ||
2876 | "size": -1, | ||
2877 | "opts": "", | ||
2878 | } | ||
2879 | |||
2880 | // Seed random based on current time. | ||
2881 | rand.Seed(time.Now().Unix()) | ||
2882 | |||
2883 | // Instantiate new minio client object. | ||
2884 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
2885 | &minio.Options{ | ||
2886 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
2887 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
2888 | }) | ||
2889 | if err != nil { | ||
2890 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
2891 | return | ||
2892 | } | ||
2893 | |||
2894 | // Enable tracing, write to stderr. | ||
2895 | // c.TraceOn(os.Stderr) | ||
2896 | |||
2897 | // Set user agent. | ||
2898 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
2899 | |||
2900 | // Generate a new random bucket name. | ||
2901 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
2902 | args["bucketName"] = bucketName | ||
2903 | // Make a new bucket. | ||
2904 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
2905 | if err != nil { | ||
2906 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
2907 | return | ||
2908 | } | ||
2909 | |||
2910 | defer cleanupBucket(bucketName, c) | ||
2911 | |||
2912 | data := []byte{} | ||
2913 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ | ||
2914 | ContentLanguage: "en", | ||
2915 | }) | ||
2916 | if err != nil { | ||
2917 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
2918 | return | ||
2919 | } | ||
2920 | |||
2921 | objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
2922 | if err != nil { | ||
2923 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
2924 | return | ||
2925 | } | ||
2926 | |||
2927 | if objInfo.Metadata.Get("Content-Language") != "en" { | ||
2928 | logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) | ||
2929 | return | ||
2930 | } | ||
2931 | |||
2932 | successLogger(testName, function, args, startTime).Info() | ||
2933 | } | ||
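
// ContentLanguage is one of several standard response headers that
// PutObjectOptions exposes as plain fields. A small sketch with placeholder
// values; all fields shown are existing minio.PutObjectOptions fields.
func exampleStandardHeaders() minio.PutObjectOptions {
	return minio.PutObjectOptions{
		ContentType:     "text/plain",
		ContentLanguage: "en",
		ContentEncoding: "gzip",
		CacheControl:    "max-age=600",
	}
}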
2934 | |||
2935 | // Test put object with streaming signature. | ||
2936 | func testPutObjectStreaming() { | ||
2937 | // initialize logging params | ||
2938 | objectName := "test-object" | ||
2939 | startTime := time.Now() | ||
2940 | testName := getFuncName() | ||
2941 | function := "PutObject(bucketName, objectName, reader,size,opts)" | ||
2942 | args := map[string]interface{}{ | ||
2943 | "bucketName": "", | ||
2944 | "objectName": objectName, | ||
2945 | "size": -1, | ||
2946 | "opts": "", | ||
2947 | } | ||
2948 | |||
2949 | // Seed random based on current time. | ||
2950 | rand.Seed(time.Now().Unix()) | ||
2951 | |||
2952 | // Instantiate new minio client object. | ||
2953 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
2954 | &minio.Options{ | ||
2955 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
2956 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
2957 | }) | ||
2958 | if err != nil { | ||
2959 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
2960 | return | ||
2961 | } | ||
2962 | |||
2963 | // Enable tracing, write to stderr. | ||
2964 | // c.TraceOn(os.Stderr) | ||
2965 | |||
2966 | // Set user agent. | ||
2967 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
2968 | |||
2969 | // Generate a new random bucket name. | ||
2970 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
2971 | args["bucketName"] = bucketName | ||
2972 | // Make a new bucket. | ||
2973 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
2974 | if err != nil { | ||
2975 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
2976 | return | ||
2977 | } | ||
2978 | |||
2979 | defer cleanupBucket(bucketName, c) | ||
2980 | |||
2981 | // Upload an object. | ||
2982 | sizes := []int64{0, 64*1024 - 1, 64 * 1024} | ||
2983 | |||
2984 | for _, size := range sizes { | ||
2985 | data := newRandomReader(size, size) | ||
2986 | ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{}) | ||
2987 | if err != nil { | ||
2988 | logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) | ||
2989 | return | ||
2990 | } | ||
2991 | |||
2992 | if ui.Size != size { | ||
2993 | logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil) | ||
2994 | return | ||
2995 | } | ||
2996 | |||
2997 | objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
2998 | if err != nil { | ||
2999 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
3000 | return | ||
3001 | } | ||
3002 | if objInfo.Size != size { | ||
3003 | logError(testName, function, args, startTime, "", "Unexpected size", err) | ||
3004 | return | ||
3005 | } | ||
3006 | |||
3007 | } | ||
3008 | |||
3009 | successLogger(testName, function, args, startTime).Info() | ||
3010 | } | ||
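
// PutObject also accepts an unknown length: passing -1 as the size makes the
// client stream the reader through a multipart upload. A minimal sketch; the
// bucket and object names come from the caller.
func examplePutUnknownSize(c *minio.Client, bucket, object string, r io.Reader) (minio.UploadInfo, error) {
	return c.PutObject(context.Background(), bucket, object, r, -1,
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
}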
3011 | |||
3012 | // Test get object seeker from the end, using whence set to '2'. | ||
3013 | func testGetObjectSeekEnd() { | ||
3014 | // initialize logging params | ||
3015 | startTime := time.Now() | ||
3016 | testName := getFuncName() | ||
3017 | function := "GetObject(bucketName, objectName)" | ||
3018 | args := map[string]interface{}{} | ||
3019 | |||
3020 | // Seed random based on current time. | ||
3021 | rand.Seed(time.Now().Unix()) | ||
3022 | |||
3023 | // Instantiate new minio client object. | ||
3024 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
3025 | &minio.Options{ | ||
3026 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
3027 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
3028 | }) | ||
3029 | if err != nil { | ||
3030 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
3031 | return | ||
3032 | } | ||
3033 | |||
3034 | // Enable tracing, write to stderr. | ||
3035 | // c.TraceOn(os.Stderr) | ||
3036 | |||
3037 | // Set user agent. | ||
3038 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
3039 | |||
3040 | // Generate a new random bucket name. | ||
3041 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
3042 | args["bucketName"] = bucketName | ||
3043 | |||
3044 | // Make a new bucket. | ||
3045 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
3046 | if err != nil { | ||
3047 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
3048 | return | ||
3049 | } | ||
3050 | |||
3051 | defer cleanupBucket(bucketName, c) | ||
3052 | |||
3053 | // Generate 33K of data. | ||
3054 | bufSize := dataFileMap["datafile-33-kB"] | ||
3055 | reader := getDataReader("datafile-33-kB") | ||
3056 | defer reader.Close() | ||
3057 | |||
3058 | // Save the data | ||
3059 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
3060 | args["objectName"] = objectName | ||
3061 | |||
3062 | buf, err := io.ReadAll(reader) | ||
3063 | if err != nil { | ||
3064 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
3065 | return | ||
3066 | } | ||
3067 | |||
3068 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
3069 | if err != nil { | ||
3070 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
3071 | return | ||
3072 | } | ||
3073 | |||
3074 | // Read the data back | ||
3075 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
3076 | if err != nil { | ||
3077 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
3078 | return | ||
3079 | } | ||
3080 | |||
3081 | st, err := r.Stat() | ||
3082 | if err != nil { | ||
3083 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
3084 | return | ||
3085 | } | ||
3086 | |||
3087 | if st.Size != int64(bufSize) { | ||
3088 | logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) | ||
3089 | return | ||
3090 | } | ||
3091 | |||
3092 | pos, err := r.Seek(-100, 2) | ||
3093 | if err != nil { | ||
3094 | logError(testName, function, args, startTime, "", "Object Seek failed", err) | ||
3095 | return | ||
3096 | } | ||
3097 | if pos != st.Size-100 { | ||
3098 | logError(testName, function, args, startTime, "", "Incorrect position", err) | ||
3099 | return | ||
3100 | } | ||
3101 | buf2 := make([]byte, 100) | ||
3102 | m, err := readFull(r, buf2) | ||
3103 | if err != nil { | ||
3104 | logError(testName, function, args, startTime, "", "Error reading through readFull", err) | ||
3105 | return | ||
3106 | } | ||
3107 | if m != len(buf2) { | ||
3108 | logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err) | ||
3109 | return | ||
3110 | } | ||
3111 | hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) | ||
3112 | hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) | ||
3113 | if hexBuf1 != hexBuf2 { | ||
3114 | logError(testName, function, args, startTime, "", "Values at same index dont match", err) | ||
3115 | return | ||
3116 | } | ||
3117 | pos, err = r.Seek(-100, 2) | ||
3118 | if err != nil { | ||
3119 | logError(testName, function, args, startTime, "", "Object Seek failed", err) | ||
3120 | return | ||
3121 | } | ||
3122 | if pos != st.Size-100 { | ||
3123 | logError(testName, function, args, startTime, "", "Incorrect position", err) | ||
3124 | return | ||
3125 | } | ||
3126 | if err = r.Close(); err != nil { | ||
3127 | logError(testName, function, args, startTime, "", "ObjectClose failed", err) | ||
3128 | return | ||
3129 | } | ||
3130 | |||
3131 | successLogger(testName, function, args, startTime).Info() | ||
3132 | } | ||
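
// The whence value 2 used above is io.SeekEnd. The same "read the last n
// bytes" pattern works against any io.ReadSeeker; a stdlib-only sketch.
func exampleReadTail(rs io.ReadSeeker, n int64) ([]byte, error) {
	if _, err := rs.Seek(-n, io.SeekEnd); err != nil {
		return nil, err
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(rs, buf); err != nil {
		return nil, err
	}
	return buf, nil
}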
3133 | |||
3134 | // Test get object reader to not throw error on being closed twice. | ||
3135 | func testGetObjectClosedTwice() { | ||
3136 | // initialize logging params | ||
3137 | startTime := time.Now() | ||
3138 | testName := getFuncName() | ||
3139 | function := "GetObject(bucketName, objectName)" | ||
3140 | args := map[string]interface{}{} | ||
3141 | |||
3142 | // Seed random based on current time. | ||
3143 | rand.Seed(time.Now().Unix()) | ||
3144 | |||
3145 | // Instantiate new minio client object. | ||
3146 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
3147 | &minio.Options{ | ||
3148 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
3149 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
3150 | }) | ||
3151 | if err != nil { | ||
3152 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
3153 | return | ||
3154 | } | ||
3155 | |||
3156 | // Enable tracing, write to stderr. | ||
3157 | // c.TraceOn(os.Stderr) | ||
3158 | |||
3159 | // Set user agent. | ||
3160 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
3161 | |||
3162 | // Generate a new random bucket name. | ||
3163 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
3164 | args["bucketName"] = bucketName | ||
3165 | |||
3166 | // Make a new bucket. | ||
3167 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
3168 | if err != nil { | ||
3169 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
3170 | return | ||
3171 | } | ||
3172 | |||
3173 | defer cleanupBucket(bucketName, c) | ||
3174 | |||
3175 | // Generate 33K of data. | ||
3176 | bufSize := dataFileMap["datafile-33-kB"] | ||
3177 | reader := getDataReader("datafile-33-kB") | ||
3178 | defer reader.Close() | ||
3179 | |||
3180 | // Save the data | ||
3181 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
3182 | args["objectName"] = objectName | ||
3183 | |||
3184 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
3185 | if err != nil { | ||
3186 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
3187 | return | ||
3188 | } | ||
3189 | |||
3190 | // Read the data back | ||
3191 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
3192 | if err != nil { | ||
3193 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
3194 | return | ||
3195 | } | ||
3196 | |||
3197 | st, err := r.Stat() | ||
3198 | if err != nil { | ||
3199 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
3200 | return | ||
3201 | } | ||
3202 | if st.Size != int64(bufSize) { | ||
3203 | logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) | ||
3204 | return | ||
3205 | } | ||
3206 | if err := crcMatchesName(r, "datafile-33-kB"); err != nil { | ||
3207 | logError(testName, function, args, startTime, "", "data CRC check failed", err) | ||
3208 | return | ||
3209 | } | ||
3210 | if err := r.Close(); err != nil { | ||
3211 | logError(testName, function, args, startTime, "", "Object Close failed", err) | ||
3212 | return | ||
3213 | } | ||
3214 | if err := r.Close(); err == nil { | ||
3215 | logError(testName, function, args, startTime, "", "Already closed object. No error returned", err) | ||
3216 | return | ||
3217 | } | ||
3218 | |||
3219 | successLogger(testName, function, args, startTime).Info() | ||
3220 | } | ||
3221 | |||
3222 | // Test RemoveObjects request where context cancels after timeout | ||
3223 | func testRemoveObjectsContext() { | ||
3224 | // Initialize logging params. | ||
3225 | startTime := time.Now() | ||
3226 | testName := getFuncName() | ||
3227 | function := "RemoveObjects(ctx, bucketName, objectsCh)" | ||
3228 | args := map[string]interface{}{ | ||
3229 | "bucketName": "", | ||
3230 | } | ||
3231 | |||
3232 | // Seed random based on current time. | ||
3233 | rand.Seed(time.Now().Unix()) | ||
3234 | |||
3235 | // Instantiate new minio client. | ||
3236 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
3237 | &minio.Options{ | ||
3238 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
3239 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
3240 | }) | ||
3241 | if err != nil { | ||
3242 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
3243 | return | ||
3244 | } | ||
3245 | |||
3246 | // Set user agent. | ||
3247 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
3248 | // Enable tracing, write to stderr. | ||
3249 | // c.TraceOn(os.Stderr) | ||
3250 | |||
3251 | // Generate a new random bucket name. | ||
3252 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
3253 | args["bucketName"] = bucketName | ||
3254 | |||
3255 | // Make a new bucket. | ||
3256 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
3257 | if err != nil { | ||
3258 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
3259 | return | ||
3260 | } | ||
3261 | |||
3262 | defer cleanupBucket(bucketName, c) | ||
3263 | |||
3264 | // Generate put data. | ||
3265 | r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) | ||
3266 | |||
3267 | // Multi remove of 20 objects. | ||
3268 | nrObjects := 20 | ||
3269 | objectsCh := make(chan minio.ObjectInfo) | ||
3270 | go func() { | ||
3271 | defer close(objectsCh) | ||
3272 | for i := 0; i < nrObjects; i++ { | ||
3273 | objectName := "sample" + strconv.Itoa(i) + ".txt" | ||
3274 | info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, | ||
3275 | minio.PutObjectOptions{ContentType: "application/octet-stream"}) | ||
3276 | if err != nil { | ||
3277 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
3278 | continue | ||
3279 | } | ||
3280 | objectsCh <- minio.ObjectInfo{ | ||
3281 | Key: info.Key, | ||
3282 | VersionID: info.VersionID, | ||
3283 | } | ||
3284 | } | ||
3285 | }() | ||
3286 | // Set context to cancel in 1 nanosecond. | ||
3287 | ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) | ||
3288 | args["ctx"] = ctx | ||
3289 | defer cancel() | ||
3290 | |||
3291 | // Call RemoveObjects API with short timeout. | ||
3292 | errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) | ||
3293 | // Check for error. | ||
3294 | select { | ||
3295 | case r := <-errorCh: | ||
3296 | if r.Err == nil { | ||
3297 | logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err) | ||
3298 | return | ||
3299 | } | ||
3300 | } | ||
3301 | // Set context with longer timeout. | ||
3302 | ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) | ||
3303 | args["ctx"] = ctx | ||
3304 | defer cancel() | ||
3305 | // Perform RemoveObjects with the longer timeout. Expect the removals to succeed. | ||
3306 | errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) | ||
3307 | select { | ||
3308 | case r, more := <-errorCh: | ||
3309 | if more || r.Err != nil { | ||
3310 | logError(testName, function, args, startTime, "", "Unexpected error", r.Err) | ||
3311 | return | ||
3312 | } | ||
3313 | } | ||
3314 | |||
3315 | successLogger(testName, function, args, startTime).Info() | ||
3316 | } | ||
3317 | |||
3318 | // Test removing multiple objects with Remove API | ||
3319 | func testRemoveMultipleObjects() { | ||
3320 | // initialize logging params | ||
3321 | startTime := time.Now() | ||
3322 | testName := getFuncName() | ||
3323 | function := "RemoveObjects(bucketName, objectsCh)" | ||
3324 | args := map[string]interface{}{ | ||
3325 | "bucketName": "", | ||
3326 | } | ||
3327 | |||
3328 | // Seed random based on current time. | ||
3329 | rand.Seed(time.Now().Unix()) | ||
3330 | |||
3331 | // Instantiate new minio client object. | ||
3332 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
3333 | &minio.Options{ | ||
3334 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
3335 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
3336 | }) | ||
3337 | if err != nil { | ||
3338 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
3339 | return | ||
3340 | } | ||
3341 | |||
3342 | // Set user agent. | ||
3343 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
3344 | |||
3345 | // Enable tracing, write to stderr. | ||
3346 | // c.TraceOn(os.Stderr) | ||
3347 | |||
3348 | // Generate a new random bucket name. | ||
3349 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
3350 | args["bucketName"] = bucketName | ||
3351 | |||
3352 | // Make a new bucket. | ||
3353 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
3354 | if err != nil { | ||
3355 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
3356 | return | ||
3357 | } | ||
3358 | |||
3359 | defer cleanupBucket(bucketName, c) | ||
3360 | |||
3361 | r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) | ||
3362 | |||
3363 | // Multi remove of 200 objects | ||
3364 | nrObjects := 200 | ||
3365 | |||
3366 | objectsCh := make(chan minio.ObjectInfo) | ||
3367 | |||
3368 | go func() { | ||
3369 | defer close(objectsCh) | ||
3370 | // Upload objects and send them to objectsCh | ||
3371 | for i := 0; i < nrObjects; i++ { | ||
3372 | objectName := "sample" + strconv.Itoa(i) + ".txt" | ||
3373 | info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, | ||
3374 | minio.PutObjectOptions{ContentType: "application/octet-stream"}) | ||
3375 | if err != nil { | ||
3376 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
3377 | continue | ||
3378 | } | ||
3379 | objectsCh <- minio.ObjectInfo{ | ||
3380 | Key: info.Key, | ||
3381 | VersionID: info.VersionID, | ||
3382 | } | ||
3383 | } | ||
3384 | }() | ||
3385 | |||
3386 | // Call RemoveObjects API | ||
3387 | errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) | ||
3388 | |||
3389 | // Check if errorCh doesn't receive any error | ||
3390 | select { | ||
3391 | case r, more := <-errorCh: | ||
3392 | if more { | ||
3393 | logError(testName, function, args, startTime, "", "Unexpected error", r.Err) | ||
3394 | return | ||
3395 | } | ||
3396 | } | ||
3397 | |||
3398 | successLogger(testName, function, args, startTime).Info() | ||
3399 | } | ||
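
// A closed channel yields its zero value, which is why the single-case select
// above terminates; ranging over the error channel expresses the same thing
// more directly. A sketch of that consumption pattern, assuming objectsCh is
// fed and closed as in the test.
func exampleDrainRemoveErrors(c *minio.Client, bucket string, objectsCh <-chan minio.ObjectInfo) error {
	for rErr := range c.RemoveObjects(context.Background(), bucket, objectsCh, minio.RemoveObjectsOptions{}) {
		if rErr.Err != nil {
			return rErr.Err // first failed deletion
		}
	}
	return nil
}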
3400 | |||
3401 | // Test removing multiple objects and check for results | ||
3402 | func testRemoveMultipleObjectsWithResult() { | ||
3403 | // initialize logging params | ||
3404 | startTime := time.Now() | ||
3405 | testName := getFuncName() | ||
3406 | function := "RemoveObjects(bucketName, objectsCh)" | ||
3407 | args := map[string]interface{}{ | ||
3408 | "bucketName": "", | ||
3409 | } | ||
3410 | |||
3411 | // Seed random based on current time. | ||
3412 | rand.Seed(time.Now().Unix()) | ||
3413 | |||
3414 | // Instantiate new minio client object. | ||
3415 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
3416 | &minio.Options{ | ||
3417 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
3418 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
3419 | }) | ||
3420 | if err != nil { | ||
3421 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
3422 | return | ||
3423 | } | ||
3424 | |||
3425 | // Set user agent. | ||
3426 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
3427 | |||
3428 | // Enable tracing, write to stderr. | ||
3429 | // c.TraceOn(os.Stderr) | ||
3430 | |||
3431 | // Generate a new random bucket name. | ||
3432 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
3433 | args["bucketName"] = bucketName | ||
3434 | |||
3435 | // Make a new bucket. | ||
3436 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
3437 | if err != nil { | ||
3438 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
3439 | return | ||
3440 | } | ||
3441 | |||
3442 | defer cleanupVersionedBucket(bucketName, c) | ||
3443 | |||
3444 | r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) | ||
3445 | |||
3446 | nrObjects := 10 | ||
3447 | nrLockedObjects := 5 | ||
3448 | |||
3449 | objectsCh := make(chan minio.ObjectInfo) | ||
3450 | |||
3451 | go func() { | ||
3452 | defer close(objectsCh) | ||
3453 | // Upload objects and send them to objectsCh | ||
3454 | for i := 0; i < nrObjects; i++ { | ||
3455 | objectName := "sample" + strconv.Itoa(i) + ".txt" | ||
3456 | info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, | ||
3457 | minio.PutObjectOptions{ContentType: "application/octet-stream"}) | ||
3458 | if err != nil { | ||
3459 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
3460 | return | ||
3461 | } | ||
3462 | if i < nrLockedObjects { | ||
3463 | // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC) | ||
3464 | t := time.Now().Add(5 * time.Minute) | ||
3465 | m := minio.RetentionMode(minio.Governance) | ||
3466 | opts := minio.PutObjectRetentionOptions{ | ||
3467 | GovernanceBypass: false, | ||
3468 | RetainUntilDate: &t, | ||
3469 | Mode: &m, | ||
3470 | VersionID: info.VersionID, | ||
3471 | } | ||
3472 | err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) | ||
3473 | if err != nil { | ||
3474 | logError(testName, function, args, startTime, "", "Error setting retention", err) | ||
3475 | return | ||
3476 | } | ||
3477 | } | ||
3478 | |||
3479 | objectsCh <- minio.ObjectInfo{ | ||
3480 | Key: info.Key, | ||
3481 | VersionID: info.VersionID, | ||
3482 | } | ||
3483 | } | ||
3484 | }() | ||
3485 | |||
3486 | // Call RemoveObjects API | ||
3487 | resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) | ||
3488 | |||
3489 | var foundNil, foundErr int | ||
3490 | |||
3491 | for { | ||
3492 | // Check if errorCh doesn't receive any error | ||
3493 | select { | ||
3494 | case deleteRes, ok := <-resultCh: | ||
3495 | if !ok { | ||
3496 | goto out | ||
3497 | } | ||
3498 | if deleteRes.ObjectName == "" { | ||
3499 | logError(testName, function, args, startTime, "", "Unexpected object name", nil) | ||
3500 | return | ||
3501 | } | ||
3502 | if deleteRes.ObjectVersionID == "" { | ||
3503 | logError(testName, function, args, startTime, "", "Unexpected object version ID", nil) | ||
3504 | return | ||
3505 | } | ||
3506 | |||
3507 | if deleteRes.Err == nil { | ||
3508 | foundNil++ | ||
3509 | } else { | ||
3510 | foundErr++ | ||
3511 | } | ||
3512 | } | ||
3513 | } | ||
3514 | out: | ||
3515 | if foundNil+foundErr != nrObjects { | ||
3516 | logError(testName, function, args, startTime, "", "Unexpected number of results", nil) | ||
3517 | return | ||
3518 | } | ||
3519 | |||
3520 | if foundNil != nrObjects-nrLockedObjects { | ||
3521 | logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil) | ||
3522 | return | ||
3523 | } | ||
3524 | |||
3525 | if foundErr != nrLockedObjects { | ||
3526 | logError(testName, function, args, startTime, "", "Unexpected number of errors", nil) | ||
3527 | return | ||
3528 | } | ||
3529 | |||
3530 | successLogger(testName, function, args, startTime).Info() | ||
3531 | } | ||
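
// The five locked objects above fail to delete because governance retention is
// still in force. Callers holding s3:BypassGovernanceRetention can request a
// bypass via the options struct; a hedged sketch of only that change.
func exampleRemoveWithBypass(c *minio.Client, bucket string, objectsCh <-chan minio.ObjectInfo) <-chan minio.RemoveObjectError {
	return c.RemoveObjects(context.Background(), bucket, objectsCh,
		minio.RemoveObjectsOptions{GovernanceBypass: true})
}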
3532 | |||
3533 | // Tests FPutObject of a big file to trigger multipart | ||
3534 | func testFPutObjectMultipart() { | ||
3535 | // initialize logging params | ||
3536 | startTime := time.Now() | ||
3537 | testName := getFuncName() | ||
3538 | function := "FPutObject(bucketName, objectName, fileName, opts)" | ||
3539 | args := map[string]interface{}{ | ||
3540 | "bucketName": "", | ||
3541 | "objectName": "", | ||
3542 | "fileName": "", | ||
3543 | "opts": "", | ||
3544 | } | ||
3545 | |||
3546 | // Seed random based on current time. | ||
3547 | rand.Seed(time.Now().Unix()) | ||
3548 | |||
3549 | // Instantiate new minio client object. | ||
3550 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
3551 | &minio.Options{ | ||
3552 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
3553 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
3554 | }) | ||
3555 | if err != nil { | ||
3556 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
3557 | return | ||
3558 | } | ||
3559 | |||
3560 | // Enable tracing, write to stderr. | ||
3561 | // c.TraceOn(os.Stderr) | ||
3562 | |||
3563 | // Set user agent. | ||
3564 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
3565 | |||
3566 | // Generate a new random bucket name. | ||
3567 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
3568 | args["bucketName"] = bucketName | ||
3569 | |||
3570 | // Make a new bucket. | ||
3571 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
3572 | if err != nil { | ||
3573 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
3574 | return | ||
3575 | } | ||
3576 | |||
3577 | defer cleanupBucket(bucketName, c) | ||
3578 | |||
3579 | // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. | ||
3580 | fileName := getMintDataDirFilePath("datafile-129-MB") | ||
3581 | if fileName == "" { | ||
3582 | // Make a temp file with minPartSize bytes of data. | ||
3583 | file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") | ||
3584 | if err != nil { | ||
3585 | logError(testName, function, args, startTime, "", "TempFile creation failed", err) | ||
3586 | return | ||
3587 | } | ||
3588 | // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. | ||
3589 | if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { | ||
3590 | logError(testName, function, args, startTime, "", "Copy failed", err) | ||
3591 | return | ||
3592 | } | ||
3593 | if err = file.Close(); err != nil { | ||
3594 | logError(testName, function, args, startTime, "", "File Close failed", err) | ||
3595 | return | ||
3596 | } | ||
3597 | fileName = file.Name() | ||
3598 | args["fileName"] = fileName | ||
3599 | } | ||
3600 | totalSize := dataFileMap["datafile-129-MB"] | ||
3601 | // Set base object name | ||
3602 | objectName := bucketName + "FPutObject" + "-standard" | ||
3603 | args["objectName"] = objectName | ||
3604 | |||
3605 | objectContentType := "testapplication/octet-stream" | ||
3606 | args["objectContentType"] = objectContentType | ||
3607 | |||
3608 | // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) | ||
3609 | _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) | ||
3610 | if err != nil { | ||
3611 | logError(testName, function, args, startTime, "", "FPutObject failed", err) | ||
3612 | return | ||
3613 | } | ||
3614 | |||
3615 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
3616 | if err != nil { | ||
3617 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
3618 | return | ||
3619 | } | ||
3620 | objInfo, err := r.Stat() | ||
3621 | if err != nil { | ||
3622 | logError(testName, function, args, startTime, "", "Unexpected error", err) | ||
3623 | return | ||
3624 | } | ||
3625 | if objInfo.Size != int64(totalSize) { | ||
3626 | logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) | ||
3627 | return | ||
3628 | } | ||
3629 | if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { | ||
3630 | logError(testName, function, args, startTime, "", "ContentType doesn't match", err) | ||
3631 | return | ||
3632 | } | ||
3633 | |||
3634 | successLogger(testName, function, args, startTime).Info() | ||
3635 | } | ||
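
// FPutObject splits the 129 MB file into parts behind the scenes. The part
// count is plain ceiling division; a small sketch, where the 16 MiB default
// part size is an assumption about the client rather than a documented fact.
func examplePartCount(objectSize, partSize int64) int64 {
	if partSize <= 0 {
		partSize = 16 << 20 // assumed default part size
	}
	return (objectSize + partSize - 1) / partSize // ceil(objectSize / partSize)
}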
3636 | |||
3637 | // Tests FPutObject with no contentType provided (default = application/octet-stream) | ||
3638 | func testFPutObject() { | ||
3639 | // initialize logging params | ||
3640 | startTime := time.Now() | ||
3641 | testName := getFuncName() | ||
3642 | function := "FPutObject(bucketName, objectName, fileName, opts)" | ||
3643 | |||
3644 | args := map[string]interface{}{ | ||
3645 | "bucketName": "", | ||
3646 | "objectName": "", | ||
3647 | "fileName": "", | ||
3648 | "opts": "", | ||
3649 | } | ||
3650 | |||
3651 | // Seed random based on current time. | ||
3652 | rand.Seed(time.Now().Unix()) | ||
3653 | |||
3654 | // Instantiate new minio client object. | ||
3655 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
3656 | &minio.Options{ | ||
3657 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
3658 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
3659 | }) | ||
3660 | if err != nil { | ||
3661 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
3662 | return | ||
3663 | } | ||
3664 | |||
3665 | // Enable tracing, write to stderr. | ||
3666 | // c.TraceOn(os.Stderr) | ||
3667 | |||
3668 | // Set user agent. | ||
3669 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
3670 | |||
3671 | // Generate a new random bucket name. | ||
3672 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
3673 | location := "us-east-1" | ||
3674 | |||
3675 | // Make a new bucket. | ||
3676 | args["bucketName"] = bucketName | ||
3677 | args["location"] = location | ||
3678 | function = "MakeBucket(bucketName, location)" | ||
3679 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) | ||
3680 | if err != nil { | ||
3681 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
3682 | return | ||
3683 | } | ||
3684 | |||
3685 | defer cleanupBucket(bucketName, c) | ||
3686 | |||
3687 | // Upload several parts' worth of data to keep all 3 multipart 'workers' busy and still have an extra part. | ||
3688 | // Use different data in part for multipart tests to check parts are uploaded in correct order. | ||
3689 | fName := getMintDataDirFilePath("datafile-129-MB") | ||
3690 | if fName == "" { | ||
3691 | // Make a temp file with minPartSize bytes of data. | ||
3692 | file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") | ||
3693 | if err != nil { | ||
3694 | logError(testName, function, args, startTime, "", "TempFile creation failed", err) | ||
3695 | return | ||
3696 | } | ||
3697 | |||
3698 | // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. | ||
3699 | if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { | ||
3700 | logError(testName, function, args, startTime, "", "File copy failed", err) | ||
3701 | return | ||
3702 | } | ||
3703 | // Close the file proactively for Windows. | ||
3704 | if err = file.Close(); err != nil { | ||
3705 | logError(testName, function, args, startTime, "", "File close failed", err) | ||
3706 | return | ||
3707 | } | ||
3708 | defer os.Remove(file.Name()) | ||
3709 | fName = file.Name() | ||
3710 | } | ||
3711 | |||
3712 | // Set base object name | ||
3713 | function = "FPutObject(bucketName, objectName, fileName, opts)" | ||
3714 | objectName := bucketName + "FPutObject" | ||
3715 | args["objectName"] = objectName + "-standard" | ||
3716 | args["fileName"] = fName | ||
3717 | args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"} | ||
3718 | |||
3719 | // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) | ||
3720 | ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) | ||
3721 | if err != nil { | ||
3722 | logError(testName, function, args, startTime, "", "FPutObject failed", err) | ||
3723 | return | ||
3724 | } | ||
3725 | |||
3726 | if ui.Size != int64(dataFileMap["datafile-129-MB"]) { | ||
3727 | logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err) | ||
3728 | return | ||
3729 | } | ||
3730 | |||
3731 | // Perform FPutObject with no contentType provided (Expecting application/octet-stream) | ||
3732 | args["objectName"] = objectName + "-Octet" | ||
3733 | _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) | ||
3734 | if err != nil { | ||
3735 | logError(testName, function, args, startTime, "", "File close failed", err) | ||
3736 | return | ||
3737 | } | ||
3738 | |||
3739 | srcFile, err := os.Open(fName) | ||
3740 | if err != nil { | ||
3741 | logError(testName, function, args, startTime, "", "File open failed", err) | ||
3742 | return | ||
3743 | } | ||
3744 | defer srcFile.Close() | ||
3745 | // Add extension to temp file name | ||
3746 | tmpFile, err := os.Create(fName + ".gtar") | ||
3747 | if err != nil { | ||
3748 | logError(testName, function, args, startTime, "", "File create failed", err) | ||
3749 | return | ||
3750 | } | ||
3751 | _, err = io.Copy(tmpFile, srcFile) | ||
3752 | if err != nil { | ||
3753 | logError(testName, function, args, startTime, "", "File copy failed", err) | ||
3754 | return | ||
3755 | } | ||
3756 | tmpFile.Close() | ||
3757 | |||
3758 | // Perform FPutObject with no contentType provided (Expecting application/x-gtar) | ||
3759 | args["objectName"] = objectName + "-GTar" | ||
3760 | args["opts"] = minio.PutObjectOptions{} | ||
3761 | _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) | ||
3762 | if err != nil { | ||
3763 | logError(testName, function, args, startTime, "", "FPutObject failed", err) | ||
3764 | return | ||
3765 | } | ||
3766 | |||
3767 | // Check headers | ||
3768 | function = "StatObject(bucketName, objectName, opts)" | ||
3769 | args["objectName"] = objectName + "-standard" | ||
3770 | rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) | ||
3771 | if err != nil { | ||
3772 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
3773 | return | ||
3774 | } | ||
3775 | if rStandard.ContentType != "application/octet-stream" { | ||
3776 | logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err) | ||
3777 | return | ||
3778 | } | ||
3779 | |||
3780 | function = "StatObject(bucketName, objectName, opts)" | ||
3781 | args["objectName"] = objectName + "-Octet" | ||
3782 | rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) | ||
3783 | if err != nil { | ||
3784 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
3785 | return | ||
3786 | } | ||
3787 | if rOctet.ContentType != "application/octet-stream" { | ||
3788 | logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err) | ||
3789 | return | ||
3790 | } | ||
3791 | |||
3792 | function = "StatObject(bucketName, objectName, opts)" | ||
3793 | args["objectName"] = objectName + "-GTar" | ||
3794 | rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) | ||
3795 | if err != nil { | ||
3796 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
3797 | return | ||
3798 | } | ||
3799 | if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { | ||
3800 | logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-tar or application/octet-stream, got "+rGTar.ContentType, err) | ||
3801 | return | ||
3802 | } | ||
3803 | |||
3804 | os.Remove(fName + ".gtar") | ||
3805 | successLogger(testName, function, args, startTime).Info() | ||
3806 | } | ||
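
// The "-GTar" case shows that FPutObject infers a content type from the file
// extension when none is supplied. The stdlib offers the same idea; a sketch
// using mime.TypeByExtension and path/filepath, two imports this test file
// does not otherwise need.
func exampleDetectContentType(fileName string) string {
	if ct := mime.TypeByExtension(filepath.Ext(fileName)); ct != "" {
		return ct
	}
	return "application/octet-stream" // fallback the assertions above also accept
}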
3807 | |||
3808 | // Tests FPutObject request when context cancels after timeout | ||
3809 | func testFPutObjectContext() { | ||
3810 | // initialize logging params | ||
3811 | startTime := time.Now() | ||
3812 | testName := getFuncName() | ||
3813 | function := "FPutObject(bucketName, objectName, fileName, opts)" | ||
3814 | args := map[string]interface{}{ | ||
3815 | "bucketName": "", | ||
3816 | "objectName": "", | ||
3817 | "fileName": "", | ||
3818 | "opts": "", | ||
3819 | } | ||
3820 | // Seed random based on current time. | ||
3821 | rand.Seed(time.Now().Unix()) | ||
3822 | |||
3823 | // Instantiate new minio client object. | ||
3824 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
3825 | &minio.Options{ | ||
3826 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
3827 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
3828 | }) | ||
3829 | if err != nil { | ||
3830 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
3831 | return | ||
3832 | } | ||
3833 | |||
3834 | // Enable tracing, write to stderr. | ||
3835 | // c.TraceOn(os.Stderr) | ||
3836 | |||
3837 | // Set user agent. | ||
3838 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
3839 | |||
3840 | // Generate a new random bucket name. | ||
3841 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
3842 | args["bucketName"] = bucketName | ||
3843 | |||
3844 | // Make a new bucket. | ||
3845 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
3846 | if err != nil { | ||
3847 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
3848 | return | ||
3849 | } | ||
3850 | |||
3851 | defer cleanupBucket(bucketName, c) | ||
3852 | |||
3853 | // Upload 1 part's worth of data to use multipart upload. | ||
3854 | // Use different data in part for multipart tests to check parts are uploaded in correct order. | ||
3855 | fName := getMintDataDirFilePath("datafile-1-MB") | ||
3856 | if fName == "" { | ||
3857 | // Make a temp file with 1 MiB bytes of data. | ||
3858 | file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") | ||
3859 | if err != nil { | ||
3860 | logError(testName, function, args, startTime, "", "TempFile creation failed", err) | ||
3861 | return | ||
3862 | } | ||
3863 | |||
3864 | // Upload 1 part to trigger multipart upload | ||
3865 | if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { | ||
3866 | logError(testName, function, args, startTime, "", "File copy failed", err) | ||
3867 | return | ||
3868 | } | ||
3869 | // Close the file proactively for Windows. | ||
3870 | if err = file.Close(); err != nil { | ||
3871 | logError(testName, function, args, startTime, "", "File close failed", err) | ||
3872 | return | ||
3873 | } | ||
3874 | defer os.Remove(file.Name()) | ||
3875 | fName = file.Name() | ||
3876 | } | ||
3877 | |||
3878 | // Set base object name | ||
3879 | objectName := bucketName + "FPutObjectContext" | ||
3880 | args["objectName"] = objectName | ||
3881 | ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) | ||
3882 | args["ctx"] = ctx | ||
3883 | defer cancel() | ||
3884 | |||
3885 | // Perform FPutObject with contentType provided (Expecting application/octet-stream) | ||
3886 | _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) | ||
3887 | if err == nil { | ||
3888 | logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) | ||
3889 | return | ||
3890 | } | ||
3891 | ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) | ||
3892 | defer cancel() | ||
3893 | // Perform FPutObject with a long timeout. Expect the put object to succeed | ||
3894 | _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) | ||
3895 | if err != nil { | ||
3896 | logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err) | ||
3897 | return | ||
3898 | } | ||
3899 | |||
3900 | _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) | ||
3901 | if err != nil { | ||
3902 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
3903 | return | ||
3904 | } | ||
3905 | |||
3906 | successLogger(testName, function, args, startTime).Info() | ||
3907 | } | ||
3908 | |||
3909 | // Tests FPutObject request when context cancels after timeout | ||
3910 | func testFPutObjectContextV2() { | ||
3911 | // initialize logging params | ||
3912 | startTime := time.Now() | ||
3913 | testName := getFuncName() | ||
3914 | function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)" | ||
3915 | args := map[string]interface{}{ | ||
3916 | "bucketName": "", | ||
3917 | "objectName": "", | ||
3918 | "opts": "minio.PutObjectOptions{ContentType:objectContentType}", | ||
3919 | } | ||
3920 | // Seed random based on current time. | ||
3921 | rand.Seed(time.Now().Unix()) | ||
3922 | |||
3923 | // Instantiate new minio client object. | ||
3924 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
3925 | &minio.Options{ | ||
3926 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
3927 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
3928 | }) | ||
3929 | if err != nil { | ||
3930 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
3931 | return | ||
3932 | } | ||
3933 | |||
3934 | // Enable tracing, write to stderr. | ||
3935 | // c.TraceOn(os.Stderr) | ||
3936 | |||
3937 | // Set user agent. | ||
3938 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
3939 | |||
3940 | // Generate a new random bucket name. | ||
3941 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
3942 | args["bucketName"] = bucketName | ||
3943 | |||
3944 | // Make a new bucket. | ||
3945 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
3946 | if err != nil { | ||
3947 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
3948 | return | ||
3949 | } | ||
3950 | |||
3951 | defer cleanupBucket(bucketName, c) | ||
3952 | |||
3953 | // Upload 1 part's worth of data to use multipart upload. | ||
3954 | // Use different data in each part of multipart tests to check that parts are uploaded in the correct order. | ||
3955 | fName := getMintDataDirFilePath("datafile-1-MB") | ||
3956 | if fName == "" { | ||
3957 | // Make a temp file with 1 MiB of data. | ||
3958 | file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") | ||
3959 | if err != nil { | ||
3960 | logError(testName, function, args, startTime, "", "Temp file creation failed", err) | ||
3961 | return | ||
3962 | } | ||
3963 | |||
3964 | // Write 1 part's worth of data to trigger multipart upload | ||
3965 | if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { | ||
3966 | logError(testName, function, args, startTime, "", "File copy failed", err) | ||
3967 | return | ||
3968 | } | ||
3969 | |||
3970 | // Close the file proactively, as required on Windows. | ||
3971 | if err = file.Close(); err != nil { | ||
3972 | logError(testName, function, args, startTime, "", "File close failed", err) | ||
3973 | return | ||
3974 | } | ||
3975 | defer os.Remove(file.Name()) | ||
3976 | fName = file.Name() | ||
3977 | } | ||
3978 | |||
3979 | // Set base object name | ||
3980 | objectName := bucketName + "FPutObjectContext" | ||
3981 | args["objectName"] = objectName | ||
3982 | |||
3983 | ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) | ||
3984 | args["ctx"] = ctx | ||
3985 | defer cancel() | ||
3986 | |||
3987 | // Perform FPutObject with contentType provided; the expired context must make the call fail | ||
3988 | _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) | ||
3989 | if err == nil { | ||
3990 | logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) | ||
3991 | return | ||
3992 | } | ||
3993 | ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) | ||
3994 | defer cancel() | ||
3995 | // Perform FPutObject with a long timeout. Expect the put object to succeed | ||
3996 | _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) | ||
3997 | if err != nil { | ||
3998 | logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err) | ||
3999 | return | ||
4000 | } | ||
4001 | |||
4002 | _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) | ||
4003 | if err != nil { | ||
4004 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
4005 | return | ||
4006 | } | ||
4007 | |||
4008 | successLogger(testName, function, args, startTime).Info() | ||
4009 | } | ||
4010 | |||
4011 | // Tests that PutObject honors request cancellation through its context. | ||
4012 | func testPutObjectContext() { | ||
4013 | // initialize logging params | ||
4014 | startTime := time.Now() | ||
4015 | testName := getFuncName() | ||
4016 | function := "PutObject(ctx, bucketName, objectName, fileName, opts)" | ||
4017 | args := map[string]interface{}{ | ||
4018 | "ctx": "", | ||
4019 | "bucketName": "", | ||
4020 | "objectName": "", | ||
4021 | "opts": "", | ||
4022 | } | ||
4023 | |||
4024 | // Instantiate new minio client object. | ||
4025 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
4026 | &minio.Options{ | ||
4027 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
4028 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
4029 | }) | ||
4030 | if err != nil { | ||
4031 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
4032 | return | ||
4033 | } | ||
4034 | |||
4035 | // Enable tracing, write to stderr. | ||
4036 | // c.TraceOn(os.Stderr) | ||
4037 | |||
4038 | // Set user agent. | ||
4039 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
4040 | |||
4041 | // Make a new bucket. | ||
4042 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
4043 | args["bucketName"] = bucketName | ||
4044 | |||
4045 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
4046 | if err != nil { | ||
4047 | logError(testName, function, args, startTime, "", "MakeBucket call failed", err) | ||
4048 | return | ||
4049 | } | ||
4050 | |||
4051 | defer cleanupBucket(bucketName, c) | ||
4052 | |||
4053 | bufSize := dataFileMap["datafile-33-kB"] | ||
4054 | reader := getDataReader("datafile-33-kB") | ||
4055 | defer reader.Close() | ||
4056 | objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) | ||
4057 | args["objectName"] = objectName | ||
4058 | |||
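| // Build a context that is already dead: a 1ns deadline plus an immediate | ||
| // cancel(), so the PutObject call below must fail. | ||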
4059 | ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) | ||
4060 | cancel() | ||
4061 | args["ctx"] = ctx | ||
4062 | args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} | ||
4063 | |||
4064 | _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
4065 | if err == nil { | ||
4066 | logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err) | ||
4067 | return | ||
4068 | } | ||
4069 | |||
4070 | ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) | ||
4071 | args["ctx"] = ctx | ||
4072 | |||
4073 | defer cancel() | ||
4074 | reader = getDataReader("datafile-33-kB") | ||
4075 | defer reader.Close() | ||
4076 | _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
4077 | if err != nil { | ||
4078 | logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) | ||
4079 | return | ||
4080 | } | ||
4081 | |||
4082 | successLogger(testName, function, args, startTime).Info() | ||
4083 | } | ||
4084 | |||
4085 | // Tests get object with s3zip extensions. | ||
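| // MinIO's s3zip extension allows listing and reading individual files inside | ||
| // a zip object by sending the "x-minio-extract: true" header with the request. | ||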
4086 | func testGetObjectS3Zip() { | ||
4087 | // initialize logging params | ||
4088 | startTime := time.Now() | ||
4089 | testName := getFuncName() | ||
4090 | function := "GetObject(bucketName, objectName)" | ||
4091 | args := map[string]interface{}{"x-minio-extract": true} | ||
4092 | |||
4093 | // Seed random based on current time. | ||
4094 | rand.Seed(time.Now().Unix()) | ||
4095 | |||
4096 | // Instantiate new minio client object. | ||
4097 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
4098 | &minio.Options{ | ||
4099 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
4100 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
4101 | }) | ||
4102 | if err != nil { | ||
4103 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
4104 | return | ||
4105 | } | ||
4106 | |||
4107 | // Enable tracing, write to stderr. | ||
4108 | // c.TraceOn(os.Stderr) | ||
4109 | |||
4110 | // Set user agent. | ||
4111 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
4112 | |||
4113 | // Generate a new random bucket name. | ||
4114 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
4115 | args["bucketName"] = bucketName | ||
4116 | |||
4117 | // Make a new bucket. | ||
4118 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
4119 | if err != nil { | ||
4120 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
4121 | return | ||
4122 | } | ||
4123 | |||
4124 | defer func() { | ||
4125 | // Delete all objects and buckets | ||
4126 | if err = cleanupBucket(bucketName, c); err != nil { | ||
4127 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
4128 | return | ||
4129 | } | ||
4130 | }() | ||
4131 | |||
4132 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip" | ||
4133 | args["objectName"] = objectName | ||
4134 | |||
4135 | var zipFile bytes.Buffer | ||
4136 | zw := zip.NewWriter(&zipFile) | ||
4137 | rng := rand.New(rand.NewSource(0xc0cac01a)) | ||
4138 | const nFiles = 500 | ||
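| // File i carries i bytes of random data; the last iteration rewrites the loop | ||
| // variable to 1000000 so a single large zero-filled (highly compressible) file | ||
| // is added before the loop terminates. | ||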
4139 | for i := 0; i <= nFiles; i++ { | ||
4140 | if i == nFiles { | ||
4141 | // Make one large, compressible file. | ||
4142 | i = 1000000 | ||
4143 | } | ||
4144 | b := make([]byte, i) | ||
4145 | if i < nFiles { | ||
4146 | rng.Read(b) | ||
4147 | } | ||
4148 | wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i)) | ||
4149 | if err != nil { | ||
4150 | logError(testName, function, args, startTime, "", "zw.Create failed", err) | ||
4151 | return | ||
4152 | } | ||
4153 | wc.Write(b) | ||
4154 | } | ||
4155 | err = zw.Close() | ||
4156 | if err != nil { | ||
4157 | logError(testName, function, args, startTime, "", "zw.Close failed", err) | ||
4158 | return | ||
4159 | } | ||
4160 | buf := zipFile.Bytes() | ||
4161 | |||
4162 | // Save the data | ||
4163 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
4164 | if err != nil { | ||
4165 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
4166 | return | ||
4167 | } | ||
4168 | |||
4169 | // Read the data back | ||
4170 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
4171 | if err != nil { | ||
4172 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
4173 | return | ||
4174 | } | ||
4175 | |||
4176 | st, err := r.Stat() | ||
4177 | if err != nil { | ||
4178 | logError(testName, function, args, startTime, "", "Stat object failed", err) | ||
4179 | return | ||
4180 | } | ||
4181 | |||
4182 | if st.Size != int64(len(buf)) { | ||
4183 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", len(buf), st.Size), err) | ||
4184 | return | ||
4185 | } | ||
4186 | r.Close() | ||
4187 | |||
4188 | zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf))) | ||
4189 | if err != nil { | ||
4190 | logError(testName, function, args, startTime, "", "zip.NewReader failed", err) | ||
4191 | return | ||
4192 | } | ||
4193 | lOpts := minio.ListObjectsOptions{} | ||
4194 | lOpts.Set("x-minio-extract", "true") | ||
4195 | lOpts.Prefix = objectName + "/" | ||
4196 | lOpts.Recursive = true | ||
4197 | list := c.ListObjects(context.Background(), bucketName, lOpts) | ||
4198 | listed := map[string]minio.ObjectInfo{} | ||
4199 | for item := range list { | ||
4200 | if item.Err != nil { | ||
4201 | break | ||
4202 | } | ||
4203 | listed[item.Key] = item | ||
4204 | } | ||
4205 | if len(listed) == 0 { | ||
4206 | // Assume we are running against non-minio. | ||
4207 | args["SKIPPED"] = true | ||
4208 | ignoredLog(testName, function, args, startTime, "s3zip does not appear to be present").Info() | ||
4209 | return | ||
4210 | } | ||
4211 | |||
4212 | for _, file := range zr.File { | ||
4213 | if file.FileInfo().IsDir() { | ||
4214 | continue | ||
4215 | } | ||
4216 | args["zipfile"] = file.Name | ||
4217 | zfr, err := file.Open() | ||
4218 | if err != nil { | ||
4219 | logError(testName, function, args, startTime, "", "file.Open failed", err) | ||
4220 | return | ||
4221 | } | ||
4222 | want, err := io.ReadAll(zfr) | ||
4223 | if err != nil { | ||
4224 | logError(testName, function, args, startTime, "", "zip file read failed", err) | ||
4225 | return | ||
4226 | } | ||
4227 | |||
4228 | opts := minio.GetObjectOptions{} | ||
4229 | opts.Set("x-minio-extract", "true") | ||
4230 | key := path.Join(objectName, file.Name) | ||
4231 | r, err = c.GetObject(context.Background(), bucketName, key, opts) | ||
4232 | if err != nil { | ||
4233 | terr := minio.ToErrorResponse(err) | ||
4234 | if terr.StatusCode != http.StatusNotFound { | ||
4235 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
4236 | } | ||
4237 | return | ||
4238 | } | ||
4239 | got, err := io.ReadAll(r) | ||
4240 | if err != nil { | ||
4241 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
4242 | return | ||
4243 | } | ||
4244 | r.Close() | ||
4245 | if !bytes.Equal(want, got) { | ||
4246 | logError(testName, function, args, startTime, "", "Content mismatch", err) | ||
4247 | return | ||
4248 | } | ||
4249 | oi, ok := listed[key] | ||
4250 | if !ok { | ||
4251 | logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key)) | ||
4252 | return | ||
4253 | } | ||
4254 | if int(oi.Size) != len(got) { | ||
4255 | logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got))) | ||
4256 | return | ||
4257 | } | ||
4258 | delete(listed, key) | ||
4259 | } | ||
4260 | delete(args, "zipfile") | ||
4261 | if len(listed) > 0 { | ||
4262 | logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed)) | ||
4263 | return | ||
4264 | } | ||
4265 | successLogger(testName, function, args, startTime).Info() | ||
4266 | } | ||
4267 | |||
4268 | // Tests get object ReadSeeker interface methods. | ||
4269 | func testGetObjectReadSeekFunctional() { | ||
4270 | // initialize logging params | ||
4271 | startTime := time.Now() | ||
4272 | testName := getFuncName() | ||
4273 | function := "GetObject(bucketName, objectName)" | ||
4274 | args := map[string]interface{}{} | ||
4275 | |||
4276 | // Seed random based on current time. | ||
4277 | rand.Seed(time.Now().Unix()) | ||
4278 | |||
4279 | // Instantiate new minio client object. | ||
4280 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
4281 | &minio.Options{ | ||
4282 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
4283 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
4284 | }) | ||
4285 | if err != nil { | ||
4286 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
4287 | return | ||
4288 | } | ||
4289 | |||
4290 | // Enable tracing, write to stderr. | ||
4291 | // c.TraceOn(os.Stderr) | ||
4292 | |||
4293 | // Set user agent. | ||
4294 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
4295 | |||
4296 | // Generate a new random bucket name. | ||
4297 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
4298 | args["bucketName"] = bucketName | ||
4299 | |||
4300 | // Make a new bucket. | ||
4301 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
4302 | if err != nil { | ||
4303 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
4304 | return | ||
4305 | } | ||
4306 | |||
4307 | defer func() { | ||
4308 | // Delete all objects and buckets | ||
4309 | if err = cleanupBucket(bucketName, c); err != nil { | ||
4310 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
4311 | return | ||
4312 | } | ||
4313 | }() | ||
4314 | |||
4315 | // Generate 33 kB of data. | ||
4316 | bufSize := dataFileMap["datafile-33-kB"] | ||
4317 | reader := getDataReader("datafile-33-kB") | ||
4318 | defer reader.Close() | ||
4319 | |||
4320 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
4321 | args["objectName"] = objectName | ||
4322 | |||
4323 | buf, err := io.ReadAll(reader) | ||
4324 | if err != nil { | ||
4325 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
4326 | return | ||
4327 | } | ||
4328 | |||
4329 | // Save the data | ||
4330 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
4331 | if err != nil { | ||
4332 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
4333 | return | ||
4334 | } | ||
4335 | |||
4336 | // Read the data back | ||
4337 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
4338 | if err != nil { | ||
4339 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
4340 | return | ||
4341 | } | ||
4342 | |||
4343 | st, err := r.Stat() | ||
4344 | if err != nil { | ||
4345 | logError(testName, function, args, startTime, "", "Stat object failed", err) | ||
4346 | return | ||
4347 | } | ||
4348 | |||
4349 | if st.Size != int64(bufSize) { | ||
4350 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err) | ||
4351 | return | ||
4352 | } | ||
4353 | |||
4354 | // The following helper compares data read from the reader after a seek | ||
4355 | // against the corresponding range of the original buffer. | ||
4356 | cmpData := func(r io.Reader, start, end int) { | ||
4357 | if end-start == 0 { | ||
4358 | return | ||
4359 | } | ||
4360 | buffer := bytes.NewBuffer([]byte{}) | ||
4361 | if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { | ||
4362 | if err != io.EOF { | ||
4363 | logError(testName, function, args, startTime, "", "CopyN failed", err) | ||
4364 | return | ||
4365 | } | ||
4366 | } | ||
4367 | if !bytes.Equal(buf[start:end], buffer.Bytes()) { | ||
4368 | logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) | ||
4369 | return | ||
4370 | } | ||
4371 | } | ||
4372 | |||
4373 | // Generic seek error for errors other than io.EOF | ||
4374 | seekErr := errors.New("seek error") | ||
4375 | |||
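| // Each case seeks with offset/whence (0=io.SeekStart, 1=io.SeekCurrent, | ||
| // 2=io.SeekEnd), checks the returned position and error, and optionally | ||
| // compares buf[start:end] against a read from the object. | ||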
4376 | testCases := []struct { | ||
4377 | offset int64 | ||
4378 | whence int | ||
4379 | pos int64 | ||
4380 | err error | ||
4381 | shouldCmp bool | ||
4382 | start int | ||
4383 | end int | ||
4384 | }{ | ||
4385 | // Start from offset 0, fetch data and compare | ||
4386 | {0, 0, 0, nil, true, 0, 0}, | ||
4387 | // Start from offset 2048, fetch data and compare | ||
4388 | {2048, 0, 2048, nil, true, 2048, bufSize}, | ||
4389 | // Start from offset larger than possible | ||
4390 | {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0}, | ||
4391 | // Move to offset 0 without comparing | ||
4392 | {0, 0, 0, nil, false, 0, 0}, | ||
4393 | // Move one step forward and compare | ||
4394 | {1, 1, 1, nil, true, 1, bufSize}, | ||
4395 | // Move larger than possible | ||
4396 | {int64(bufSize), 1, 0, seekErr, false, 0, 0}, | ||
4397 | // Provide negative offset with CUR_SEEK | ||
4398 | {int64(-1), 1, 0, seekErr, false, 0, 0}, | ||
4399 | // Test with whence SEEK_END and with positive offset | ||
4400 | {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0}, | ||
4401 | // Test with whence SEEK_END and with negative offset | ||
4402 | {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, | ||
4403 | // Test with whence SEEK_END and with large negative offset | ||
4404 | {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0}, | ||
4405 | } | ||
4406 | |||
4407 | for i, testCase := range testCases { | ||
4408 | // Perform seek operation | ||
4409 | n, err := r.Seek(testCase.offset, testCase.whence) | ||
4410 | // We expect an error | ||
4411 | if testCase.err == seekErr && err == nil { | ||
4412 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err) | ||
4413 | return | ||
4414 | } | ||
4415 | // We expect a specific error | ||
4416 | if testCase.err != seekErr && testCase.err != err { | ||
4417 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err) | ||
4418 | return | ||
4419 | } | ||
4420 | // If we expect an error go to the next loop | ||
4421 | if testCase.err != nil { | ||
4422 | continue | ||
4423 | } | ||
4424 | // Check the returned seek pos | ||
4425 | if n != testCase.pos { | ||
4426 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err) | ||
4427 | return | ||
4428 | } | ||
4429 | // Compare only if shouldCmp is activated | ||
4430 | if testCase.shouldCmp { | ||
4431 | cmpData(r, testCase.start, testCase.end) | ||
4432 | } | ||
4433 | } | ||
4434 | successLogger(testName, function, args, startTime).Info() | ||
4435 | } | ||
4436 | |||
4437 | // Tests get object ReaderAt interface methods. | ||
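| // minio.Object implements io.ReaderAt, so data can be read at absolute | ||
| // offsets independently of the sequential read position. | ||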
4438 | func testGetObjectReadAtFunctional() { | ||
4439 | // initialize logging params | ||
4440 | startTime := time.Now() | ||
4441 | testName := getFuncName() | ||
4442 | function := "GetObject(bucketName, objectName)" | ||
4443 | args := map[string]interface{}{} | ||
4444 | |||
4445 | // Seed random based on current time. | ||
4446 | rand.Seed(time.Now().Unix()) | ||
4447 | |||
4448 | // Instantiate new minio client object. | ||
4449 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
4450 | &minio.Options{ | ||
4451 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
4452 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
4453 | }) | ||
4454 | if err != nil { | ||
4455 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
4456 | return | ||
4457 | } | ||
4458 | |||
4459 | // Enable tracing, write to stderr. | ||
4460 | // c.TraceOn(os.Stderr) | ||
4461 | |||
4462 | // Set user agent. | ||
4463 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
4464 | |||
4465 | // Generate a new random bucket name. | ||
4466 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
4467 | args["bucketName"] = bucketName | ||
4468 | |||
4469 | // Make a new bucket. | ||
4470 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
4471 | if err != nil { | ||
4472 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
4473 | return | ||
4474 | } | ||
4475 | |||
4476 | defer cleanupBucket(bucketName, c) | ||
4477 | |||
4478 | // Generate 33 kB of data. | ||
4479 | bufSize := dataFileMap["datafile-33-kB"] | ||
4480 | reader := getDataReader("datafile-33-kB") | ||
4481 | defer reader.Close() | ||
4482 | |||
4483 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
4484 | args["objectName"] = objectName | ||
4485 | |||
4486 | buf, err := io.ReadAll(reader) | ||
4487 | if err != nil { | ||
4488 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
4489 | return | ||
4490 | } | ||
4491 | |||
4492 | // Save the data | ||
4493 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
4494 | if err != nil { | ||
4495 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
4496 | return | ||
4497 | } | ||
4498 | |||
4499 | // Read the data back | ||
4500 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
4501 | if err != nil { | ||
4502 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
4503 | return | ||
4504 | } | ||
4505 | offset := int64(2048) | ||
4506 | |||
4507 | // read directly | ||
4508 | buf1 := make([]byte, 512) | ||
4509 | buf2 := make([]byte, 512) | ||
4510 | buf3 := make([]byte, 512) | ||
4511 | buf4 := make([]byte, 512) | ||
4512 | |||
4513 | // Test ReadAt before Stat is called so that objectInfo doesn't change. | ||
4514 | m, err := r.ReadAt(buf1, offset) | ||
4515 | if err != nil { | ||
4516 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
4517 | return | ||
4518 | } | ||
4519 | if m != len(buf1) { | ||
4520 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf1), m), err) | ||
4521 | return | ||
4522 | } | ||
4523 | if !bytes.Equal(buf1, buf[offset:offset+512]) { | ||
4524 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
4525 | return | ||
4526 | } | ||
4527 | offset += 512 | ||
4528 | |||
4529 | st, err := r.Stat() | ||
4530 | if err != nil { | ||
4531 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
4532 | return | ||
4533 | } | ||
4534 | |||
4535 | if st.Size != int64(bufSize) { | ||
4536 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err) | ||
4537 | return | ||
4538 | } | ||
4539 | |||
4540 | m, err = r.ReadAt(buf2, offset) | ||
4541 | if err != nil { | ||
4542 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
4543 | return | ||
4544 | } | ||
4545 | if m != len(buf2) { | ||
4546 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf2), m), err) | ||
4547 | return | ||
4548 | } | ||
4549 | if !bytes.Equal(buf2, buf[offset:offset+512]) { | ||
4550 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
4551 | return | ||
4552 | } | ||
4553 | |||
4554 | offset += 512 | ||
4555 | m, err = r.ReadAt(buf3, offset) | ||
4556 | if err != nil { | ||
4557 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
4558 | return | ||
4559 | } | ||
4560 | if m != len(buf3) { | ||
4561 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf3), m), err) | ||
4562 | return | ||
4563 | } | ||
4564 | if !bytes.Equal(buf3, buf[offset:offset+512]) { | ||
4565 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
4566 | return | ||
4567 | } | ||
4568 | offset += 512 | ||
4569 | m, err = r.ReadAt(buf4, offset) | ||
4570 | if err != nil { | ||
4571 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
4572 | return | ||
4573 | } | ||
4574 | if m != len(buf4) { | ||
4575 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf4), m), err) | ||
4576 | return | ||
4577 | } | ||
4578 | if !bytes.Equal(buf4, buf[offset:offset+512]) { | ||
4579 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
4580 | return | ||
4581 | } | ||
4582 | |||
4583 | buf5 := make([]byte, len(buf)) | ||
4584 | // Read the whole object. | ||
4585 | m, err = r.ReadAt(buf5, 0) | ||
4586 | if err != nil { | ||
4587 | if err != io.EOF { | ||
4588 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
4589 | return | ||
4590 | } | ||
4591 | } | ||
4592 | if m != len(buf5) { | ||
4593 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf5), m), err) | ||
4594 | return | ||
4595 | } | ||
4596 | if !bytes.Equal(buf, buf5) { | ||
4597 | logError(testName, function, args, startTime, "", "Data read via GetObject does not match what was uploaded", err) | ||
4598 | return | ||
4599 | } | ||
4600 | |||
4601 | buf6 := make([]byte, len(buf)+1) | ||
4602 | // Read the whole object and beyond. | ||
4603 | _, err = r.ReadAt(buf6, 0) | ||
4604 | if err != nil { | ||
4605 | if err != io.EOF { | ||
4606 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
4607 | return | ||
4608 | } | ||
4609 | } | ||
4610 | |||
4611 | successLogger(testName, function, args, startTime).Info() | ||
4612 | } | ||
4613 | |||
4614 | // Reproduces issue https://github.com/minio/minio-go/issues/1137 | ||
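| // A sequential Read that drains the object through EOF must not break a | ||
| // subsequent ReadAt at an arbitrary offset. | ||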
4615 | func testGetObjectReadAtWhenEOFWasReached() { | ||
4616 | // initialize logging params | ||
4617 | startTime := time.Now() | ||
4618 | testName := getFuncName() | ||
4619 | function := "GetObject(bucketName, objectName)" | ||
4620 | args := map[string]interface{}{} | ||
4621 | |||
4622 | // Seed random based on current time. | ||
4623 | rand.Seed(time.Now().Unix()) | ||
4624 | |||
4625 | // Instantiate new minio client object. | ||
4626 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
4627 | &minio.Options{ | ||
4628 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
4629 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
4630 | }) | ||
4631 | if err != nil { | ||
4632 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
4633 | return | ||
4634 | } | ||
4635 | |||
4636 | // Enable tracing, write to stderr. | ||
4637 | // c.TraceOn(os.Stderr) | ||
4638 | |||
4639 | // Set user agent. | ||
4640 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
4641 | |||
4642 | // Generate a new random bucket name. | ||
4643 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
4644 | args["bucketName"] = bucketName | ||
4645 | |||
4646 | // Make a new bucket. | ||
4647 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
4648 | if err != nil { | ||
4649 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
4650 | return | ||
4651 | } | ||
4652 | |||
4653 | defer cleanupBucket(bucketName, c) | ||
4654 | |||
4655 | // Generate 33 kB of data. | ||
4656 | bufSize := dataFileMap["datafile-33-kB"] | ||
4657 | reader := getDataReader("datafile-33-kB") | ||
4658 | defer reader.Close() | ||
4659 | |||
4660 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
4661 | args["objectName"] = objectName | ||
4662 | |||
4663 | buf, err := io.ReadAll(reader) | ||
4664 | if err != nil { | ||
4665 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
4666 | return | ||
4667 | } | ||
4668 | |||
4669 | // Save the data | ||
4670 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
4671 | if err != nil { | ||
4672 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
4673 | return | ||
4674 | } | ||
4675 | |||
4676 | // Read the data back | ||
4677 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
4678 | if err != nil { | ||
4679 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
4680 | return | ||
4681 | } | ||
4682 | |||
4683 | // read directly | ||
4684 | buf1 := make([]byte, len(buf)) | ||
4685 | buf2 := make([]byte, 512) | ||
4686 | |||
4687 | m, err := r.Read(buf1) | ||
4688 | if err != nil { | ||
4689 | if err != io.EOF { | ||
4690 | logError(testName, function, args, startTime, "", "Read failed", err) | ||
4691 | return | ||
4692 | } | ||
4693 | } | ||
4694 | if m != len(buf1) { | ||
4695 | logError(testName, function, args, startTime, "", fmt.Sprintf("Read returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf1), m), err) | ||
4696 | return | ||
4697 | } | ||
4698 | if !bytes.Equal(buf1, buf) { | ||
4699 | logError(testName, function, args, startTime, "", "Read data does not match the uploaded data", err) | ||
4700 | return | ||
4701 | } | ||
4702 | |||
4703 | st, err := r.Stat() | ||
4704 | if err != nil { | ||
4705 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
4706 | return | ||
4707 | } | ||
4708 | |||
4709 | if st.Size != int64(bufSize) { | ||
4710 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err) | ||
4711 | return | ||
4712 | } | ||
4713 | |||
4714 | m, err = r.ReadAt(buf2, 512) | ||
4715 | if err != nil { | ||
4716 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
4717 | return | ||
4718 | } | ||
4719 | if m != len(buf2) { | ||
4720 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt returned fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf2), m), err) | ||
4721 | return | ||
4722 | } | ||
4723 | if !bytes.Equal(buf2, buf[512:1024]) { | ||
4724 | logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data", err) | ||
4725 | return | ||
4726 | } | ||
4727 | |||
4728 | successLogger(testName, function, args, startTime).Info() | ||
4729 | } | ||
4730 | |||
4731 | // Test Presigned Post Policy | ||
4732 | func testPresignedPostPolicy() { | ||
4733 | // initialize logging params | ||
4734 | startTime := time.Now() | ||
4735 | testName := getFuncName() | ||
4736 | function := "PresignedPostPolicy(policy)" | ||
4737 | args := map[string]interface{}{ | ||
4738 | "policy": "", | ||
4739 | } | ||
4740 | |||
4741 | // Seed random based on current time. | ||
4742 | rand.Seed(time.Now().Unix()) | ||
4743 | |||
4744 | // Instantiate new minio client object | ||
4745 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
4746 | &minio.Options{ | ||
4747 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
4748 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
4749 | }) | ||
4750 | if err != nil { | ||
4751 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
4752 | return | ||
4753 | } | ||
4754 | |||
4755 | // Enable tracing, write to stderr. | ||
4756 | // c.TraceOn(os.Stderr) | ||
4757 | |||
4758 | // Set user agent. | ||
4759 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
4760 | |||
4761 | // Generate a new random bucket name. | ||
4762 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
4763 | |||
4764 | // Make a new bucket in 'us-east-1' (source bucket). | ||
4765 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
4766 | if err != nil { | ||
4767 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
4768 | return | ||
4769 | } | ||
4770 | |||
4771 | defer cleanupBucket(bucketName, c) | ||
4772 | |||
4773 | // Generate 33 kB of data. | ||
4774 | reader := getDataReader("datafile-33-kB") | ||
4775 | defer reader.Close() | ||
4776 | |||
4777 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
4778 | // Azure requires the key to not start with a number | ||
4779 | metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") | ||
4780 | metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
4781 | |||
4782 | buf, err := io.ReadAll(reader) | ||
4783 | if err != nil { | ||
4784 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
4785 | return | ||
4786 | } | ||
4787 | |||
4788 | // Save the data | ||
4789 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
4790 | if err != nil { | ||
4791 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
4792 | return | ||
4793 | } | ||
4794 | |||
4795 | policy := minio.NewPostPolicy() | ||
4796 | |||
4797 | if err := policy.SetBucket(""); err == nil { | ||
4798 | logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err) | ||
4799 | return | ||
4800 | } | ||
4801 | if err := policy.SetKey(""); err == nil { | ||
4802 | logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) | ||
4803 | return | ||
4804 | } | ||
4805 | if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { | ||
4806 | logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) | ||
4807 | return | ||
4808 | } | ||
4809 | if err := policy.SetContentType(""); err == nil { | ||
4810 | logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) | ||
4811 | return | ||
4812 | } | ||
4813 | if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { | ||
4814 | logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) | ||
4815 | return | ||
4816 | } | ||
4817 | if err := policy.SetUserMetadata("", ""); err == nil { | ||
4818 | logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) | ||
4819 | return | ||
4820 | } | ||
4821 | |||
4822 | policy.SetBucket(bucketName) | ||
4823 | policy.SetKey(objectName) | ||
4824 | policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days | ||
4825 | policy.SetContentType("binary/octet-stream") | ||
4826 | policy.SetContentLengthRange(10, 1024*1024) | ||
4827 | policy.SetUserMetadata(metadataKey, metadataValue) | ||
4828 | |||
4829 | // Add CRC32C | ||
4830 | checksum := minio.ChecksumCRC32C.ChecksumBytes(buf) | ||
4831 | policy.SetChecksum(checksum) | ||
4832 | |||
4833 | args["policy"] = policy.String() | ||
4834 | |||
4835 | presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) | ||
4836 | if err != nil { | ||
4837 | logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) | ||
4838 | return | ||
4839 | } | ||
4840 | |||
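| // PresignedPostPolicy returns the upload URL plus the signed form fields; | ||
| // each field must be echoed back verbatim in the multipart POST body. | ||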
4841 | var formBuf bytes.Buffer | ||
4842 | writer := multipart.NewWriter(&formBuf) | ||
4843 | for k, v := range formData { | ||
4844 | writer.WriteField(k, v) | ||
4845 | } | ||
4846 | |||
4847 | // Get a 33 kB file to upload and test if the post policy works | ||
4848 | filePath := getMintDataDirFilePath("datafile-33-kB") | ||
4849 | if filePath == "" { | ||
4850 | // Make a temp file with 33 kB of data. | ||
4851 | file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") | ||
4852 | if err != nil { | ||
4853 | logError(testName, function, args, startTime, "", "TempFile creation failed", err) | ||
4854 | return | ||
4855 | } | ||
4856 | if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { | ||
4857 | logError(testName, function, args, startTime, "", "Copy failed", err) | ||
4858 | return | ||
4859 | } | ||
4860 | if err = file.Close(); err != nil { | ||
4861 | logError(testName, function, args, startTime, "", "File Close failed", err) | ||
4862 | return | ||
4863 | } | ||
4864 | filePath = file.Name() | ||
4865 | } | ||
4866 | |||
4867 | // add file to post request | ||
4868 | f, err := os.Open(filePath) | ||
4869 | if err != nil { | ||
4870 | logError(testName, function, args, startTime, "", "File open failed", err) | ||
4871 | return | ||
4872 | } | ||
4873 | defer f.Close() | ||
4874 | w, err := writer.CreateFormFile("file", filePath) | ||
4875 | if err != nil { | ||
4876 | logError(testName, function, args, startTime, "", "CreateFormFile failed", err) | ||
4877 | return | ||
4878 | } | ||
4879 | |||
4880 | _, err = io.Copy(w, f) | ||
4881 | if err != nil { | ||
4882 | logError(testName, function, args, startTime, "", "Copy failed", err) | ||
4883 | return | ||
4884 | } | ||
4885 | writer.Close() | ||
4886 | |||
4887 | transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) | ||
4888 | if err != nil { | ||
4889 | logError(testName, function, args, startTime, "", "DefaultTransport failed", err) | ||
4890 | return | ||
4891 | } | ||
4892 | |||
4893 | httpClient := &http.Client{ | ||
4894 | // Setting a sensible timeout of 30 seconds to wait for response | ||
4895 | // headers. The request is proactively canceled after 30 seconds | ||
4896 | // with no response. | ||
4897 | Timeout: 30 * time.Second, | ||
4898 | Transport: transport, | ||
4899 | } | ||
4900 | args["url"] = presignedPostPolicyURL.String() | ||
4901 | |||
4902 | req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) | ||
4903 | if err != nil { | ||
4904 | logError(testName, function, args, startTime, "", "Http request failed", err) | ||
4905 | return | ||
4906 | } | ||
4907 | |||
4908 | req.Header.Set("Content-Type", writer.FormDataContentType()) | ||
4909 | |||
4910 | // make post request with correct form data | ||
4911 | res, err := httpClient.Do(req) | ||
4912 | if err != nil { | ||
4913 | logError(testName, function, args, startTime, "", "Http request failed", err) | ||
4914 | return | ||
4915 | } | ||
4916 | defer res.Body.Close() | ||
4917 | if res.StatusCode != http.StatusNoContent { | ||
4918 | logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) | ||
4919 | return | ||
4920 | } | ||
4921 | |||
4922 | // The expected Location header is the absolute URL of the uploaded object | ||
4923 | var scheme string | ||
4924 | if mustParseBool(os.Getenv(enableHTTPS)) { | ||
4925 | scheme = "https://" | ||
4926 | } else { | ||
4927 | scheme = "http://" | ||
4928 | } | ||
4929 | |||
4930 | expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName | ||
4931 | expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName | ||
4932 | |||
4933 | if !strings.Contains(expectedLocation, "s3.amazonaws.com/") { | ||
4934 | // Test when not against AWS S3. | ||
4935 | if val, ok := res.Header["Location"]; ok { | ||
4936 | if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { | ||
4937 | logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err) | ||
4938 | return | ||
4939 | } | ||
4940 | } else { | ||
4941 | logError(testName, function, args, startTime, "", "Location not found in header response", err) | ||
4942 | return | ||
4943 | } | ||
4944 | } | ||
4945 | want := checksum.Encoded() | ||
4946 | if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want { | ||
4947 | logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil) | ||
4948 | return | ||
4949 | } | ||
4950 | |||
4951 | successLogger(testName, function, args, startTime).Info() | ||
4952 | } | ||
4953 | |||
4954 | // Tests copy object | ||
4955 | func testCopyObject() { | ||
4956 | // initialize logging params | ||
4957 | startTime := time.Now() | ||
4958 | testName := getFuncName() | ||
4959 | function := "CopyObject(dst, src)" | ||
4960 | args := map[string]interface{}{} | ||
4961 | |||
4962 | // Seed random based on current time. | ||
4963 | rand.Seed(time.Now().Unix()) | ||
4964 | |||
4965 | // Instantiate new minio client object | ||
4966 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
4967 | &minio.Options{ | ||
4968 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
4969 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
4970 | }) | ||
4971 | if err != nil { | ||
4972 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
4973 | return | ||
4974 | } | ||
4975 | |||
4976 | // Enable tracing, write to stderr. | ||
4977 | // c.TraceOn(os.Stderr) | ||
4978 | |||
4979 | // Set user agent. | ||
4980 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
4981 | |||
4982 | // Generate a new random bucket name. | ||
4983 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
4984 | |||
4985 | // Make a new bucket in 'us-east-1' (source bucket). | ||
4986 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
4987 | if err != nil { | ||
4988 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
4989 | return | ||
4990 | } | ||
4991 | |||
4992 | defer cleanupBucket(bucketName, c) | ||
4993 | |||
4994 | // Make a new bucket in 'us-east-1' (destination bucket). | ||
4995 | err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) | ||
4996 | if err != nil { | ||
4997 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
4998 | return | ||
4999 | } | ||
5000 | defer cleanupBucket(bucketName+"-copy", c) | ||
5001 | |||
5002 | // Generate 33 kB of data. | ||
5003 | bufSize := dataFileMap["datafile-33-kB"] | ||
5004 | reader := getDataReader("datafile-33-kB") | ||
5005 | |||
5006 | // Save the data | ||
5007 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
5008 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
5009 | if err != nil { | ||
5010 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
5011 | return | ||
5012 | } | ||
5013 | |||
5014 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
5015 | if err != nil { | ||
5016 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
5017 | return | ||
5018 | } | ||
5019 | // Stat the source object to fetch its ETag for the copy conditions below. | ||
5020 | objInfo, err := r.Stat() | ||
5021 | if err != nil { | ||
5022 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
5023 | return | ||
5024 | } | ||
5025 | |||
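| // Copy conditions such as MatchETag and MatchModifiedSince are sent as | ||
| // x-amz-copy-source-if-match / x-amz-copy-source-if-modified-since headers. | ||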
5026 | // Copy Source | ||
5027 | src := minio.CopySrcOptions{ | ||
5028 | Bucket: bucketName, | ||
5029 | Object: objectName, | ||
5030 | // Set copy conditions. | ||
5031 | MatchETag: objInfo.ETag, | ||
5032 | MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), | ||
5033 | } | ||
5034 | args["src"] = src | ||
5035 | |||
5036 | dst := minio.CopyDestOptions{ | ||
5037 | Bucket: bucketName + "-copy", | ||
5038 | Object: objectName + "-copy", | ||
5039 | } | ||
5040 | |||
5041 | // Perform the Copy | ||
5042 | if _, err = c.CopyObject(context.Background(), dst, src); err != nil { | ||
5043 | logError(testName, function, args, startTime, "", "CopyObject failed", err) | ||
5044 | return | ||
5045 | } | ||
5046 | |||
5047 | // Source object | ||
5048 | r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
5049 | if err != nil { | ||
5050 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
5051 | return | ||
5052 | } | ||
5053 | |||
5054 | // Destination object | ||
5055 | readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) | ||
5056 | if err != nil { | ||
5057 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
5058 | return | ||
5059 | } | ||
5060 | |||
5061 | // Check the various fields of source object against destination object. | ||
5062 | objInfo, err = r.Stat() | ||
5063 | if err != nil { | ||
5064 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
5065 | return | ||
5066 | } | ||
5067 | objInfoCopy, err := readerCopy.Stat() | ||
5068 | if err != nil { | ||
5069 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
5070 | return | ||
5071 | } | ||
5072 | if objInfo.Size != objInfoCopy.Size { | ||
5073 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", objInfoCopy.Size, objInfo.Size), err) | ||
5074 | return | ||
5075 | } | ||
5076 | |||
5077 | if err := crcMatchesName(r, "datafile-33-kB"); err != nil { | ||
5078 | logError(testName, function, args, startTime, "", "data CRC check failed", err) | ||
5079 | return | ||
5080 | } | ||
5081 | if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil { | ||
5082 | logError(testName, function, args, startTime, "", "copy data CRC check failed", err) | ||
5083 | return | ||
5084 | } | ||
5085 | // Close all the get readers before proceeding with CopyObject operations. | ||
5086 | r.Close() | ||
5087 | readerCopy.Close() | ||
5088 | |||
5089 | // CopyObject again but with wrong conditions | ||
5090 | src = minio.CopySrcOptions{ | ||
5091 | Bucket: bucketName, | ||
5092 | Object: objectName, | ||
5093 | MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), | ||
5094 | NoMatchETag: objInfo.ETag, | ||
5095 | } | ||
5096 | |||
5097 | // Perform the Copy which should fail | ||
5098 | _, err = c.CopyObject(context.Background(), dst, src) | ||
5099 | if err == nil { | ||
5100 | logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) | ||
5101 | return | ||
5102 | } | ||
5103 | |||
5104 | src = minio.CopySrcOptions{ | ||
5105 | Bucket: bucketName, | ||
5106 | Object: objectName, | ||
5107 | } | ||
5108 | |||
5109 | dst = minio.CopyDestOptions{ | ||
5110 | Bucket: bucketName, | ||
5111 | Object: objectName, | ||
5112 | ReplaceMetadata: true, | ||
5113 | UserMetadata: map[string]string{ | ||
5114 | "Copy": "should be same", | ||
5115 | }, | ||
5116 | } | ||
5117 | args["dst"] = dst | ||
5118 | args["src"] = src | ||
5119 | |||
5120 | _, err = c.CopyObject(context.Background(), dst, src) | ||
5121 | if err != nil { | ||
5122 | logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) | ||
5123 | return | ||
5124 | } | ||
5125 | |||
5126 | oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
5127 | if err != nil { | ||
5128 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
5129 | return | ||
5130 | } | ||
5131 | |||
5132 | stOpts := minio.StatObjectOptions{} | ||
5133 | stOpts.SetMatchETag(oi.ETag) | ||
5134 | objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts) | ||
5135 | if err != nil { | ||
5136 | logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) | ||
5137 | return | ||
5138 | } | ||
5139 | |||
5140 | if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { | ||
5141 | logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) | ||
5142 | return | ||
5143 | } | ||
5144 | |||
5145 | successLogger(testName, function, args, startTime).Info() | ||
5146 | } | ||
5147 | |||
5148 | // Tests SSE-C get object ReadSeeker interface methods. | ||
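| // With SSE-C the client supplies the encryption key on every request, so the | ||
| // same key material must accompany both PutObject and GetObject. Here the key | ||
| // is derived from a passphrase and salt via encrypt.DefaultPBKDF. | ||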
5149 | func testSSECEncryptedGetObjectReadSeekFunctional() { | ||
5150 | // initialize logging params | ||
5151 | startTime := time.Now() | ||
5152 | testName := getFuncName() | ||
5153 | function := "GetObject(bucketName, objectName)" | ||
5154 | args := map[string]interface{}{} | ||
5155 | |||
5156 | // Seed random based on current time. | ||
5157 | rand.Seed(time.Now().Unix()) | ||
5158 | |||
5159 | // Instantiate new minio client object. | ||
5160 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
5161 | &minio.Options{ | ||
5162 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
5163 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
5164 | }) | ||
5165 | if err != nil { | ||
5166 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
5167 | return | ||
5168 | } | ||
5169 | |||
5170 | // Enable tracing, write to stderr. | ||
5171 | // c.TraceOn(os.Stderr) | ||
5172 | |||
5173 | // Set user agent. | ||
5174 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
5175 | |||
5176 | // Generate a new random bucket name. | ||
5177 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
5178 | args["bucketName"] = bucketName | ||
5179 | |||
5180 | // Make a new bucket. | ||
5181 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
5182 | if err != nil { | ||
5183 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
5184 | return | ||
5185 | } | ||
5186 | |||
5187 | defer func() { | ||
5188 | // Delete all objects and buckets | ||
5189 | if err = cleanupBucket(bucketName, c); err != nil { | ||
5190 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
5191 | return | ||
5192 | } | ||
5193 | }() | ||
5194 | |||
5195 | // Generate 129 MiB of data. | ||
5196 | bufSize := dataFileMap["datafile-129-MB"] | ||
5197 | reader := getDataReader("datafile-129-MB") | ||
5198 | defer reader.Close() | ||
5199 | |||
5200 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
5201 | args["objectName"] = objectName | ||
5202 | |||
5203 | buf, err := io.ReadAll(reader) | ||
5204 | if err != nil { | ||
5205 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
5206 | return | ||
5207 | } | ||
5208 | |||
5209 | // Save the data | ||
5210 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ | ||
5211 | ContentType: "binary/octet-stream", | ||
5212 | ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), | ||
5213 | }) | ||
5214 | if err != nil { | ||
5215 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
5216 | return | ||
5217 | } | ||
5218 | |||
5219 | // Read the data back | ||
5220 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ | ||
5221 | ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), | ||
5222 | }) | ||
5223 | if err != nil { | ||
5224 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
5225 | return | ||
5226 | } | ||
5227 | defer r.Close() | ||
5228 | |||
5229 | st, err := r.Stat() | ||
5230 | if err != nil { | ||
5231 | logError(testName, function, args, startTime, "", "Stat object failed", err) | ||
5232 | return | ||
5233 | } | ||
5234 | |||
5235 | if st.Size != int64(bufSize) { | ||
5236 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err) | ||
5237 | return | ||
5238 | } | ||
5239 | |||
5240 | // The following helper compares data read from the reader after a seek | ||
5241 | // against the corresponding range of the original buffer. | ||
5242 | cmpData := func(r io.Reader, start, end int) { | ||
5243 | if end-start == 0 { | ||
5244 | return | ||
5245 | } | ||
5246 | buffer := bytes.NewBuffer([]byte{}) | ||
5247 | if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { | ||
5248 | if err != io.EOF { | ||
5249 | logError(testName, function, args, startTime, "", "CopyN failed", err) | ||
5250 | return | ||
5251 | } | ||
5252 | } | ||
5253 | if !bytes.Equal(buf[start:end], buffer.Bytes()) { | ||
5254 | logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) | ||
5255 | return | ||
5256 | } | ||
5257 | } | ||
5258 | |||
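| // whence values: 0=io.SeekStart, 1=io.SeekCurrent, 2=io.SeekEnd. Unlike the | ||
| // unencrypted variant above, these cases assert exact error values. | ||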
5259 | testCases := []struct { | ||
5260 | offset int64 | ||
5261 | whence int | ||
5262 | pos int64 | ||
5263 | err error | ||
5264 | shouldCmp bool | ||
5265 | start int | ||
5266 | end int | ||
5267 | }{ | ||
5268 | // Start from offset 0, fetch data and compare | ||
5269 | {0, 0, 0, nil, true, 0, 0}, | ||
5270 | // Start from offset 2048, fetch data and compare | ||
5271 | {2048, 0, 2048, nil, true, 2048, bufSize}, | ||
5272 | // Start from offset larger than possible | ||
5273 | {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, | ||
5274 | // Move to offset 0 without comparing | ||
5275 | {0, 0, 0, nil, false, 0, 0}, | ||
5276 | // Move one step forward and compare | ||
5277 | {1, 1, 1, nil, true, 1, bufSize}, | ||
5278 | // Move larger than possible | ||
5279 | {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, | ||
5280 | // Provide negative offset with CUR_SEEK | ||
5281 | {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, | ||
5282 | // Test with whence SEEK_END and with positive offset | ||
5283 | {1024, 2, 0, io.EOF, false, 0, 0}, | ||
5284 | // Test with whence SEEK_END and with negative offset | ||
5285 | {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, | ||
5286 | // Test with whence SEEK_END and with large negative offset | ||
5287 | {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, | ||
5288 | // Test with invalid whence | ||
5289 | {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, | ||
5290 | } | ||
5291 | |||
5292 | for i, testCase := range testCases { | ||
5293 | // Perform seek operation | ||
5294 | n, err := r.Seek(testCase.offset, testCase.whence) | ||
5295 | if err != nil && testCase.err == nil { | ||
5296 | // We expected success. | ||
5297 | logError(testName, function, args, startTime, "", | ||
5298 | fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err) | ||
5299 | return | ||
5300 | } | ||
5301 | if err == nil && testCase.err != nil { | ||
5302 | // We expected failure, but got success. | ||
5303 | logError(testName, function, args, startTime, "", | ||
5304 | fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err) | ||
5305 | return | ||
5306 | } | ||
5307 | if err != nil && testCase.err != nil { | ||
5308 | if err.Error() != testCase.err.Error() { | ||
5309 | // We expect a specific error | ||
5310 | logError(testName, function, args, startTime, "", | ||
5311 | fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err) | ||
5312 | return | ||
5313 | } | ||
5314 | } | ||
5315 | // Check the returned seek pos | ||
5316 | if n != testCase.pos { | ||
5317 | logError(testName, function, args, startTime, "", | ||
5318 | fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err) | ||
5319 | return | ||
5320 | } | ||
5321 | // Compare only if shouldCmp is activated | ||
5322 | if testCase.shouldCmp { | ||
5323 | cmpData(r, testCase.start, testCase.end) | ||
5324 | } | ||
5325 | } | ||
5326 | |||
5327 | successLogger(testName, function, args, startTime).Info() | ||
5328 | } | ||
5329 | |||
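// ---- Editor's note: illustrative sketch, not part of the upstream test suite. ----
// The seek table above exercises io.Seeker semantics: whence 0 is io.SeekStart,
// 1 is io.SeekCurrent, and 2 is io.SeekEnd. For valid offsets, minio.Object.Seek
// is expected to agree with bytes.Reader, as this self-contained sketch shows.
func exampleSeekSemantics() {
	r := bytes.NewReader(make([]byte, 1024))
	pos, _ := r.Seek(100, io.SeekStart) // absolute: pos == 100
	pos, _ = r.Seek(28, io.SeekCurrent) // relative to current: pos == 128
	pos, _ = r.Seek(-24, io.SeekEnd)    // relative to end: pos == 1000
	fmt.Println(pos)
}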
5330 | // Tests SSE-S3 get object io.ReadSeeker interface methods. | ||
5331 | func testSSES3EncryptedGetObjectReadSeekFunctional() { | ||
5332 | // initialize logging params | ||
5333 | startTime := time.Now() | ||
5334 | testName := getFuncName() | ||
5335 | function := "GetObject(bucketName, objectName)" | ||
5336 | args := map[string]interface{}{} | ||
5337 | |||
5338 | // Seed random based on current time. | ||
5339 | rand.Seed(time.Now().Unix()) | ||
5340 | |||
5341 | // Instantiate new minio client object. | ||
5342 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
5343 | &minio.Options{ | ||
5344 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
5345 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
5346 | }) | ||
5347 | if err != nil { | ||
5348 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
5349 | return | ||
5350 | } | ||
5351 | |||
5352 | // Enable tracing, write to stderr. | ||
5353 | // c.TraceOn(os.Stderr) | ||
5354 | |||
5355 | // Set user agent. | ||
5356 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
5357 | |||
5358 | // Generate a new random bucket name. | ||
5359 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
5360 | args["bucketName"] = bucketName | ||
5361 | |||
5362 | // Make a new bucket. | ||
5363 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
5364 | if err != nil { | ||
5365 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
5366 | return | ||
5367 | } | ||
5368 | |||
5369 | defer func() { | ||
5370 | // Delete all objects and buckets | ||
5371 | if err = cleanupBucket(bucketName, c); err != nil { | ||
5372 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
5373 | return | ||
5374 | } | ||
5375 | }() | ||
5376 | |||
5377 | // Generate 129MiB of data. | ||
5378 | bufSize := dataFileMap["datafile-129-MB"] | ||
5379 | reader := getDataReader("datafile-129-MB") | ||
5380 | defer reader.Close() | ||
5381 | |||
5382 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
5383 | args["objectName"] = objectName | ||
5384 | |||
5385 | buf, err := io.ReadAll(reader) | ||
5386 | if err != nil { | ||
5387 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
5388 | return | ||
5389 | } | ||
5390 | |||
5391 | // Save the data | ||
5392 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ | ||
5393 | ContentType: "binary/octet-stream", | ||
5394 | ServerSideEncryption: encrypt.NewSSE(), | ||
5395 | }) | ||
5396 | if err != nil { | ||
5397 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
5398 | return | ||
5399 | } | ||
5400 | |||
5401 | // Read the data back | ||
5402 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
5403 | if err != nil { | ||
5404 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
5405 | return | ||
5406 | } | ||
5407 | defer r.Close() | ||
5408 | |||
5409 | st, err := r.Stat() | ||
5410 | if err != nil { | ||
5411 | logError(testName, function, args, startTime, "", "Stat object failed", err) | ||
5412 | return | ||
5413 | } | ||
5414 | |||
5415 | if st.Size != int64(bufSize) { | ||
5416 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err) | ||
5417 | return | ||
5418 | } | ||
5419 | |||
5420 | // The following function compares data read from the reader after a seek | ||
5421 | // with the corresponding range of the original buffer. | ||
5422 | cmpData := func(r io.Reader, start, end int) { | ||
5423 | if end-start == 0 { | ||
5424 | return | ||
5425 | } | ||
5426 | buffer := bytes.NewBuffer([]byte{}) | ||
5427 | if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { | ||
5428 | if err != io.EOF { | ||
5429 | logError(testName, function, args, startTime, "", "CopyN failed", err) | ||
5430 | return | ||
5431 | } | ||
5432 | } | ||
5433 | if !bytes.Equal(buf[start:end], buffer.Bytes()) { | ||
5434 | logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) | ||
5435 | return | ||
5436 | } | ||
5437 | } | ||
5438 | |||
5439 | testCases := []struct { | ||
5440 | offset int64 | ||
5441 | whence int | ||
5442 | pos int64 | ||
5443 | err error | ||
5444 | shouldCmp bool | ||
5445 | start int | ||
5446 | end int | ||
5447 | }{ | ||
5448 | // Start from offset 0, fetch data and compare | ||
5449 | {0, 0, 0, nil, true, 0, 0}, | ||
5450 | // Start from offset 2048, fetch data and compare | ||
5451 | {2048, 0, 2048, nil, true, 2048, bufSize}, | ||
5452 | // Start from offset larger than possible | ||
5453 | {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, | ||
5454 | // Move to offset 0 without comparing | ||
5455 | {0, 0, 0, nil, false, 0, 0}, | ||
5456 | // Move one step forward and compare | ||
5457 | {1, 1, 1, nil, true, 1, bufSize}, | ||
5458 | // Move larger than possible | ||
5459 | {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, | ||
5460 | // Provide negative offset with CUR_SEEK | ||
5461 | {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, | ||
5462 | // Test with whence SEEK_END and with positive offset | ||
5463 | {1024, 2, 0, io.EOF, false, 0, 0}, | ||
5464 | // Test with whence SEEK_END and with negative offset | ||
5465 | {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, | ||
5466 | // Test with whence SEEK_END and with large negative offset | ||
5467 | {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, | ||
5468 | // Test with invalid whence | ||
5469 | {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, | ||
5470 | } | ||
5471 | |||
5472 | for i, testCase := range testCases { | ||
5473 | // Perform seek operation | ||
5474 | n, err := r.Seek(testCase.offset, testCase.whence) | ||
5475 | if err != nil && testCase.err == nil { | ||
5476 | // We expected success. | ||
5477 | logError(testName, function, args, startTime, "", | ||
5478 | fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err) | ||
5479 | return | ||
5480 | } | ||
5481 | if err == nil && testCase.err != nil { | ||
5482 | // We expected failure, but got success. | ||
5483 | logError(testName, function, args, startTime, "", | ||
5484 | fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err) | ||
5485 | return | ||
5486 | } | ||
5487 | if err != nil && testCase.err != nil { | ||
5488 | if err.Error() != testCase.err.Error() { | ||
5489 | // We expect a specific error | ||
5490 | logError(testName, function, args, startTime, "", | ||
5491 | fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err) | ||
5492 | return | ||
5493 | } | ||
5494 | } | ||
5495 | // Check the returned seek pos | ||
5496 | if n != testCase.pos { | ||
5497 | logError(testName, function, args, startTime, "", | ||
5498 | fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err) | ||
5499 | return | ||
5500 | } | ||
5501 | // Compare only if shouldCmp is activated | ||
5502 | if testCase.shouldCmp { | ||
5503 | cmpData(r, testCase.start, testCase.end) | ||
5504 | } | ||
5505 | } | ||
5506 | |||
5507 | successLogger(testName, function, args, startTime).Info() | ||
5508 | } | ||
5509 | |||
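// ---- Editor's note: illustrative sketch, not part of the upstream test suite. ----
// Note the asymmetry the two ReadSeek tests rely on: SSE-S3 keys are managed by
// the server, so GetObject above passes empty options, while SSE-C requires the
// same customer key on every read. A minimal sketch (bucket and object names are
// placeholders):
func exampleSSEReadOptions(c *minio.Client) error {
	ctx := context.Background()
	// SSE-S3: no encryption material needed on GET; the server decrypts transparently.
	obj, err := c.GetObject(ctx, "bucket", "sse-s3-object", minio.GetObjectOptions{})
	if err != nil {
		return err
	}
	obj.Close()
	// SSE-C: the customer key used on PUT must accompany the GET as well.
	sse := encrypt.DefaultPBKDF([]byte("password"), []byte("bucket"+"sse-c-object"))
	obj, err = c.GetObject(ctx, "bucket", "sse-c-object", minio.GetObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		return err
	}
	return obj.Close()
}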
5510 | // Tests SSE-C get object ReaderAt interface methods. | ||
5511 | func testSSECEncryptedGetObjectReadAtFunctional() { | ||
5512 | // initialize logging params | ||
5513 | startTime := time.Now() | ||
5514 | testName := getFuncName() | ||
5515 | function := "GetObject(bucketName, objectName)" | ||
5516 | args := map[string]interface{}{} | ||
5517 | |||
5518 | // Seed random based on current time. | ||
5519 | rand.Seed(time.Now().Unix()) | ||
5520 | |||
5521 | // Instantiate new minio client object. | ||
5522 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
5523 | &minio.Options{ | ||
5524 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
5525 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
5526 | }) | ||
5527 | if err != nil { | ||
5528 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
5529 | return | ||
5530 | } | ||
5531 | |||
5532 | // Enable tracing, write to stderr. | ||
5533 | // c.TraceOn(os.Stderr) | ||
5534 | |||
5535 | // Set user agent. | ||
5536 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
5537 | |||
5538 | // Generate a new random bucket name. | ||
5539 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
5540 | args["bucketName"] = bucketName | ||
5541 | |||
5542 | // Make a new bucket. | ||
5543 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
5544 | if err != nil { | ||
5545 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
5546 | return | ||
5547 | } | ||
5548 | |||
5549 | defer cleanupBucket(bucketName, c) | ||
5550 | |||
5551 | // Generate 129MiB of data. | ||
5552 | bufSize := dataFileMap["datafile-129-MB"] | ||
5553 | reader := getDataReader("datafile-129-MB") | ||
5554 | defer reader.Close() | ||
5555 | |||
5556 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
5557 | args["objectName"] = objectName | ||
5558 | |||
5559 | buf, err := io.ReadAll(reader) | ||
5560 | if err != nil { | ||
5561 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
5562 | return | ||
5563 | } | ||
5564 | |||
5565 | // Save the data | ||
5566 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ | ||
5567 | ContentType: "binary/octet-stream", | ||
5568 | ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), | ||
5569 | }) | ||
5570 | if err != nil { | ||
5571 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
5572 | return | ||
5573 | } | ||
5574 | |||
5575 | // read the data back | ||
5576 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ | ||
5577 | ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), | ||
5578 | }) | ||
5579 | if err != nil { | ||
5580 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
5581 | return | ||
5582 | } | ||
5583 | defer r.Close() | ||
5584 | |||
5585 | offset := int64(2048) | ||
5586 | |||
5587 | // read directly | ||
5588 | buf1 := make([]byte, 512) | ||
5589 | buf2 := make([]byte, 512) | ||
5590 | buf3 := make([]byte, 512) | ||
5591 | buf4 := make([]byte, 512) | ||
5592 | |||
5593 | // Test ReadAt before Stat is called so that objectInfo doesn't change. | ||
5594 | m, err := r.ReadAt(buf1, offset) | ||
5595 | if err != nil { | ||
5596 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5597 | return | ||
5598 | } | ||
5599 | if m != len(buf1) { | ||
5600 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf1), m), err) | ||
5601 | return | ||
5602 | } | ||
5603 | if !bytes.Equal(buf1, buf[offset:offset+512]) { | ||
5604 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
5605 | return | ||
5606 | } | ||
5607 | offset += 512 | ||
5608 | |||
5609 | st, err := r.Stat() | ||
5610 | if err != nil { | ||
5611 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
5612 | return | ||
5613 | } | ||
5614 | |||
5615 | if st.Size != int64(bufSize) { | ||
5616 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err) | ||
5617 | return | ||
5618 | } | ||
5619 | |||
5620 | m, err = r.ReadAt(buf2, offset) | ||
5621 | if err != nil { | ||
5622 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5623 | return | ||
5624 | } | ||
5625 | if m != len(buf2) { | ||
5626 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf2), m), err) | ||
5627 | return | ||
5628 | } | ||
5629 | if !bytes.Equal(buf2, buf[offset:offset+512]) { | ||
5630 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
5631 | return | ||
5632 | } | ||
5633 | offset += 512 | ||
5634 | m, err = r.ReadAt(buf3, offset) | ||
5635 | if err != nil { | ||
5636 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5637 | return | ||
5638 | } | ||
5639 | if m != len(buf3) { | ||
5640 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf3), m), err) | ||
5641 | return | ||
5642 | } | ||
5643 | if !bytes.Equal(buf3, buf[offset:offset+512]) { | ||
5644 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
5645 | return | ||
5646 | } | ||
5647 | offset += 512 | ||
5648 | m, err = r.ReadAt(buf4, offset) | ||
5649 | if err != nil { | ||
5650 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5651 | return | ||
5652 | } | ||
5653 | if m != len(buf4) { | ||
5654 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf4), m), err) | ||
5655 | return | ||
5656 | } | ||
5657 | if !bytes.Equal(buf4, buf[offset:offset+512]) { | ||
5658 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
5659 | return | ||
5660 | } | ||
5661 | |||
5662 | buf5 := make([]byte, len(buf)) | ||
5663 | // Read the whole object. | ||
5664 | m, err = r.ReadAt(buf5, 0) | ||
5665 | if err != nil { | ||
5666 | if err != io.EOF { | ||
5667 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5668 | return | ||
5669 | } | ||
5670 | } | ||
5671 | if m != len(buf5) { | ||
5672 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf5), m), err) | ||
5673 | return | ||
5674 | } | ||
5675 | if !bytes.Equal(buf, buf5) { | ||
5676 | logError(testName, function, args, startTime, "", "GetObject returned different data than what was previously uploaded", err) | ||
5677 | return | ||
5678 | } | ||
5679 | |||
5680 | buf6 := make([]byte, len(buf)+1) | ||
5681 | // Read the whole object and beyond. | ||
5682 | _, err = r.ReadAt(buf6, 0) | ||
5683 | if err != nil { | ||
5684 | if err != io.EOF { | ||
5685 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5686 | return | ||
5687 | } | ||
5688 | } | ||
5689 | |||
5690 | successLogger(testName, function, args, startTime).Info() | ||
5691 | } | ||
5692 | |||
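// ---- Editor's note: illustrative sketch, not part of the upstream test suite. ----
// ReadAt is a stateless, positional read: by the io.ReaderAt contract it either
// fills the buffer completely or returns an error, which is why the test above can
// interleave ReadAt calls with Stat. Since minio.Object implements io.ReaderAt, it
// also composes with io.SectionReader to stream a single byte range:
func exampleSectionRead(r io.ReaderAt) ([]byte, error) {
	section := io.NewSectionReader(r, 2048, 512) // offset 2048, length 512
	return io.ReadAll(section)
}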
5693 | // Tests SSE-S3 get object ReaderAt interface methods. | ||
5694 | func testSSES3EncryptedGetObjectReadAtFunctional() { | ||
5695 | // initialize logging params | ||
5696 | startTime := time.Now() | ||
5697 | testName := getFuncName() | ||
5698 | function := "GetObject(bucketName, objectName)" | ||
5699 | args := map[string]interface{}{} | ||
5700 | |||
5701 | // Seed random based on current time. | ||
5702 | rand.Seed(time.Now().Unix()) | ||
5703 | |||
5704 | // Instantiate new minio client object. | ||
5705 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
5706 | &minio.Options{ | ||
5707 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
5708 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
5709 | }) | ||
5710 | if err != nil { | ||
5711 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
5712 | return | ||
5713 | } | ||
5714 | |||
5715 | // Enable tracing, write to stderr. | ||
5716 | // c.TraceOn(os.Stderr) | ||
5717 | |||
5718 | // Set user agent. | ||
5719 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
5720 | |||
5721 | // Generate a new random bucket name. | ||
5722 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
5723 | args["bucketName"] = bucketName | ||
5724 | |||
5725 | // Make a new bucket. | ||
5726 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
5727 | if err != nil { | ||
5728 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
5729 | return | ||
5730 | } | ||
5731 | |||
5732 | defer cleanupBucket(bucketName, c) | ||
5733 | |||
5734 | // Generate 129MiB of data. | ||
5735 | bufSize := dataFileMap["datafile-129-MB"] | ||
5736 | reader := getDataReader("datafile-129-MB") | ||
5737 | defer reader.Close() | ||
5738 | |||
5739 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
5740 | args["objectName"] = objectName | ||
5741 | |||
5742 | buf, err := io.ReadAll(reader) | ||
5743 | if err != nil { | ||
5744 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
5745 | return | ||
5746 | } | ||
5747 | |||
5748 | // Save the data | ||
5749 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ | ||
5750 | ContentType: "binary/octet-stream", | ||
5751 | ServerSideEncryption: encrypt.NewSSE(), | ||
5752 | }) | ||
5753 | if err != nil { | ||
5754 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
5755 | return | ||
5756 | } | ||
5757 | |||
5758 | // read the data back | ||
5759 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
5760 | if err != nil { | ||
5761 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
5762 | return | ||
5763 | } | ||
5764 | defer r.Close() | ||
5765 | |||
5766 | offset := int64(2048) | ||
5767 | |||
5768 | // read directly | ||
5769 | buf1 := make([]byte, 512) | ||
5770 | buf2 := make([]byte, 512) | ||
5771 | buf3 := make([]byte, 512) | ||
5772 | buf4 := make([]byte, 512) | ||
5773 | |||
5774 | // Test ReadAt before Stat is called so that objectInfo doesn't change. | ||
5775 | m, err := r.ReadAt(buf1, offset) | ||
5776 | if err != nil { | ||
5777 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5778 | return | ||
5779 | } | ||
5780 | if m != len(buf1) { | ||
5781 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf1), m), err) | ||
5782 | return | ||
5783 | } | ||
5784 | if !bytes.Equal(buf1, buf[offset:offset+512]) { | ||
5785 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
5786 | return | ||
5787 | } | ||
5788 | offset += 512 | ||
5789 | |||
5790 | st, err := r.Stat() | ||
5791 | if err != nil { | ||
5792 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
5793 | return | ||
5794 | } | ||
5795 | |||
5796 | if st.Size != int64(bufSize) { | ||
5797 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err) | ||
5798 | return | ||
5799 | } | ||
5800 | |||
5801 | m, err = r.ReadAt(buf2, offset) | ||
5802 | if err != nil { | ||
5803 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5804 | return | ||
5805 | } | ||
5806 | if m != len(buf2) { | ||
5807 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf2), m), err) | ||
5808 | return | ||
5809 | } | ||
5810 | if !bytes.Equal(buf2, buf[offset:offset+512]) { | ||
5811 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
5812 | return | ||
5813 | } | ||
5814 | offset += 512 | ||
5815 | m, err = r.ReadAt(buf3, offset) | ||
5816 | if err != nil { | ||
5817 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5818 | return | ||
5819 | } | ||
5820 | if m != len(buf3) { | ||
5821 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf3), m), err) | ||
5822 | return | ||
5823 | } | ||
5824 | if !bytes.Equal(buf3, buf[offset:offset+512]) { | ||
5825 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
5826 | return | ||
5827 | } | ||
5828 | offset += 512 | ||
5829 | m, err = r.ReadAt(buf4, offset) | ||
5830 | if err != nil { | ||
5831 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5832 | return | ||
5833 | } | ||
5834 | if m != len(buf4) { | ||
5835 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf4), m), err) | ||
5836 | return | ||
5837 | } | ||
5838 | if !bytes.Equal(buf4, buf[offset:offset+512]) { | ||
5839 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
5840 | return | ||
5841 | } | ||
5842 | |||
5843 | buf5 := make([]byte, len(buf)) | ||
5844 | // Read the whole object. | ||
5845 | m, err = r.ReadAt(buf5, 0) | ||
5846 | if err != nil { | ||
5847 | if err != io.EOF { | ||
5848 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5849 | return | ||
5850 | } | ||
5851 | } | ||
5852 | if m != len(buf5) { | ||
5853 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf5), m), err) | ||
5854 | return | ||
5855 | } | ||
5856 | if !bytes.Equal(buf, buf5) { | ||
5857 | logError(testName, function, args, startTime, "", "GetObject returned different data than what was previously uploaded", err) | ||
5858 | return | ||
5859 | } | ||
5860 | |||
5861 | buf6 := make([]byte, len(buf)+1) | ||
5862 | // Read the whole object and beyond. | ||
5863 | _, err = r.ReadAt(buf6, 0) | ||
5864 | if err != nil { | ||
5865 | if err != io.EOF { | ||
5866 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
5867 | return | ||
5868 | } | ||
5869 | } | ||
5870 | |||
5871 | successLogger(testName, function, args, startTime).Info() | ||
5872 | } | ||
5873 | |||
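// ---- Editor's note: illustrative sketch, not part of the upstream test suite. ----
// Per the io.ReaderAt contract, reading up to the end of an object may return
// n == len(p) together with io.EOF, and reading past the end returns n < len(p)
// with io.EOF. The two tests above therefore treat io.EOF as success and only then
// verify the byte count. The same handling in isolation:
func exampleReadAtEOF(r io.ReaderAt, size int64) error {
	buf := make([]byte, size+1) // deliberately one byte past the end
	n, err := r.ReadAt(buf, 0)
	if err != nil && err != io.EOF {
		return err // a real failure, not end-of-object
	}
	if int64(n) != size {
		return fmt.Errorf("short read: got %d, want %d", n, size)
	}
	return nil
}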
5874 | // testSSECEncryptionPutGet tests encryption with customer-provided encryption keys | ||
5875 | func testSSECEncryptionPutGet() { | ||
5876 | // initialize logging params | ||
5877 | startTime := time.Now() | ||
5878 | testName := getFuncName() | ||
5879 | function := "PutEncryptedObject(bucketName, objectName, reader, sse)" | ||
5880 | args := map[string]interface{}{ | ||
5881 | "bucketName": "", | ||
5882 | "objectName": "", | ||
5883 | "sse": "", | ||
5884 | } | ||
5885 | // Seed random based on current time. | ||
5886 | rand.Seed(time.Now().Unix()) | ||
5887 | |||
5888 | // Instantiate new minio client object | ||
5889 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
5890 | &minio.Options{ | ||
5891 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
5892 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
5893 | }) | ||
5894 | if err != nil { | ||
5895 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
5896 | return | ||
5897 | } | ||
5898 | |||
5899 | // Enable tracing, write to stderr. | ||
5900 | // c.TraceOn(os.Stderr) | ||
5901 | |||
5902 | // Set user agent. | ||
5903 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
5904 | |||
5905 | // Generate a new random bucket name. | ||
5906 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
5907 | args["bucketName"] = bucketName | ||
5908 | |||
5909 | // Make a new bucket. | ||
5910 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
5911 | if err != nil { | ||
5912 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
5913 | return | ||
5914 | } | ||
5915 | |||
5916 | defer cleanupBucket(bucketName, c) | ||
5917 | |||
5918 | testCases := []struct { | ||
5919 | buf []byte | ||
5920 | }{ | ||
5921 | {buf: bytes.Repeat([]byte("F"), 1)}, | ||
5922 | {buf: bytes.Repeat([]byte("F"), 15)}, | ||
5923 | {buf: bytes.Repeat([]byte("F"), 16)}, | ||
5924 | {buf: bytes.Repeat([]byte("F"), 17)}, | ||
5925 | {buf: bytes.Repeat([]byte("F"), 31)}, | ||
5926 | {buf: bytes.Repeat([]byte("F"), 32)}, | ||
5927 | {buf: bytes.Repeat([]byte("F"), 33)}, | ||
5928 | {buf: bytes.Repeat([]byte("F"), 1024)}, | ||
5929 | {buf: bytes.Repeat([]byte("F"), 1024*2)}, | ||
5930 | {buf: bytes.Repeat([]byte("F"), 1024*1024)}, | ||
5931 | } | ||
5932 | |||
5933 | const password = "correct horse battery staple" // https://xkcd.com/936/ | ||
5934 | |||
5935 | for i, testCase := range testCases { | ||
5936 | // Generate a random object name | ||
5937 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
5938 | args["objectName"] = objectName | ||
5939 | |||
5940 | // Secured object | ||
5941 | sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) | ||
5942 | args["sse"] = sse | ||
5943 | |||
5944 | // Put encrypted data | ||
5945 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) | ||
5946 | if err != nil { | ||
5947 | logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) | ||
5948 | return | ||
5949 | } | ||
5950 | |||
5951 | // Read the data back | ||
5952 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) | ||
5953 | if err != nil { | ||
5954 | logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) | ||
5955 | return | ||
5956 | } | ||
5957 | defer r.Close() | ||
5958 | |||
5959 | // Compare the sent object with the received one | ||
5960 | recvBuffer := bytes.NewBuffer([]byte{}) | ||
5961 | if _, err = io.Copy(recvBuffer, r); err != nil { | ||
5962 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %v", i+1, err), err) | ||
5963 | return | ||
5964 | } | ||
5965 | if recvBuffer.Len() != len(testCase.buf) { | ||
5966 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err) | ||
5967 | return | ||
5968 | } | ||
5969 | if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { | ||
5970 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, payload size %d", i+1, len(testCase.buf)), err) | ||
5971 | return | ||
5972 | } | ||
5973 | |||
5974 | successLogger(testName, function, args, startTime).Info() | ||
5975 | |||
5976 | } | ||
5977 | |||
5978 | successLogger(testName, function, args, startTime).Info() | ||
5979 | } | ||
5980 | |||
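// ---- Editor's note: illustrative sketch, not part of the upstream test suite. ----
// encrypt.DefaultPBKDF stretches a password into a 256-bit SSE-C key, salted here
// with bucketName+objectName so that one password yields a distinct key per object.
// Losing the password, or changing either salt input, makes the object unreadable:
func exampleDerivedKeyPerObject(bucket, object string) encrypt.ServerSide {
	const password = "correct horse battery staple" // demo value; never hard-code real secrets
	return encrypt.DefaultPBKDF([]byte(password), []byte(bucket+object))
}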
5981 | // testSSECEncryptionFPut tests encryption with customer-provided encryption keys | ||
5982 | func testSSECEncryptionFPut() { | ||
5983 | // initialize logging params | ||
5984 | startTime := time.Now() | ||
5985 | testName := getFuncName() | ||
5986 | function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" | ||
5987 | args := map[string]interface{}{ | ||
5988 | "bucketName": "", | ||
5989 | "objectName": "", | ||
5990 | "filePath": "", | ||
5991 | "contentType": "", | ||
5992 | "sse": "", | ||
5993 | } | ||
5994 | // Seed random based on current time. | ||
5995 | rand.Seed(time.Now().Unix()) | ||
5996 | |||
5997 | // Instantiate new minio client object | ||
5998 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
5999 | &minio.Options{ | ||
6000 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
6001 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
6002 | }) | ||
6003 | if err != nil { | ||
6004 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
6005 | return | ||
6006 | } | ||
6007 | |||
6008 | // Enable tracing, write to stderr. | ||
6009 | // c.TraceOn(os.Stderr) | ||
6010 | |||
6011 | // Set user agent. | ||
6012 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
6013 | |||
6014 | // Generate a new random bucket name. | ||
6015 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
6016 | args["bucketName"] = bucketName | ||
6017 | |||
6018 | // Make a new bucket. | ||
6019 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
6020 | if err != nil { | ||
6021 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
6022 | return | ||
6023 | } | ||
6024 | |||
6025 | defer cleanupBucket(bucketName, c) | ||
6026 | |||
6027 | // Object custom content type | ||
6028 | customContentType := "custom/contenttype" | ||
6029 | args["metadata"] = customContentType | ||
6030 | |||
6031 | testCases := []struct { | ||
6032 | buf []byte | ||
6033 | }{ | ||
6034 | {buf: bytes.Repeat([]byte("F"), 0)}, | ||
6035 | {buf: bytes.Repeat([]byte("F"), 1)}, | ||
6036 | {buf: bytes.Repeat([]byte("F"), 15)}, | ||
6037 | {buf: bytes.Repeat([]byte("F"), 16)}, | ||
6038 | {buf: bytes.Repeat([]byte("F"), 17)}, | ||
6039 | {buf: bytes.Repeat([]byte("F"), 31)}, | ||
6040 | {buf: bytes.Repeat([]byte("F"), 32)}, | ||
6041 | {buf: bytes.Repeat([]byte("F"), 33)}, | ||
6042 | {buf: bytes.Repeat([]byte("F"), 1024)}, | ||
6043 | {buf: bytes.Repeat([]byte("F"), 1024*2)}, | ||
6044 | {buf: bytes.Repeat([]byte("F"), 1024*1024)}, | ||
6045 | } | ||
6046 | |||
6047 | const password = "correct horse battery staple" // https://xkcd.com/936/ | ||
6048 | for i, testCase := range testCases { | ||
6049 | // Generate a random object name | ||
6050 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
6051 | args["objectName"] = objectName | ||
6052 | |||
6053 | // Secured object | ||
6054 | sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) | ||
6055 | args["sse"] = sse | ||
6056 | |||
6057 | // Generate a random file name. | ||
6058 | fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
6059 | file, err := os.Create(fileName) | ||
6060 | if err != nil { | ||
6061 | logError(testName, function, args, startTime, "", "file create failed", err) | ||
6062 | return | ||
6063 | } | ||
6064 | _, err = file.Write(testCase.buf) | ||
6065 | if err != nil { | ||
6066 | logError(testName, function, args, startTime, "", "file write failed", err) | ||
6067 | return | ||
6068 | } | ||
6069 | file.Close() | ||
6070 | // Put encrypted data | ||
6071 | if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { | ||
6072 | logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) | ||
6073 | return | ||
6074 | } | ||
6075 | |||
6076 | // Read the data back | ||
6077 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) | ||
6078 | if err != nil { | ||
6079 | logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) | ||
6080 | return | ||
6081 | } | ||
6082 | defer r.Close() | ||
6083 | |||
6084 | // Compare the sent object with the received one | ||
6085 | recvBuffer := bytes.NewBuffer([]byte{}) | ||
6086 | if _, err = io.Copy(recvBuffer, r); err != nil { | ||
6087 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %v", i+1, err), err) | ||
6088 | return | ||
6089 | } | ||
6090 | if recvBuffer.Len() != len(testCase.buf) { | ||
6091 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err) | ||
6092 | return | ||
6093 | } | ||
6094 | if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { | ||
6095 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, payload size %d", i+1, len(testCase.buf)), err) | ||
6096 | return | ||
6097 | } | ||
6098 | |||
6099 | os.Remove(fileName) | ||
6100 | } | ||
6101 | |||
6102 | successLogger(testName, function, args, startTime).Info() | ||
6103 | } | ||
6104 | |||
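// ---- Editor's note: illustrative sketch, not part of the upstream test suite. ----
// The FPut loop above creates scratch files in the working directory and removes
// them only on the success path, so an early return leaks files. A leak-resistant
// variant using os.CreateTemp, assuming the file is only needed for one iteration:
func exampleScratchFile(data []byte) (path string, cleanup func(), err error) {
	f, err := os.CreateTemp("", "minio-functional-*")
	if err != nil {
		return "", nil, err
	}
	cleanup = func() { os.Remove(f.Name()) }
	if _, err = f.Write(data); err != nil {
		f.Close()
		cleanup()
		return "", nil, err
	}
	return f.Name(), cleanup, f.Close()
}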
6105 | // testSSES3EncryptionPutGet tests SSE-S3 encryption | ||
6106 | func testSSES3EncryptionPutGet() { | ||
6107 | // initialize logging params | ||
6108 | startTime := time.Now() | ||
6109 | testName := getFuncName() | ||
6110 | function := "PutEncryptedObject(bucketName, objectName, reader, sse)" | ||
6111 | args := map[string]interface{}{ | ||
6112 | "bucketName": "", | ||
6113 | "objectName": "", | ||
6114 | "sse": "", | ||
6115 | } | ||
6116 | // Seed random based on current time. | ||
6117 | rand.Seed(time.Now().Unix()) | ||
6118 | |||
6119 | // Instantiate new minio client object | ||
6120 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
6121 | &minio.Options{ | ||
6122 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
6123 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
6124 | }) | ||
6125 | if err != nil { | ||
6126 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
6127 | return | ||
6128 | } | ||
6129 | |||
6130 | // Enable tracing, write to stderr. | ||
6131 | // c.TraceOn(os.Stderr) | ||
6132 | |||
6133 | // Set user agent. | ||
6134 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
6135 | |||
6136 | // Generate a new random bucket name. | ||
6137 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
6138 | args["bucketName"] = bucketName | ||
6139 | |||
6140 | // Make a new bucket. | ||
6141 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
6142 | if err != nil { | ||
6143 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
6144 | return | ||
6145 | } | ||
6146 | |||
6147 | defer cleanupBucket(bucketName, c) | ||
6148 | |||
6149 | testCases := []struct { | ||
6150 | buf []byte | ||
6151 | }{ | ||
6152 | {buf: bytes.Repeat([]byte("F"), 1)}, | ||
6153 | {buf: bytes.Repeat([]byte("F"), 15)}, | ||
6154 | {buf: bytes.Repeat([]byte("F"), 16)}, | ||
6155 | {buf: bytes.Repeat([]byte("F"), 17)}, | ||
6156 | {buf: bytes.Repeat([]byte("F"), 31)}, | ||
6157 | {buf: bytes.Repeat([]byte("F"), 32)}, | ||
6158 | {buf: bytes.Repeat([]byte("F"), 33)}, | ||
6159 | {buf: bytes.Repeat([]byte("F"), 1024)}, | ||
6160 | {buf: bytes.Repeat([]byte("F"), 1024*2)}, | ||
6161 | {buf: bytes.Repeat([]byte("F"), 1024*1024)}, | ||
6162 | } | ||
6163 | |||
6164 | for i, testCase := range testCases { | ||
6165 | // Generate a random object name | ||
6166 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
6167 | args["objectName"] = objectName | ||
6168 | |||
6169 | // Secured object | ||
6170 | sse := encrypt.NewSSE() | ||
6171 | args["sse"] = sse | ||
6172 | |||
6173 | // Put encrypted data | ||
6174 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) | ||
6175 | if err != nil { | ||
6176 | logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) | ||
6177 | return | ||
6178 | } | ||
6179 | |||
6180 | // Read the data back without any encryption headers | ||
6181 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
6182 | if err != nil { | ||
6183 | logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) | ||
6184 | return | ||
6185 | } | ||
6186 | defer r.Close() | ||
6187 | |||
6188 | // Compare the sent object with the received one | ||
6189 | recvBuffer := bytes.NewBuffer([]byte{}) | ||
6190 | if _, err = io.Copy(recvBuffer, r); err != nil { | ||
6191 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %v", i+1, err), err) | ||
6192 | return | ||
6193 | } | ||
6194 | if recvBuffer.Len() != len(testCase.buf) { | ||
6195 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err) | ||
6196 | return | ||
6197 | } | ||
6198 | if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { | ||
6199 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, payload size %d", i+1, len(testCase.buf)), err) | ||
6200 | return | ||
6201 | } | ||
6202 | |||
6203 | successLogger(testName, function, args, startTime).Info() | ||
6204 | |||
6205 | } | ||
6206 | |||
6207 | successLogger(testName, function, args, startTime).Info() | ||
6208 | } | ||
6209 | |||
6210 | // testSSES3EncryptionFPut tests SSE-S3 server-side encryption with FPutObject | ||
6211 | func testSSES3EncryptionFPut() { | ||
6212 | // initialize logging params | ||
6213 | startTime := time.Now() | ||
6214 | testName := getFuncName() | ||
6215 | function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" | ||
6216 | args := map[string]interface{}{ | ||
6217 | "bucketName": "", | ||
6218 | "objectName": "", | ||
6219 | "filePath": "", | ||
6220 | "contentType": "", | ||
6221 | "sse": "", | ||
6222 | } | ||
6223 | // Seed random based on current time. | ||
6224 | rand.Seed(time.Now().Unix()) | ||
6225 | |||
6226 | // Instantiate new minio client object | ||
6227 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
6228 | &minio.Options{ | ||
6229 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
6230 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
6231 | }) | ||
6232 | if err != nil { | ||
6233 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
6234 | return | ||
6235 | } | ||
6236 | |||
6237 | // Enable tracing, write to stderr. | ||
6238 | // c.TraceOn(os.Stderr) | ||
6239 | |||
6240 | // Set user agent. | ||
6241 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
6242 | |||
6243 | // Generate a new random bucket name. | ||
6244 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
6245 | args["bucketName"] = bucketName | ||
6246 | |||
6247 | // Make a new bucket. | ||
6248 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
6249 | if err != nil { | ||
6250 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
6251 | return | ||
6252 | } | ||
6253 | |||
6254 | defer cleanupBucket(bucketName, c) | ||
6255 | |||
6256 | // Object custom content type | ||
6257 | customContentType := "custom/contenttype" | ||
6258 | args["metadata"] = customContentType | ||
6259 | |||
6260 | testCases := []struct { | ||
6261 | buf []byte | ||
6262 | }{ | ||
6263 | {buf: bytes.Repeat([]byte("F"), 0)}, | ||
6264 | {buf: bytes.Repeat([]byte("F"), 1)}, | ||
6265 | {buf: bytes.Repeat([]byte("F"), 15)}, | ||
6266 | {buf: bytes.Repeat([]byte("F"), 16)}, | ||
6267 | {buf: bytes.Repeat([]byte("F"), 17)}, | ||
6268 | {buf: bytes.Repeat([]byte("F"), 31)}, | ||
6269 | {buf: bytes.Repeat([]byte("F"), 32)}, | ||
6270 | {buf: bytes.Repeat([]byte("F"), 33)}, | ||
6271 | {buf: bytes.Repeat([]byte("F"), 1024)}, | ||
6272 | {buf: bytes.Repeat([]byte("F"), 1024*2)}, | ||
6273 | {buf: bytes.Repeat([]byte("F"), 1024*1024)}, | ||
6274 | } | ||
6275 | |||
6276 | for i, testCase := range testCases { | ||
6277 | // Generate a random object name | ||
6278 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
6279 | args["objectName"] = objectName | ||
6280 | |||
6281 | // Secured object | ||
6282 | sse := encrypt.NewSSE() | ||
6283 | args["sse"] = sse | ||
6284 | |||
6285 | // Generate a random file name. | ||
6286 | fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
6287 | file, err := os.Create(fileName) | ||
6288 | if err != nil { | ||
6289 | logError(testName, function, args, startTime, "", "file create failed", err) | ||
6290 | return | ||
6291 | } | ||
6292 | _, err = file.Write(testCase.buf) | ||
6293 | if err != nil { | ||
6294 | logError(testName, function, args, startTime, "", "file write failed", err) | ||
6295 | return | ||
6296 | } | ||
6297 | file.Close() | ||
6298 | // Put encrypted data | ||
6299 | if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { | ||
6300 | logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) | ||
6301 | return | ||
6302 | } | ||
6303 | |||
6304 | // Read the data back | ||
6305 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
6306 | if err != nil { | ||
6307 | logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) | ||
6308 | return | ||
6309 | } | ||
6310 | defer r.Close() | ||
6311 | |||
6312 | // Compare the sent object with the received one | ||
6313 | recvBuffer := bytes.NewBuffer([]byte{}) | ||
6314 | if _, err = io.Copy(recvBuffer, r); err != nil { | ||
6315 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %v", i+1, err), err) | ||
6316 | return | ||
6317 | } | ||
6318 | if recvBuffer.Len() != len(testCase.buf) { | ||
6319 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err) | ||
6320 | return | ||
6321 | } | ||
6322 | if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { | ||
6323 | logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, payload size %d", i+1, len(testCase.buf)), err) | ||
6324 | return | ||
6325 | } | ||
6326 | |||
6327 | os.Remove(fileName) | ||
6328 | } | ||
6329 | |||
6330 | successLogger(testName, function, args, startTime).Info() | ||
6331 | } | ||
6332 | |||
6333 | func testBucketNotification() { | ||
6334 | // initialize logging params | ||
6335 | startTime := time.Now() | ||
6336 | testName := getFuncName() | ||
6337 | function := "SetBucketNotification(bucketName)" | ||
6338 | args := map[string]interface{}{ | ||
6339 | "bucketName": "", | ||
6340 | } | ||
6341 | |||
6342 | if os.Getenv("NOTIFY_BUCKET") == "" || | ||
6343 | os.Getenv("NOTIFY_SERVICE") == "" || | ||
6344 | os.Getenv("NOTIFY_REGION") == "" || | ||
6345 | os.Getenv("NOTIFY_ACCOUNTID") == "" || | ||
6346 | os.Getenv("NOTIFY_RESOURCE") == "" { | ||
6347 | ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info() | ||
6348 | return | ||
6349 | } | ||
6350 | |||
6351 | // Seed random based on current time. | ||
6352 | rand.Seed(time.Now().Unix()) | ||
6353 | |||
6354 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
6355 | &minio.Options{ | ||
6356 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
6357 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
6358 | }) | ||
6359 | if err != nil { | ||
6360 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
6361 | return | ||
6362 | } | ||
6363 | |||
6364 | // Enable to debug | ||
6365 | // c.TraceOn(os.Stderr) | ||
6366 | |||
6367 | // Set user agent. | ||
6368 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
6369 | |||
6370 | bucketName := os.Getenv("NOTIFY_BUCKET") | ||
6371 | args["bucketName"] = bucketName | ||
6372 | |||
6373 | topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) | ||
6374 | queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") | ||
6375 | |||
6376 | topicConfig := notification.NewConfig(topicArn) | ||
6377 | topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll) | ||
6378 | topicConfig.AddFilterSuffix("jpg") | ||
6379 | |||
6380 | queueConfig := notification.NewConfig(queueArn) | ||
6381 | queueConfig.AddEvents(notification.ObjectCreatedAll) | ||
6382 | queueConfig.AddFilterPrefix("photos/") | ||
6383 | |||
6384 | config := notification.Configuration{} | ||
6385 | config.AddTopic(topicConfig) | ||
6386 | |||
6387 | // Add the same topicConfig again, should have no effect | ||
6388 | // because it is duplicated | ||
6389 | config.AddTopic(topicConfig) | ||
6390 | if len(config.TopicConfigs) != 1 { | ||
6391 | logError(testName, function, args, startTime, "", "Duplicate entry added", err) | ||
6392 | return | ||
6393 | } | ||
6394 | |||
6395 | // Add and remove a queue config | ||
6396 | config.AddQueue(queueConfig) | ||
6397 | config.RemoveQueueByArn(queueArn) | ||
6398 | |||
6399 | err = c.SetBucketNotification(context.Background(), bucketName, config) | ||
6400 | if err != nil { | ||
6401 | logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) | ||
6402 | return | ||
6403 | } | ||
6404 | |||
6405 | config, err = c.GetBucketNotification(context.Background(), bucketName) | ||
6406 | if err != nil { | ||
6407 | logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) | ||
6408 | return | ||
6409 | } | ||
6410 | |||
6411 | if len(config.TopicConfigs) != 1 { | ||
6412 | logError(testName, function, args, startTime, "", "Topic config is empty", err) | ||
6413 | return | ||
6414 | } | ||
6415 | |||
6416 | if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { | ||
6417 | logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) | ||
6418 | return | ||
6419 | } | ||
6420 | |||
6421 | err = c.RemoveAllBucketNotification(context.Background(), bucketName) | ||
6422 | if err != nil { | ||
6423 | logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) | ||
6424 | return | ||
6425 | } | ||
6426 | |||
6427 | // Delete all objects and buckets | ||
6428 | if err = cleanupBucket(bucketName, c); err != nil { | ||
6429 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
6430 | return | ||
6431 | } | ||
6432 | |||
6433 | successLogger(testName, function, args, startTime).Info() | ||
6434 | } | ||
6435 | |||
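// ---- Editor's note: illustrative sketch, not part of the upstream test suite. ----
// notification.NewArn assembles the standard form
// arn:<partition>:<service>:<region>:<account-id>:<resource>, and Configuration
// deduplicates identical entries, which the duplicate-AddTopic check above relies
// on. A condensed sketch with placeholder ARN fields:
func exampleNotificationConfig() notification.Configuration {
	arn := notification.NewArn("aws", "sns", "us-east-1", "000000000000", "mytopic")
	topicConfig := notification.NewConfig(arn)
	topicConfig.AddEvents(notification.ObjectCreatedAll)
	topicConfig.AddFilterPrefix("photos/")
	config := notification.Configuration{}
	config.AddTopic(topicConfig)
	config.AddTopic(topicConfig) // duplicate: TopicConfigs still has length 1
	return config
}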
6436 | // Tests a comprehensive list of all methods. | ||
6437 | func testFunctional() { | ||
6438 | // initialize logging params | ||
6439 | startTime := time.Now() | ||
6440 | testName := getFuncName() | ||
6441 | function := "testFunctional()" | ||
6442 | functionAll := "" | ||
6443 | args := map[string]interface{}{} | ||
6444 | |||
6445 | // Seed random based on current time. | ||
6446 | rand.Seed(time.Now().Unix()) | ||
6447 | |||
6448 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
6449 | &minio.Options{ | ||
6450 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
6451 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
6452 | }) | ||
6453 | if err != nil { | ||
6454 | logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) | ||
6455 | return | ||
6456 | } | ||
6457 | |||
6458 | // Enable to debug | ||
6459 | // c.TraceOn(os.Stderr) | ||
6460 | |||
6461 | // Set user agent. | ||
6462 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
6463 | |||
6464 | // Generate a new random bucket name. | ||
6465 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
6466 | |||
6467 | // Make a new bucket. | ||
6468 | function = "MakeBucket(bucketName, region)" | ||
6469 | functionAll = "MakeBucket(bucketName, region)" | ||
6470 | args["bucketName"] = bucketName | ||
6471 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
6472 | |||
6473 | defer cleanupBucket(bucketName, c) | ||
6474 | if err != nil { | ||
6475 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
6476 | return | ||
6477 | } | ||
6478 | |||
6479 | // Generate a random file name. | ||
6480 | fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
6481 | file, err := os.Create(fileName) | ||
6482 | if err != nil { | ||
6483 | logError(testName, function, args, startTime, "", "File creation failed", err) | ||
6484 | return | ||
6485 | } | ||
6486 | for i := 0; i < 3; i++ { | ||
6487 | buf := make([]byte, rand.Intn(1<<19)) | ||
6488 | _, err = file.Write(buf) | ||
6489 | if err != nil { | ||
6490 | logError(testName, function, args, startTime, "", "File write failed", err) | ||
6491 | return | ||
6492 | } | ||
6493 | } | ||
6494 | file.Close() | ||
6495 | |||
6496 | // Verify that the bucket exists and you have access. | ||
6497 | var exists bool | ||
6498 | function = "BucketExists(bucketName)" | ||
6499 | functionAll += ", " + function | ||
6500 | args = map[string]interface{}{ | ||
6501 | "bucketName": bucketName, | ||
6502 | } | ||
6503 | exists, err = c.BucketExists(context.Background(), bucketName) | ||
6504 | |||
6505 | if err != nil { | ||
6506 | logError(testName, function, args, startTime, "", "BucketExists failed", err) | ||
6507 | return | ||
6508 | } | ||
6509 | if !exists { | ||
6510 | logError(testName, function, args, startTime, "", "Could not find the bucket", err) | ||
6511 | return | ||
6512 | } | ||
6513 | |||
6514 | // Asserting the default bucket policy. | ||
6515 | function = "GetBucketPolicy(ctx, bucketName)" | ||
6516 | functionAll += ", " + function | ||
6517 | args = map[string]interface{}{ | ||
6518 | "bucketName": bucketName, | ||
6519 | } | ||
6520 | nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) | ||
6521 | if err != nil { | ||
6522 | logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) | ||
6523 | return | ||
6524 | } | ||
6525 | if nilPolicy != "" { | ||
6526 | logError(testName, function, args, startTime, "", "Default policy should be empty", err) | ||
6527 | return | ||
6528 | } | ||
6529 | |||
6530 | // Set the bucket policy to 'public readonly'. | ||
6531 | function = "SetBucketPolicy(bucketName, readOnlyPolicy)" | ||
6532 | functionAll += ", " + function | ||
6533 | |||
6534 | readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` | ||
6535 | args = map[string]interface{}{ | ||
6536 | "bucketName": bucketName, | ||
6537 | "bucketPolicy": readOnlyPolicy, | ||
6538 | } | ||
6539 | |||
6540 | err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy) | ||
6541 | if err != nil { | ||
6542 | logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) | ||
6543 | return | ||
6544 | } | ||
6545 | // should return policy `readonly`. | ||
6546 | function = "GetBucketPolicy(ctx, bucketName)" | ||
6547 | functionAll += ", " + function | ||
6548 | args = map[string]interface{}{ | ||
6549 | "bucketName": bucketName, | ||
6550 | } | ||
6551 | _, err = c.GetBucketPolicy(context.Background(), bucketName) | ||
6552 | if err != nil { | ||
6553 | logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) | ||
6554 | return | ||
6555 | } | ||
6556 | |||
6557 | // Make the bucket 'public writeonly'. | ||
6558 | function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" | ||
6559 | functionAll += ", " + function | ||
6560 | |||
6561 | writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` | ||
6562 | args = map[string]interface{}{ | ||
6563 | "bucketName": bucketName, | ||
6564 | "bucketPolicy": writeOnlyPolicy, | ||
6565 | } | ||
6566 | err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) | ||
6567 | |||
6568 | if err != nil { | ||
6569 | logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) | ||
6570 | return | ||
6571 | } | ||
6572 | // should return policy `writeonly`. | ||
6573 | function = "GetBucketPolicy(ctx, bucketName)" | ||
6574 | functionAll += ", " + function | ||
6575 | args = map[string]interface{}{ | ||
6576 | "bucketName": bucketName, | ||
6577 | } | ||
6578 | |||
6579 | _, err = c.GetBucketPolicy(context.Background(), bucketName) | ||
6580 | if err != nil { | ||
6581 | logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) | ||
6582 | return | ||
6583 | } | ||
6584 | |||
6585 | // Make the bucket 'public read/write'. | ||
6586 | function = "SetBucketPolicy(bucketName, readWritePolicy)" | ||
6587 | functionAll += ", " + function | ||
6588 | |||
6589 | readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` | ||
6590 | |||
6591 | args = map[string]interface{}{ | ||
6592 | "bucketName": bucketName, | ||
6593 | "bucketPolicy": readWritePolicy, | ||
6594 | } | ||
6595 | err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) | ||
6596 | |||
6597 | if err != nil { | ||
6598 | logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) | ||
6599 | return | ||
6600 | } | ||
6601 | // GetBucketPolicy should now succeed with the `readwrite` policy in place. | ||
6602 | function = "GetBucketPolicy(ctx, bucketName)" | ||
6603 | functionAll += ", " + function | ||
6604 | args = map[string]interface{}{ | ||
6605 | "bucketName": bucketName, | ||
6606 | } | ||
6607 | _, err = c.GetBucketPolicy(context.Background(), bucketName) | ||
6608 | if err != nil { | ||
6609 | logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) | ||
6610 | return | ||
6611 | } | ||
6612 | |||
6613 | // List all buckets. | ||
6614 | function = "ListBuckets()" | ||
6615 | functionAll += ", " + function | ||
6616 | args = nil | ||
6617 | buckets, err := c.ListBuckets(context.Background()) | ||
6618 | |||
6619 | if err != nil { | ||
6620 | logError(testName, function, args, startTime, "", "ListBuckets failed", err) | ||
6621 | return | ||
6622 | } | ||
6623 | if len(buckets) == 0 { | ||
6624 | logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) | ||
6625 | return | ||
6626 | } | ||
6627 | |||
6628 | // Verify if previously created bucket is listed in list buckets. | ||
6629 | bucketFound := false | ||
6630 | for _, bucket := range buckets { | ||
6631 | if bucket.Name == bucketName { | ||
6632 | bucketFound = true | ||
6633 | } | ||
6634 | } | ||
6635 | |||
6636 | // If the bucket was not found, error out. | ||
6637 | if !bucketFound { | ||
6638 | logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) | ||
6639 | return | ||
6640 | } | ||
6641 | |||
6642 | objectName := bucketName + "unique" | ||
6643 | |||
6644 | // Generate data | ||
6645 | buf := bytes.Repeat([]byte("f"), 1<<19) | ||
6646 | |||
6647 | function = "PutObject(bucketName, objectName, reader, contentType)" | ||
6648 | functionAll += ", " + function | ||
6649 | args = map[string]interface{}{ | ||
6650 | "bucketName": bucketName, | ||
6651 | "objectName": objectName, | ||
6652 | "contentType": "", | ||
6653 | } | ||
6654 | |||
6655 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) | ||
6656 | if err != nil { | ||
6657 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
6658 | return | ||
6659 | } | ||
6660 | |||
6661 | args = map[string]interface{}{ | ||
6662 | "bucketName": bucketName, | ||
6663 | "objectName": objectName + "-nolength", | ||
6664 | "contentType": "binary/octet-stream", | ||
6665 | } | ||
6666 | |||
6667 | _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
6668 | if err != nil { | ||
6669 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
6670 | return | ||
6671 | } | ||
6672 | |||
6673 | // Done channel retained from the legacy listing API; the context-based v7 listings below do not use it. | ||
6674 | doneCh := make(chan struct{}) | ||
6675 | defer close(doneCh) | ||
6676 | |||
6677 | objFound := false | ||
6678 | isRecursive := true // Recursive is true. | ||
6679 | |||
6680 | function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" | ||
6681 | functionAll += ", " + function | ||
6682 | args = map[string]interface{}{ | ||
6683 | "bucketName": bucketName, | ||
6684 | "objectName": objectName, | ||
6685 | "isRecursive": isRecursive, | ||
6686 | } | ||
6687 | |||
6688 | for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) { | ||
6689 | if obj.Key == objectName { | ||
6690 | objFound = true | ||
6691 | break | ||
6692 | } | ||
6693 | } | ||
6694 | if !objFound { | ||
6695 | logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) | ||
6696 | return | ||
6697 | } | ||
6698 | |||
6699 | objFound = false | ||
6700 | isRecursive = true // Recursive is true. | ||
6701 | function = "ListObjects()" | ||
6702 | functionAll += ", " + function | ||
6703 | args = map[string]interface{}{ | ||
6704 | "bucketName": bucketName, | ||
6705 | "objectName": objectName, | ||
6706 | "isRecursive": isRecursive, | ||
6707 | } | ||
6708 | |||
6709 | for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) { | ||
6710 | if obj.Key == objectName { | ||
6711 | objFound = true | ||
6712 | break | ||
6713 | } | ||
6714 | } | ||
6715 | if !objFound { | ||
6716 | logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) | ||
6717 | return | ||
6718 | } | ||
6719 | |||
6720 | incompObjNotFound := true | ||
6721 | |||
6722 | function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" | ||
6723 | functionAll += ", " + function | ||
6724 | args = map[string]interface{}{ | ||
6725 | "bucketName": bucketName, | ||
6726 | "objectName": objectName, | ||
6727 | "isRecursive": isRecursive, | ||
6728 | } | ||
6729 | |||
6730 | for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { | ||
6731 | if objIncompl.Key != "" { | ||
6732 | incompObjNotFound = false | ||
6733 | break | ||
6734 | } | ||
6735 | } | ||
6736 | if !incompObjNotFound { | ||
6737 | logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) | ||
6738 | return | ||
6739 | } | ||
6740 | |||
6741 | function = "GetObject(bucketName, objectName)" | ||
6742 | functionAll += ", " + function | ||
6743 | args = map[string]interface{}{ | ||
6744 | "bucketName": bucketName, | ||
6745 | "objectName": objectName, | ||
6746 | } | ||
6747 | newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
6748 | if err != nil { | ||
6749 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
6750 | return | ||
6751 | } | ||
6752 | |||
6753 | newReadBytes, err := io.ReadAll(newReader) | ||
6754 | if err != nil { | ||
6755 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
6756 | return | ||
6757 | } | ||
6758 | |||
6759 | if !bytes.Equal(newReadBytes, buf) { | ||
6760 | logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) | ||
6761 | return | ||
6762 | } | ||
6763 | newReader.Close() | ||
6764 | |||
6765 | function = "FGetObject(bucketName, objectName, fileName)" | ||
6766 | functionAll += ", " + function | ||
6767 | args = map[string]interface{}{ | ||
6768 | "bucketName": bucketName, | ||
6769 | "objectName": objectName, | ||
6770 | "fileName": fileName + "-f", | ||
6771 | } | ||
6772 | err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) | ||
6773 | |||
6774 | if err != nil { | ||
6775 | logError(testName, function, args, startTime, "", "FGetObject failed", err) | ||
6776 | return | ||
6777 | } | ||
6778 | |||
6779 | function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" | ||
6780 | functionAll += ", " + function | ||
6781 | args = map[string]interface{}{ | ||
6782 | "bucketName": bucketName, | ||
6783 | "objectName": "", | ||
6784 | "expires": 3600 * time.Second, | ||
6785 | } | ||
6786 | if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil { | ||
6787 | logError(testName, function, args, startTime, "", "PresignedHeadObject succeeded for an empty object name, expected an error", err) | ||
6788 | return | ||
6789 | } | ||
6790 | |||
6791 | // Generate presigned HEAD object url. | ||
6792 | function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" | ||
6793 | functionAll += ", " + function | ||
6794 | args = map[string]interface{}{ | ||
6795 | "bucketName": bucketName, | ||
6796 | "objectName": objectName, | ||
6797 | "expires": 3600 * time.Second, | ||
6798 | } | ||
6799 | presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) | ||
6800 | if err != nil { | ||
6801 | logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) | ||
6802 | return | ||
6803 | } | ||
6804 | |||
6805 | transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) | ||
6806 | if err != nil { | ||
6807 | logError(testName, function, args, startTime, "", "DefaultTransport failed", err) | ||
6808 | return | ||
6809 | } | ||
6810 | |||
6811 | httpClient := &http.Client{ | ||
6812 | // Set a sensible 30-second timeout; the request is | ||
6813 | // proactively canceled if the server does not respond | ||
6814 | // within that window. | ||
6815 | Timeout: 30 * time.Second, | ||
6816 | Transport: transport, | ||
6817 | } | ||
6818 | |||
6819 | req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) | ||
6820 | if err != nil { | ||
6821 | logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err) | ||
6822 | return | ||
6823 | } | ||
6824 | |||
6825 | // Verify if presigned url works. | ||
6826 | resp, err := httpClient.Do(req) | ||
6827 | if err != nil { | ||
6828 | logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) | ||
6829 | return | ||
6830 | } | ||
6831 | if resp.StatusCode != http.StatusOK { | ||
6832 | logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedHeadObject response incorrect, status %d", resp.StatusCode), err) | ||
6833 | return | ||
6834 | } | ||
6835 | if resp.Header.Get("ETag") == "" { | ||
6836 | logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) | ||
6837 | return | ||
6838 | } | ||
6839 | resp.Body.Close() | ||
6840 | |||
6841 | function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" | ||
6842 | functionAll += ", " + function | ||
6843 | args = map[string]interface{}{ | ||
6844 | "bucketName": bucketName, | ||
6845 | "objectName": "", | ||
6846 | "expires": 3600 * time.Second, | ||
6847 | } | ||
6848 | _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil) | ||
6849 | if err == nil { | ||
6850 | logError(testName, function, args, startTime, "", "PresignedGetObject succeeded for an empty object name, expected an error", err) | ||
6851 | return | ||
6852 | } | ||
6853 | |||
6854 | // Generate presigned GET object url. | ||
6855 | function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" | ||
6856 | functionAll += ", " + function | ||
6857 | args = map[string]interface{}{ | ||
6858 | "bucketName": bucketName, | ||
6859 | "objectName": objectName, | ||
6860 | "expires": 3600 * time.Second, | ||
6861 | } | ||
6862 | presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) | ||
6863 | if err != nil { | ||
6864 | logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) | ||
6865 | return | ||
6866 | } | ||
6867 | |||
6868 | // Verify if presigned url works. | ||
6869 | req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) | ||
6870 | if err != nil { | ||
6871 | logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) | ||
6872 | return | ||
6873 | } | ||
6874 | |||
6875 | resp, err = httpClient.Do(req) | ||
6876 | if err != nil { | ||
6877 | logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) | ||
6878 | return | ||
6879 | } | ||
6880 | if resp.StatusCode != http.StatusOK { | ||
6881 | logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject response incorrect, status %d", resp.StatusCode), err) | ||
6882 | return | ||
6883 | } | ||
6884 | newPresignedBytes, err := io.ReadAll(resp.Body) | ||
6885 | if err != nil { | ||
6886 | logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) | ||
6887 | return | ||
6888 | } | ||
6889 | resp.Body.Close() | ||
6890 | if !bytes.Equal(newPresignedBytes, buf) { | ||
6891 | logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) | ||
6892 | return | ||
6893 | } | ||
6894 | |||
6895 | // Set request parameters. | ||
6896 | reqParams := make(url.Values) | ||
6897 | reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") | ||
6898 | args = map[string]interface{}{ | ||
6899 | "bucketName": bucketName, | ||
6900 | "objectName": objectName, | ||
6901 | "expires": 3600 * time.Second, | ||
6902 | "reqParams": reqParams, | ||
6903 | } | ||
6904 | presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) | ||
6905 | |||
6906 | if err != nil { | ||
6907 | logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) | ||
6908 | return | ||
6909 | } | ||
6910 | |||
6911 | // Verify if presigned url works. | ||
6912 | req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) | ||
6913 | if err != nil { | ||
6914 | logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) | ||
6915 | return | ||
6916 | } | ||
6917 | |||
6918 | resp, err = httpClient.Do(req) | ||
6919 | if err != nil { | ||
6920 | logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) | ||
6921 | return | ||
6922 | } | ||
6923 | if resp.StatusCode != http.StatusOK { | ||
6924 | logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject response incorrect, status %d", resp.StatusCode), err) | ||
6925 | return | ||
6926 | } | ||
6927 | newPresignedBytes, err = io.ReadAll(resp.Body) | ||
6928 | if err != nil { | ||
6929 | logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) | ||
6930 | return | ||
6931 | } | ||
6932 | if !bytes.Equal(newPresignedBytes, buf) { | ||
6933 | logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err) | ||
6934 | return | ||
6935 | } | ||
6936 | if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { | ||
6937 | logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+resp.Header.Get("Content-Disposition"), err) | ||
6938 | return | ||
6939 | } | ||
6940 | |||
6941 | function = "PresignedPutObject(bucketName, objectName, expires)" | ||
6942 | functionAll += ", " + function | ||
6943 | args = map[string]interface{}{ | ||
6944 | "bucketName": bucketName, | ||
6945 | "objectName": "", | ||
6946 | "expires": 3600 * time.Second, | ||
6947 | } | ||
6948 | _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second) | ||
6949 | if err == nil { | ||
6950 | logError(testName, function, args, startTime, "", "PresignedPutObject succeeded for an empty object name, expected an error", err) | ||
6951 | return | ||
6952 | } | ||
6953 | |||
6954 | function = "PresignedPutObject(bucketName, objectName, expires)" | ||
6955 | functionAll += ", " + function | ||
6956 | args = map[string]interface{}{ | ||
6957 | "bucketName": bucketName, | ||
6958 | "objectName": objectName + "-presigned", | ||
6959 | "expires": 3600 * time.Second, | ||
6960 | } | ||
6961 | presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) | ||
6962 | if err != nil { | ||
6963 | logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) | ||
6964 | return | ||
6965 | } | ||
6966 | |||
6967 | buf = bytes.Repeat([]byte("g"), 1<<19) | ||
6968 | |||
6969 | req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) | ||
6970 | if err != nil { | ||
6971 | logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) | ||
6972 | return | ||
6973 | } | ||
6974 | |||
6975 | resp, err = httpClient.Do(req) | ||
6976 | if err != nil { | ||
6977 | logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) | ||
6978 | return | ||
6979 | } | ||
6980 | resp.Body.Close() | ||
6981 | newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) | ||
6982 | if err != nil { | ||
6983 | logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) | ||
6984 | return | ||
6985 | } | ||
6986 | |||
6987 | newReadBytes, err = io.ReadAll(newReader) | ||
6988 | if err != nil { | ||
6989 | logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) | ||
6990 | return | ||
6991 | } | ||
6992 | |||
6993 | if !bytes.Equal(newReadBytes, buf) { | ||
6994 | logError(testName, function, args, startTime, "", "Bytes mismatch", err) | ||
6995 | return | ||
6996 | } | ||
6997 | |||
6998 | function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" | ||
6999 | functionAll += ", " + function | ||
7000 | presignExtraHeaders := map[string][]string{ | ||
7001 | "mysecret": {"abcxxx"}, | ||
7002 | } | ||
7003 | args = map[string]interface{}{ | ||
7004 | "method": "PUT", | ||
7005 | "bucketName": bucketName, | ||
7006 | "objectName": objectName + "-presign-custom", | ||
7007 | "expires": 3600 * time.Second, | ||
7008 | "extraHeaders": presignExtraHeaders, | ||
7009 | } | ||
7010 | presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) | ||
7011 | if err != nil { | ||
7012 | logError(testName, function, args, startTime, "", "Presigned failed", err) | ||
7013 | return | ||
7014 | } | ||
7015 | |||
7016 | // Generate slightly more than 32 KiB of data. | ||
7017 | buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) | ||
7018 | |||
7019 | req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf)) | ||
7020 | if err != nil { | ||
7021 | logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) | ||
7022 | return | ||
7023 | } | ||
7024 | |||
7025 | req.Header.Add("mysecret", "abcxxx") | ||
7026 | resp, err = httpClient.Do(req) | ||
7027 | if err != nil { | ||
7028 | logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) | ||
7029 | return | ||
7030 | } | ||
7031 | resp.Body.Close() | ||
7032 | // Download the uploaded object to verify | ||
7033 | args = map[string]interface{}{ | ||
7034 | "bucketName": bucketName, | ||
7035 | "objectName": objectName + "-presign-custom", | ||
7036 | } | ||
7037 | newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{}) | ||
7038 | if err != nil { | ||
7039 | logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err) | ||
7040 | return | ||
7041 | } | ||
7042 | |||
7043 | newReadBytes, err = io.ReadAll(newReader) | ||
7044 | if err != nil { | ||
7045 | logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err) | ||
7046 | return | ||
7047 | } | ||
7048 | newReader.Close() | ||
7049 | |||
7050 | if !bytes.Equal(newReadBytes, buf) { | ||
7051 | logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err) | ||
7052 | return | ||
7053 | } | ||
7054 | |||
7055 | function = "RemoveObject(bucketName, objectName)" | ||
7056 | functionAll += ", " + function | ||
7057 | args = map[string]interface{}{ | ||
7058 | "bucketName": bucketName, | ||
7059 | "objectName": objectName, | ||
7060 | } | ||
7061 | err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) | ||
7062 | |||
7063 | if err != nil { | ||
7064 | logError(testName, function, args, startTime, "", "RemoveObject failed", err) | ||
7065 | return | ||
7066 | } | ||
7067 | args["objectName"] = objectName + "-f" | ||
7068 | err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{}) | ||
7069 | |||
7070 | if err != nil { | ||
7071 | logError(testName, function, args, startTime, "", "RemoveObject failed", err) | ||
7072 | return | ||
7073 | } | ||
7074 | |||
7075 | args["objectName"] = objectName + "-nolength" | ||
7076 | err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{}) | ||
7077 | |||
7078 | if err != nil { | ||
7079 | logError(testName, function, args, startTime, "", "RemoveObject failed", err) | ||
7080 | return | ||
7081 | } | ||
7082 | |||
7083 | args["objectName"] = objectName + "-presigned" | ||
7084 | err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{}) | ||
7085 | |||
7086 | if err != nil { | ||
7087 | logError(testName, function, args, startTime, "", "RemoveObject failed", err) | ||
7088 | return | ||
7089 | } | ||
7090 | |||
7091 | args["objectName"] = objectName + "-presign-custom" | ||
7092 | err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{}) | ||
7093 | |||
7094 | if err != nil { | ||
7095 | logError(testName, function, args, startTime, "", "RemoveObject failed", err) | ||
7096 | return | ||
7097 | } | ||
7098 | |||
7099 | function = "RemoveBucket(bucketName)" | ||
7100 | functionAll += ", " + function | ||
7101 | args = map[string]interface{}{ | ||
7102 | "bucketName": bucketName, | ||
7103 | } | ||
7104 | err = c.RemoveBucket(context.Background(), bucketName) | ||
7105 | |||
7106 | if err != nil { | ||
7107 | logError(testName, function, args, startTime, "", "RemoveBucket failed", err) | ||
7108 | return | ||
7109 | } | ||
7110 | err = c.RemoveBucket(context.Background(), bucketName) | ||
7111 | if err == nil { | ||
7112 | logError(testName, function, args, startTime, "", "RemoveBucket did not fail for a non-existent bucket", err) | ||
7113 | return | ||
7114 | } | ||
7115 | if err.Error() != "The specified bucket does not exist" { | ||
7116 | logError(testName, function, args, startTime, "", "RemoveBucket returned an unexpected error", err) | ||
7117 | return | ||
7118 | } | ||
7119 | |||
7120 | os.Remove(fileName) | ||
7121 | os.Remove(fileName + "-f") | ||
7122 | successLogger(testName, functionAll, args, startTime).Info() | ||
7123 | } | ||
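// The presigned-URL flow exercised above reduces to: sign a URL with the SDK,
// then drive it with any plain HTTP client; no SDK or credentials are needed
// on the consuming side. A minimal sketch of the GET side (the helper name
// and 15-minute expiry are illustrative, not part of this test suite):
//
//	func fetchViaPresignedGet(c *minio.Client, bucket, object string) ([]byte, error) {
//		u, err := c.PresignedGetObject(context.Background(), bucket, object, 15*time.Minute, nil)
//		if err != nil {
//			return nil, err
//		}
//		resp, err := http.Get(u.String()) // the URL itself carries the signature
//		if err != nil {
//			return nil, err
//		}
//		defer resp.Body.Close()
//		if resp.StatusCode != http.StatusOK {
//			return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
//		}
//		return io.ReadAll(resp.Body)
//	}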
7124 | |||
7125 | // Tests that the GetObject Reader* methods fail with a pre-condition | ||
7126 | // error when the object is modified in the object store mid-read. | ||
7127 | func testGetObjectModified() { | ||
7128 | // initialize logging params | ||
7129 | startTime := time.Now() | ||
7130 | testName := getFuncName() | ||
7131 | function := "GetObject(bucketName, objectName)" | ||
7132 | args := map[string]interface{}{} | ||
7133 | |||
7134 | // Instantiate new minio client object. | ||
7135 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
7136 | &minio.Options{ | ||
7137 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
7138 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
7139 | }) | ||
7140 | if err != nil { | ||
7141 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
7142 | return | ||
7143 | } | ||
7144 | |||
7145 | // Enable tracing, write to stderr. | ||
7146 | // c.TraceOn(os.Stderr) | ||
7147 | |||
7148 | // Set user agent. | ||
7149 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
7150 | |||
7151 | // Make a new bucket. | ||
7152 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
7153 | args["bucketName"] = bucketName | ||
7154 | |||
7155 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
7156 | if err != nil { | ||
7157 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
7158 | return | ||
7159 | } | ||
7160 | |||
7161 | defer cleanupBucket(bucketName, c) | ||
7162 | |||
7163 | // Upload an object. | ||
7164 | objectName := "myobject" | ||
7165 | args["objectName"] = objectName | ||
7166 | content := "helloworld" | ||
7167 | _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) | ||
7168 | if err != nil { | ||
7169 | logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) | ||
7170 | return | ||
7171 | } | ||
7172 | |||
7173 | defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) | ||
7174 | |||
7175 | reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
7176 | if err != nil { | ||
7177 | logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) | ||
7178 | return | ||
7179 | } | ||
7180 | defer reader.Close() | ||
7181 | |||
7182 | // Read a few bytes of the object. | ||
7183 | b := make([]byte, 5) | ||
7184 | n, err := reader.ReadAt(b, 0) | ||
7185 | if err != nil { | ||
7186 | logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) | ||
7187 | return | ||
7188 | } | ||
7189 | |||
7190 | // Upload different contents to the same object while object is being read. | ||
7191 | newContent := "goodbyeworld" | ||
7192 | _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) | ||
7193 | if err != nil { | ||
7194 | logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) | ||
7195 | return | ||
7196 | } | ||
7197 | |||
7198 | // Stat re-validates against the ETag cached from the first read, so the overwrite above must surface as a pre-condition failure. | ||
7199 | _, err = reader.Stat() | ||
7200 | expectedError := "At least one of the pre-conditions you specified did not hold" | ||
7201 | if err == nil || err.Error() != expectedError { | ||
7202 | logError(testName, function, args, startTime, "", fmt.Sprintf("Expected Stat to fail with error %q, but received %v", expectedError, err), err) | ||
7203 | return | ||
7204 | } | ||
7205 | |||
7206 | // Read again only to find object contents have been modified since last read. | ||
7207 | _, err = reader.ReadAt(b, int64(n)) | ||
7208 | if err == nil || err.Error() != expectedError { | ||
7209 | logError(testName, function, args, startTime, "", fmt.Sprintf("Expected ReadAt to fail with error %q, but received %v", expectedError, err), err) | ||
7210 | return | ||
7211 | } | ||
7212 | |||
7213 | successLogger(testName, function, args, startTime).Info() | ||
7214 | } | ||
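// Why the pre-condition failures above are the expected outcome: the Object
// returned by GetObject caches the ETag from its first response and sends it
// as an If-Match condition on subsequent ranged reads and Stat calls, so an
// overwrite in between surfaces as a pre-condition error rather than mixed
// content. A sketch of pinning an ETag explicitly, assuming the SDK's
// GetObjectOptions.SetMatchETag helper:
//
//	func openIfUnchanged(c *minio.Client, bucket, object, etag string) (*minio.Object, error) {
//		opts := minio.GetObjectOptions{}
//		if err := opts.SetMatchETag(etag); err != nil { // rejects an empty ETag
//			return nil, err
//		}
//		// The first Read/Stat on the returned object fails with a
//		// pre-condition error if the stored object no longer matches etag.
//		return c.GetObject(context.Background(), bucket, object, opts)
//	}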
7215 | |||
7216 | // Tests that PutObject uploads a file seeked to a given offset. | ||
7217 | func testPutObjectUploadSeekedObject() { | ||
7218 | // initialize logging params | ||
7219 | startTime := time.Now() | ||
7220 | testName := getFuncName() | ||
7221 | function := "PutObject(bucketName, objectName, fileToUpload, contentType)" | ||
7222 | args := map[string]interface{}{ | ||
7223 | "bucketName": "", | ||
7224 | "objectName": "", | ||
7225 | "fileToUpload": "", | ||
7226 | "contentType": "binary/octet-stream", | ||
7227 | } | ||
7228 | |||
7229 | // Instantiate new minio client object. | ||
7230 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
7231 | &minio.Options{ | ||
7232 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
7233 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
7234 | }) | ||
7235 | if err != nil { | ||
7236 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
7237 | return | ||
7238 | } | ||
7239 | |||
7240 | // Enable tracing, write to stderr. | ||
7241 | // c.TraceOn(os.Stderr) | ||
7242 | |||
7243 | // Set user agent. | ||
7244 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
7245 | |||
7246 | // Make a new bucket. | ||
7247 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
7248 | args["bucketName"] = bucketName | ||
7249 | |||
7250 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
7251 | if err != nil { | ||
7252 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
7253 | return | ||
7254 | } | ||
7255 | defer cleanupBucket(bucketName, c) | ||
7256 | |||
7257 | var tempfile *os.File | ||
7258 | |||
7259 | if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" { | ||
7260 | tempfile, err = os.Open(fileName) | ||
7261 | if err != nil { | ||
7262 | logError(testName, function, args, startTime, "", "File open failed", err) | ||
7263 | return | ||
7264 | } | ||
7265 | args["fileToUpload"] = fileName | ||
7266 | } else { | ||
7267 | tempfile, err = os.CreateTemp("", "minio-go-upload-test-") | ||
7268 | if err != nil { | ||
7269 | logError(testName, function, args, startTime, "", "TempFile create failed", err) | ||
7270 | return | ||
7271 | } | ||
7272 | args["fileToUpload"] = tempfile.Name() | ||
7273 | |||
7274 | // Generate 100kB data | ||
7275 | if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil { | ||
7276 | logError(testName, function, args, startTime, "", "File copy failed", err) | ||
7277 | return | ||
7278 | } | ||
7279 | |||
7280 | defer os.Remove(tempfile.Name()) | ||
7281 | |||
7282 | // Seek back to the beginning of the file. | ||
7283 | tempfile.Seek(0, 0) | ||
7284 | } | ||
7285 | length := 100 * humanize.KiByte | ||
7286 | objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) | ||
7287 | args["objectName"] = objectName | ||
7288 | |||
7289 | offset := length / 2 | ||
7290 | if _, err = tempfile.Seek(int64(offset), 0); err != nil { | ||
7291 | logError(testName, function, args, startTime, "", "TempFile seek failed", err) | ||
7292 | return | ||
7293 | } | ||
7294 | |||
7295 | _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
7296 | if err != nil { | ||
7297 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
7298 | return | ||
7299 | } | ||
7300 | tempfile.Close() | ||
7301 | |||
7302 | obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
7303 | if err != nil { | ||
7304 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
7305 | return | ||
7306 | } | ||
7307 | defer obj.Close() | ||
7308 | |||
7309 | n, err := obj.Seek(int64(offset), 0) | ||
7310 | if err != nil { | ||
7311 | logError(testName, function, args, startTime, "", "Seek failed", err) | ||
7312 | return | ||
7313 | } | ||
7314 | if n != int64(offset) { | ||
7315 | logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err) | ||
7316 | return | ||
7317 | } | ||
7318 | |||
7319 | _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
7320 | if err != nil { | ||
7321 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
7322 | return | ||
7323 | } | ||
7324 | st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{}) | ||
7325 | if err != nil { | ||
7326 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
7327 | return | ||
7328 | } | ||
7329 | if st.Size != int64(length-offset) { | ||
7330 | logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid size returned, expected %d got %d", int64(length-offset), st.Size), err) | ||
7331 | return | ||
7332 | } | ||
7333 | |||
7334 | successLogger(testName, function, args, startTime).Info() | ||
7335 | } | ||
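// An equivalent way to upload the tail of a file from a given offset, without
// mutating the file's own seek position, is io.NewSectionReader; a sketch
// (names are illustrative):
//
//	func uploadTail(c *minio.Client, bucket, object string, f *os.File, offset, size int64) error {
//		section := io.NewSectionReader(f, offset, size-offset) // reads [offset, size)
//		_, err := c.PutObject(context.Background(), bucket, object, section,
//			size-offset, minio.PutObjectOptions{ContentType: "binary/octet-stream"})
//		return err
//	}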
7336 | |||
7337 | // Tests bucket re-create errors. | ||
7338 | func testMakeBucketErrorV2() { | ||
7339 | // initialize logging params | ||
7340 | startTime := time.Now() | ||
7341 | testName := getFuncName() | ||
7342 | function := "MakeBucket(bucketName, region)" | ||
7343 | args := map[string]interface{}{ | ||
7344 | "bucketName": "", | ||
7345 | "region": "eu-west-1", | ||
7346 | } | ||
7347 | |||
7348 | // Seed random based on current time. | ||
7349 | rand.Seed(time.Now().Unix()) | ||
7350 | |||
7351 | // Instantiate new minio client object. | ||
7352 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
7353 | &minio.Options{ | ||
7354 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
7355 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
7356 | }) | ||
7357 | if err != nil { | ||
7358 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
7359 | return | ||
7360 | } | ||
7361 | |||
7362 | // Enable tracing, write to stderr. | ||
7363 | // c.TraceOn(os.Stderr) | ||
7364 | |||
7365 | // Set user agent. | ||
7366 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
7367 | |||
7368 | // Generate a new random bucket name. | ||
7369 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
7370 | region := "eu-west-1" | ||
7371 | args["bucketName"] = bucketName | ||
7372 | args["region"] = region | ||
7373 | |||
7374 | // Make a new bucket in 'eu-west-1'. | ||
7375 | if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { | ||
7376 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
7377 | return | ||
7378 | } | ||
7379 | |||
7380 | defer cleanupBucket(bucketName, c) | ||
7381 | |||
7382 | if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { | ||
7383 | logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) | ||
7384 | return | ||
7385 | } | ||
7386 | // Verify valid error response from server. | ||
7387 | if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && | ||
7388 | minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { | ||
7389 | logError(testName, function, args, startTime, "", "Invalid error returned by server", err) | ||
7390 | return | ||
7391 | } | ||
7392 | |||
7393 | successLogger(testName, function, args, startTime).Info() | ||
7394 | } | ||
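// The Code comparison above is the general pattern for branching on S3 errors
// with this SDK: minio.ToErrorResponse decodes the server's XML error body and
// its Code field carries the S3 error code. A small sketch:
//
//	func isBucketTaken(err error) bool {
//		switch minio.ToErrorResponse(err).Code {
//		case "BucketAlreadyExists", "BucketAlreadyOwnedByYou":
//			return true
//		}
//		return false
//	}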
7395 | |||
7396 | // Test get object reader to not throw error on being closed twice. | ||
7397 | func testGetObjectClosedTwiceV2() { | ||
7398 | // initialize logging params | ||
7399 | startTime := time.Now() | ||
7400 | testName := getFuncName() | ||
7401 | function := "MakeBucket(bucketName, region)" | ||
7402 | args := map[string]interface{}{ | ||
7403 | "bucketName": "", | ||
7404 | "region": "eu-west-1", | ||
7405 | } | ||
7406 | |||
7407 | // Seed random based on current time. | ||
7408 | rand.Seed(time.Now().Unix()) | ||
7409 | |||
7410 | // Instantiate new minio client object. | ||
7411 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
7412 | &minio.Options{ | ||
7413 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
7414 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
7415 | }) | ||
7416 | if err != nil { | ||
7417 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
7418 | return | ||
7419 | } | ||
7420 | |||
7421 | // Enable tracing, write to stderr. | ||
7422 | // c.TraceOn(os.Stderr) | ||
7423 | |||
7424 | // Set user agent. | ||
7425 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
7426 | |||
7427 | // Generate a new random bucket name. | ||
7428 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
7429 | args["bucketName"] = bucketName | ||
7430 | |||
7431 | // Make a new bucket. | ||
7432 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
7433 | if err != nil { | ||
7434 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
7435 | return | ||
7436 | } | ||
7437 | |||
7438 | defer cleanupBucket(bucketName, c) | ||
7439 | |||
7440 | // Generate 33K of data. | ||
7441 | bufSize := dataFileMap["datafile-33-kB"] | ||
7442 | reader := getDataReader("datafile-33-kB") | ||
7443 | defer reader.Close() | ||
7444 | |||
7445 | // Save the data | ||
7446 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
7447 | args["objectName"] = objectName | ||
7448 | |||
7449 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
7450 | if err != nil { | ||
7451 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
7452 | return | ||
7453 | } | ||
7454 | |||
7455 | // Read the data back | ||
7456 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
7457 | if err != nil { | ||
7458 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
7459 | return | ||
7460 | } | ||
7461 | |||
7462 | st, err := r.Stat() | ||
7463 | if err != nil { | ||
7464 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
7465 | return | ||
7466 | } | ||
7467 | |||
7468 | if st.Size != int64(bufSize) { | ||
7469 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err) | ||
7470 | return | ||
7471 | } | ||
7472 | if err := r.Close(); err != nil { | ||
7473 | logError(testName, function, args, startTime, "", "Close failed", err) | ||
7474 | return | ||
7475 | } | ||
7476 | if err := r.Close(); err == nil { | ||
7477 | logError(testName, function, args, startTime, "", "Object is already closed, should return error", err) | ||
7478 | return | ||
7479 | } | ||
7480 | |||
7481 | successLogger(testName, function, args, startTime).Info() | ||
7482 | } | ||
7483 | |||
7484 | // Tests FPutObject's implicit content-type detection. | ||
7485 | func testFPutObjectV2() { | ||
7486 | // initialize logging params | ||
7487 | startTime := time.Now() | ||
7488 | testName := getFuncName() | ||
7489 | function := "FPutObject(bucketName, objectName, fileName, opts)" | ||
7490 | args := map[string]interface{}{ | ||
7491 | "bucketName": "", | ||
7492 | "objectName": "", | ||
7493 | "fileName": "", | ||
7494 | "opts": "", | ||
7495 | } | ||
7496 | |||
7497 | // Seed random based on current time. | ||
7498 | rand.Seed(time.Now().Unix()) | ||
7499 | |||
7500 | // Instantiate new minio client object. | ||
7501 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
7502 | &minio.Options{ | ||
7503 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
7504 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
7505 | }) | ||
7506 | if err != nil { | ||
7507 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
7508 | return | ||
7509 | } | ||
7510 | |||
7511 | // Enable tracing, write to stderr. | ||
7512 | // c.TraceOn(os.Stderr) | ||
7513 | |||
7514 | // Set user agent. | ||
7515 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
7516 | |||
7517 | // Generate a new random bucket name. | ||
7518 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
7519 | args["bucketName"] = bucketName | ||
7520 | |||
7521 | // Make a new bucket. | ||
7522 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
7523 | if err != nil { | ||
7524 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
7525 | return | ||
7526 | } | ||
7527 | |||
7528 | defer cleanupBucket(bucketName, c) | ||
7529 | |||
7530 | // Make a temp file with 11*1024*1024 bytes of data. | ||
7531 | file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") | ||
7532 | if err != nil { | ||
7533 | logError(testName, function, args, startTime, "", "TempFile creation failed", err) | ||
7534 | return | ||
7535 | } | ||
7536 | |||
7537 | r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) | ||
7538 | n, err := io.CopyN(file, r, 11*1024*1024) | ||
7539 | if err != nil { | ||
7540 | logError(testName, function, args, startTime, "", "Copy failed", err) | ||
7541 | return | ||
7542 | } | ||
7543 | if n != int64(11*1024*1024) { | ||
7544 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", int64(11*1024*1024), n), err) | ||
7545 | return | ||
7546 | } | ||
7547 | |||
7548 | // Close the file proactively for Windows. | ||
7549 | err = file.Close() | ||
7550 | if err != nil { | ||
7551 | logError(testName, function, args, startTime, "", "File close failed", err) | ||
7552 | return | ||
7553 | } | ||
7554 | |||
7555 | // Set base object name | ||
7556 | objectName := bucketName + "FPutObject" | ||
7557 | args["objectName"] = objectName | ||
7558 | args["fileName"] = file.Name() | ||
7559 | |||
7560 | // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) | ||
7561 | _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) | ||
7562 | if err != nil { | ||
7563 | logError(testName, function, args, startTime, "", "FPutObject failed", err) | ||
7564 | return | ||
7565 | } | ||
7566 | |||
7567 | // Perform FPutObject with no contentType provided (Expecting application/octet-stream) | ||
7568 | args["objectName"] = objectName + "-Octet" | ||
7569 | args["contentType"] = "" | ||
7570 | |||
7571 | _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) | ||
7572 | if err != nil { | ||
7573 | logError(testName, function, args, startTime, "", "FPutObject failed", err) | ||
7574 | return | ||
7575 | } | ||
7576 | |||
7577 | // Add extension to temp file name | ||
7578 | fileName := file.Name() | ||
7579 | err = os.Rename(fileName, fileName+".gtar") | ||
7580 | if err != nil { | ||
7581 | logError(testName, function, args, startTime, "", "Rename failed", err) | ||
7582 | return | ||
7583 | } | ||
7584 | |||
7585 | // Perform FPutObject with no contentType provided (Expecting application/x-gtar) | ||
7586 | args["objectName"] = objectName + "-GTar" | ||
7587 | args["contentType"] = "" | ||
7588 | args["fileName"] = fileName + ".gtar" | ||
7589 | |||
7590 | _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) | ||
7591 | if err != nil { | ||
7592 | logError(testName, function, args, startTime, "", "FPutObject failed", err) | ||
7593 | return | ||
7594 | } | ||
7595 | |||
7596 | // Check headers and sizes | ||
7597 | rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) | ||
7598 | if err != nil { | ||
7599 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
7600 | return | ||
7601 | } | ||
7602 | |||
7603 | if rStandard.Size != 11*1024*1024 { | ||
7604 | logError(testName, function, args, startTime, "", "Unexpected size", nil) | ||
7605 | return | ||
7606 | } | ||
7607 | |||
7608 | if rStandard.ContentType != "application/octet-stream" { | ||
7609 | logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) | ||
7610 | return | ||
7611 | } | ||
7612 | |||
7613 | rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) | ||
7614 | if err != nil { | ||
7615 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
7616 | return | ||
7617 | } | ||
7618 | if rOctet.ContentType != "application/octet-stream" { | ||
7619 | logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err) | ||
7620 | return | ||
7621 | } | ||
7622 | |||
7623 | if rOctet.Size != 11*1024*1024 { | ||
7624 | logError(testName, function, args, startTime, "", "Unexpected size", nil) | ||
7625 | return | ||
7626 | } | ||
7627 | |||
7628 | rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) | ||
7629 | if err != nil { | ||
7630 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
7631 | return | ||
7632 | } | ||
7633 | if rGTar.Size != 11*1024*1024 { | ||
7634 | logError(testName, function, args, startTime, "", "Unexpected size", nil) | ||
7635 | return | ||
7636 | } | ||
7637 | if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { | ||
7638 | logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected one of: application/x-gtar, application/x-tar, application/octet-stream , got "+rGTar.ContentType, err) | ||
7639 | return | ||
7640 | } | ||
7641 | |||
7642 | os.Remove(fileName + ".gtar") | ||
7643 | successLogger(testName, function, args, startTime).Info() | ||
7644 | } | ||
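// The three uploads above pin down FPutObject's content-type behavior: an
// explicit opts.ContentType always wins; with an empty ContentType the SDK
// guesses from the file extension, falling back to application/octet-stream
// for unknown extensions. A sketch of the two call shapes:
//
//	// Explicit content type overrides extension-based detection.
//	_, err := c.FPutObject(ctx, bucket, "explicit", path,
//		minio.PutObjectOptions{ContentType: "application/octet-stream"})
//
//	// Empty ContentType: detected from the extension of path.
//	_, err = c.FPutObject(ctx, bucket, "guessed", path, minio.PutObjectOptions{})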
7645 | |||
7646 | // Tests various bucket supported formats. | ||
7647 | func testMakeBucketRegionsV2() { | ||
7648 | // initialize logging params | ||
7649 | startTime := time.Now() | ||
7650 | testName := getFuncName() | ||
7651 | function := "MakeBucket(bucketName, region)" | ||
7652 | args := map[string]interface{}{ | ||
7653 | "bucketName": "", | ||
7654 | "region": "eu-west-1", | ||
7655 | } | ||
7656 | |||
7657 | // Seed random based on current time. | ||
7658 | rand.Seed(time.Now().Unix()) | ||
7659 | |||
7660 | // Instantiate new minio client object. | ||
7661 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
7662 | &minio.Options{ | ||
7663 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
7664 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
7665 | }) | ||
7666 | if err != nil { | ||
7667 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
7668 | return | ||
7669 | } | ||
7670 | |||
7671 | // Enable tracing, write to stderr. | ||
7672 | // c.TraceOn(os.Stderr) | ||
7673 | |||
7674 | // Set user agent. | ||
7675 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
7676 | |||
7677 | // Generate a new random bucket name. | ||
7678 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
7679 | args["bucketName"] = bucketName | ||
7680 | |||
7681 | // Make a new bucket in 'eu-west-1'. | ||
7682 | if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil { | ||
7683 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
7684 | return | ||
7685 | } | ||
7686 | |||
7687 | if err = cleanupBucket(bucketName, c); err != nil { | ||
7688 | logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) | ||
7689 | return | ||
7690 | } | ||
7691 | |||
7692 | // Make a new bucket with '.' in its name, in 'us-west-2'. This | ||
7693 | // request is internally staged as path style instead of | ||
7694 | // virtual-host style. | ||
7695 | if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil { | ||
7696 | args["bucketName"] = bucketName + ".withperiod" | ||
7697 | args["region"] = "us-west-2" | ||
7698 | logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err) | ||
7699 | return | ||
7700 | } | ||
7701 | |||
7702 | // Delete all objects and buckets | ||
7703 | if err = cleanupBucket(bucketName+".withperiod", c); err != nil { | ||
7704 | logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) | ||
7705 | return | ||
7706 | } | ||
7707 | |||
7708 | successLogger(testName, function, args, startTime).Info() | ||
7709 | } | ||
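// Bucket names containing dots defeat TLS wildcard certificates under
// virtual-host-style addressing, which is why the SDK stages such requests as
// path style. The lookup mode can also be fixed when constructing the client
// via minio.Options.BucketLookup; a sketch:
//
//	c, err := minio.New(endpoint, &minio.Options{
//		Creds:        creds,
//		Secure:       true,
//		BucketLookup: minio.BucketLookupPath, // always use path-style requests
//	})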
7710 | |||
7711 | // Tests get object io.ReadSeeker interface methods. | ||
7712 | func testGetObjectReadSeekFunctionalV2() { | ||
7713 | // initialize logging params | ||
7714 | startTime := time.Now() | ||
7715 | testName := getFuncName() | ||
7716 | function := "GetObject(bucketName, objectName)" | ||
7717 | args := map[string]interface{}{} | ||
7718 | |||
7719 | // Seed random based on current time. | ||
7720 | rand.Seed(time.Now().Unix()) | ||
7721 | |||
7722 | // Instantiate new minio client object. | ||
7723 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
7724 | &minio.Options{ | ||
7725 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
7726 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
7727 | }) | ||
7728 | if err != nil { | ||
7729 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
7730 | return | ||
7731 | } | ||
7732 | |||
7733 | // Enable tracing, write to stderr. | ||
7734 | // c.TraceOn(os.Stderr) | ||
7735 | |||
7736 | // Set user agent. | ||
7737 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
7738 | |||
7739 | // Generate a new random bucket name. | ||
7740 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
7741 | args["bucketName"] = bucketName | ||
7742 | |||
7743 | // Make a new bucket. | ||
7744 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
7745 | if err != nil { | ||
7746 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
7747 | return | ||
7748 | } | ||
7749 | |||
7750 | defer cleanupBucket(bucketName, c) | ||
7751 | |||
7752 | // Generate 33K of data. | ||
7753 | bufSize := dataFileMap["datafile-33-kB"] | ||
7754 | reader := getDataReader("datafile-33-kB") | ||
7755 | defer reader.Close() | ||
7756 | |||
7757 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
7758 | args["objectName"] = objectName | ||
7759 | |||
7760 | buf, err := io.ReadAll(reader) | ||
7761 | if err != nil { | ||
7762 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
7763 | return | ||
7764 | } | ||
7765 | |||
7766 | // Save the data. | ||
7767 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
7768 | if err != nil { | ||
7769 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
7770 | return | ||
7771 | } | ||
7772 | |||
7773 | // Read the data back | ||
7774 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
7775 | if err != nil { | ||
7776 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
7777 | return | ||
7778 | } | ||
7779 | defer r.Close() | ||
7780 | |||
7781 | st, err := r.Stat() | ||
7782 | if err != nil { | ||
7783 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
7784 | return | ||
7785 | } | ||
7786 | |||
7787 | if st.Size != int64(bufSize) { | ||
7788 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) | ||
7789 | return | ||
7790 | } | ||
7791 | |||
7792 | offset := int64(2048) | ||
7793 | n, err := r.Seek(offset, 0) | ||
7794 | if err != nil { | ||
7795 | logError(testName, function, args, startTime, "", "Seek failed", err) | ||
7796 | return | ||
7797 | } | ||
7798 | if n != offset { | ||
7799 | logError(testName, function, args, startTime, "", fmt.Sprintf("Seek position does not match, expected %d got %d", offset, n), err) | ||
7800 | return | ||
7801 | } | ||
7802 | n, err = r.Seek(0, 1) | ||
7803 | if err != nil { | ||
7804 | logError(testName, function, args, startTime, "", "Seek failed", err) | ||
7805 | return | ||
7806 | } | ||
7807 | if n != offset { | ||
7808 | logError(testName, function, args, startTime, "", fmt.Sprintf("Seek position does not match, expected %d got %d", offset, n), err) | ||
7809 | return | ||
7810 | } | ||
7811 | _, err = r.Seek(offset, 2) | ||
7812 | if err == nil { | ||
7813 | logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err) | ||
7814 | return | ||
7815 | } | ||
7816 | n, err = r.Seek(-offset, 2) | ||
7817 | if err != nil { | ||
7818 | logError(testName, function, args, startTime, "", "Seek failed", err) | ||
7819 | return | ||
7820 | } | ||
7821 | if n != st.Size-offset { | ||
7822 | logError(testName, function, args, startTime, "", fmt.Sprintf("Seek position does not match, expected %d got %d", st.Size-offset, n), err) | ||
7823 | return | ||
7824 | } | ||
7825 | |||
7826 | var buffer1 bytes.Buffer | ||
7827 | if _, err = io.CopyN(&buffer1, r, st.Size); err != nil { | ||
7828 | if err != io.EOF { | ||
7829 | logError(testName, function, args, startTime, "", "Copy failed", err) | ||
7830 | return | ||
7831 | } | ||
7832 | } | ||
7833 | if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) { | ||
7834 | logError(testName, function, args, startTime, "", "Read bytes do not match the original buffer", err) | ||
7835 | return | ||
7836 | } | ||
7837 | |||
7838 | // Seek again and read again. | ||
7839 | n, err = r.Seek(offset-1, 0) | ||
7840 | if err != nil { | ||
7841 | logError(testName, function, args, startTime, "", "Seek failed", err) | ||
7842 | return | ||
7843 | } | ||
7844 | if n != (offset - 1) { | ||
7845 | logError(testName, function, args, startTime, "", fmt.Sprintf("Seek position does not match, expected %d got %d", offset-1, n), err) | ||
7846 | return | ||
7847 | } | ||
7848 | |||
7849 | var buffer2 bytes.Buffer | ||
7850 | if _, err = io.CopyN(&buffer2, r, st.Size); err != nil { | ||
7851 | if err != io.EOF { | ||
7852 | logError(testName, function, args, startTime, "", "Copy failed", err) | ||
7853 | return | ||
7854 | } | ||
7855 | } | ||
7856 | // Verify the remaining bytes match the tail of the original buffer. | ||
7857 | if !bytes.Equal(buf[2047:], buffer2.Bytes()) { | ||
7858 | logError(testName, function, args, startTime, "", "Read bytes do not match the original buffer", err) | ||
7859 | return | ||
7860 | } | ||
7861 | |||
7862 | successLogger(testName, function, args, startTime).Info() | ||
7863 | } | ||
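// The raw whence values 0, 1 and 2 used above correspond to io.SeekStart,
// io.SeekCurrent and io.SeekEnd; the named constants make the same moves
// self-describing (check the returned error in real code):
//
//	pos, _ := r.Seek(offset, io.SeekStart) // absolute position from the start
//	pos, _ = r.Seek(0, io.SeekCurrent)     // no movement; reports current position
//	pos, _ = r.Seek(-offset, io.SeekEnd)   // offset bytes before the end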
7864 | |||
7865 | // Tests get object ReaderAt interface methods. | ||
7866 | func testGetObjectReadAtFunctionalV2() { | ||
7867 | // initialize logging params | ||
7868 | startTime := time.Now() | ||
7869 | testName := getFuncName() | ||
7870 | function := "GetObject(bucketName, objectName)" | ||
7871 | args := map[string]interface{}{} | ||
7872 | |||
7873 | // Seed random based on current time. | ||
7874 | rand.Seed(time.Now().Unix()) | ||
7875 | |||
7876 | // Instantiate new minio client object. | ||
7877 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
7878 | &minio.Options{ | ||
7879 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
7880 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
7881 | }) | ||
7882 | if err != nil { | ||
7883 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
7884 | return | ||
7885 | } | ||
7886 | |||
7887 | // Enable tracing, write to stderr. | ||
7888 | // c.TraceOn(os.Stderr) | ||
7889 | |||
7890 | // Set user agent. | ||
7891 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
7892 | |||
7893 | // Generate a new random bucket name. | ||
7894 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
7895 | args["bucketName"] = bucketName | ||
7896 | |||
7897 | // Make a new bucket. | ||
7898 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
7899 | if err != nil { | ||
7900 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
7901 | return | ||
7902 | } | ||
7903 | |||
7904 | defer cleanupBucket(bucketName, c) | ||
7905 | |||
7906 | // Generate 33K of data. | ||
7907 | bufSize := dataFileMap["datafile-33-kB"] | ||
7908 | reader := getDataReader("datafile-33-kB") | ||
7909 | defer reader.Close() | ||
7910 | |||
7911 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
7912 | args["objectName"] = objectName | ||
7913 | |||
7914 | buf, err := io.ReadAll(reader) | ||
7915 | if err != nil { | ||
7916 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
7917 | return | ||
7918 | } | ||
7919 | |||
7920 | // Save the data | ||
7921 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
7922 | if err != nil { | ||
7923 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
7924 | return | ||
7925 | } | ||
7926 | |||
7927 | // Read the data back | ||
7928 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
7929 | if err != nil { | ||
7930 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
7931 | return | ||
7932 | } | ||
7933 | defer r.Close() | ||
7934 | |||
7935 | st, err := r.Stat() | ||
7936 | if err != nil { | ||
7937 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
7938 | return | ||
7939 | } | ||
7940 | |||
7941 | if st.Size != int64(bufSize) { | ||
7942 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err) | ||
7943 | return | ||
7944 | } | ||
7945 | |||
7946 | offset := int64(2048) | ||
7947 | |||
7948 | // Read directly | ||
7949 | buf2 := make([]byte, 512) | ||
7950 | buf3 := make([]byte, 512) | ||
7951 | buf4 := make([]byte, 512) | ||
7952 | |||
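| // Each ReadAt below targets a full 512-byte window inside the object, so a short read is a failure. | ||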
7953 | m, err := r.ReadAt(buf2, offset) | ||
7954 | if err != nil { | ||
7955 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
7956 | return | ||
7957 | } | ||
7958 | if m != len(buf2) { | ||
7959 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d got %d", len(buf2), m), err) | ||
7960 | return | ||
7961 | } | ||
7962 | if !bytes.Equal(buf2, buf[offset:offset+512]) { | ||
7963 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
7964 | return | ||
7965 | } | ||
7966 | offset += 512 | ||
7967 | m, err = r.ReadAt(buf3, offset) | ||
7968 | if err != nil { | ||
7969 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
7970 | return | ||
7971 | } | ||
7972 | if m != len(buf3) { | ||
7973 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d got %d", len(buf3), m), err) | ||
7974 | return | ||
7975 | } | ||
7976 | if !bytes.Equal(buf3, buf[offset:offset+512]) { | ||
7977 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
7978 | return | ||
7979 | } | ||
7980 | offset += 512 | ||
7981 | m, err = r.ReadAt(buf4, offset) | ||
7982 | if err != nil { | ||
7983 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
7984 | return | ||
7985 | } | ||
7986 | if m != len(buf4) { | ||
7987 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d got %d", len(buf4), m), err) | ||
7988 | return | ||
7989 | } | ||
7990 | if !bytes.Equal(buf4, buf[offset:offset+512]) { | ||
7991 | logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) | ||
7992 | return | ||
7993 | } | ||
7994 | |||
7995 | buf5 := make([]byte, bufSize) | ||
7996 | // Read the whole object. | ||
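| // ReadAt may return io.EOF together with the full payload when the read ends exactly at the object boundary, so only non-EOF errors are fatal here. | ||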
7997 | m, err = r.ReadAt(buf5, 0) | ||
7998 | if err != nil { | ||
7999 | if err != io.EOF { | ||
8000 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
8001 | return | ||
8002 | } | ||
8003 | } | ||
8004 | if m != len(buf5) { | ||
8005 | logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d got %d", len(buf5), m), err) | ||
8006 | return | ||
8007 | } | ||
8008 | if !bytes.Equal(buf, buf5) { | ||
8009 | logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) | ||
8010 | return | ||
8011 | } | ||
8012 | |||
8013 | buf6 := make([]byte, bufSize+1) | ||
8014 | // Read the whole object and beyond. | ||
8015 | _, err = r.ReadAt(buf6, 0) | ||
8016 | if err != nil { | ||
8017 | if err != io.EOF { | ||
8018 | logError(testName, function, args, startTime, "", "ReadAt failed", err) | ||
8019 | return | ||
8020 | } | ||
8021 | } | ||
8022 | |||
8023 | successLogger(testName, function, args, startTime).Info() | ||
8024 | } | ||
8025 | |||
8026 | // Tests copy object | ||
8027 | func testCopyObjectV2() { | ||
8028 | // initialize logging params | ||
8029 | startTime := time.Now() | ||
8030 | testName := getFuncName() | ||
8031 | function := "CopyObject(destination, source)" | ||
8032 | args := map[string]interface{}{} | ||
8033 | |||
8034 | // Seed random based on current time. | ||
8035 | rand.Seed(time.Now().Unix()) | ||
8036 | |||
8037 | // Instantiate new minio client object | ||
8038 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8039 | &minio.Options{ | ||
8040 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8041 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8042 | }) | ||
8043 | if err != nil { | ||
8044 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
8045 | return | ||
8046 | } | ||
8047 | |||
8048 | // Enable tracing, write to stderr. | ||
8049 | // c.TraceOn(os.Stderr) | ||
8050 | |||
8051 | // Set user agent. | ||
8052 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
8053 | |||
8054 | // Generate a new random bucket name. | ||
8055 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8056 | |||
8057 | // Make a new bucket in 'us-east-1' (source bucket). | ||
8058 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
8059 | if err != nil { | ||
8060 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
8061 | return | ||
8062 | } | ||
8063 | defer cleanupBucket(bucketName, c) | ||
8064 | |||
8065 | // Make a new bucket in 'us-east-1' (destination bucket). | ||
8066 | err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) | ||
8067 | if err != nil { | ||
8068 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
8069 | return | ||
8070 | } | ||
8071 | defer cleanupBucket(bucketName+"-copy", c) | ||
8072 | |||
8073 | // Generate 33K of data. | ||
8074 | bufSize := dataFileMap["datafile-33-kB"] | ||
8075 | reader := getDataReader("datafile-33-kB") | ||
8076 | defer reader.Close() | ||
8077 | |||
8078 | // Save the data | ||
8079 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
8080 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
8081 | if err != nil { | ||
8082 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
8083 | return | ||
8084 | } | ||
8085 | |||
8086 | r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
8087 | if err != nil { | ||
8088 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
8089 | return | ||
8090 | } | ||
8091 | // Check the various fields of source object against destination object. | ||
8092 | objInfo, err := r.Stat() | ||
8093 | if err != nil { | ||
8094 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
8095 | return | ||
8096 | } | ||
8097 | r.Close() | ||
8098 | |||
8099 | // Copy Source | ||
8100 | src := minio.CopySrcOptions{ | ||
8101 | Bucket: bucketName, | ||
8102 | Object: objectName, | ||
8103 | MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), | ||
8104 | MatchETag: objInfo.ETag, | ||
8105 | } | ||
8106 | args["source"] = src | ||
8107 | |||
8108 | // Copy destination options. | ||
8109 | dst := minio.CopyDestOptions{ | ||
8110 | Bucket: bucketName + "-copy", | ||
8111 | Object: objectName + "-copy", | ||
8112 | } | ||
8113 | args["destination"] = dst | ||
8114 | |||
8115 | // Perform the Copy | ||
8116 | _, err = c.CopyObject(context.Background(), dst, src) | ||
8117 | if err != nil { | ||
8118 | logError(testName, function, args, startTime, "", "CopyObject failed", err) | ||
8119 | return | ||
8120 | } | ||
8121 | |||
8122 | // Source object | ||
8123 | r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
8124 | if err != nil { | ||
8125 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
8126 | return | ||
8127 | } | ||
8128 | // Destination object | ||
8129 | readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) | ||
8130 | if err != nil { | ||
8131 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
8132 | return | ||
8133 | } | ||
8134 | // Check the various fields of source object against destination object. | ||
8135 | objInfo, err = r.Stat() | ||
8136 | if err != nil { | ||
8137 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
8138 | return | ||
8139 | } | ||
8140 | objInfoCopy, err := readerCopy.Stat() | ||
8141 | if err != nil { | ||
8142 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
8143 | return | ||
8144 | } | ||
8145 | if objInfo.Size != objInfoCopy.Size { | ||
8146 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", objInfoCopy.Size, objInfo.Size), err) | ||
8147 | return | ||
8148 | } | ||
8149 | |||
8150 | // Close all the readers. | ||
8151 | r.Close() | ||
8152 | readerCopy.Close() | ||
8153 | |||
8154 | // CopyObject again but with wrong conditions | ||
8155 | src = minio.CopySrcOptions{ | ||
8156 | Bucket: bucketName, | ||
8157 | Object: objectName, | ||
8158 | MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), | ||
8159 | NoMatchETag: objInfo.ETag, | ||
8160 | } | ||
8161 | |||
8162 | // Perform the Copy which should fail | ||
8163 | _, err = c.CopyObject(context.Background(), dst, src) | ||
8164 | if err == nil { | ||
8165 | logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) | ||
8166 | return | ||
8167 | } | ||
8168 | |||
8169 | successLogger(testName, function, args, startTime).Info() | ||
8170 | } | ||
8171 | |||
8172 | func testComposeObjectErrorCasesWrapper(c *minio.Client) { | ||
8173 | // initialize logging params | ||
8174 | startTime := time.Now() | ||
8175 | testName := getFuncName() | ||
8176 | function := "ComposeObject(destination, sourceList)" | ||
8177 | args := map[string]interface{}{} | ||
8178 | |||
8179 | // Generate a new random bucket name. | ||
8180 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8181 | |||
8182 | // Make a new bucket in 'us-east-1' (source bucket). | ||
8183 | err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
8184 | if err != nil { | ||
8185 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
8186 | return | ||
8187 | } | ||
8188 | |||
8189 | defer cleanupBucket(bucketName, c) | ||
8190 | |||
8191 | // Test that more than 10K source objects cannot be | ||
8192 | // concatenated. | ||
8193 | srcArr := [10001]minio.CopySrcOptions{} | ||
8194 | srcSlice := srcArr[:] | ||
8195 | dst := minio.CopyDestOptions{ | ||
8196 | Bucket: bucketName, | ||
8197 | Object: "object", | ||
8198 | } | ||
8199 | |||
8200 | args["destination"] = dst | ||
8201 | // Describe srcArr in args["sourceList"] instead of embedding it, | ||
8202 | // so the log does not contain 10,001 empty source entries. | ||
8203 | args["sourceList"] = "source array of 10,001 elements" | ||
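| // ComposeObject validates the source count (1 to 10,000) client-side, so this is expected to fail before any request is sent. | ||
| // The comparison below must match the library's error string verbatim, including its "as least" wording. | ||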
8204 | if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil { | ||
8205 | logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err) | ||
8206 | return | ||
8207 | } else if err.Error() != "There must be as least one and up to 10000 source objects." { | ||
8208 | logError(testName, function, args, startTime, "", "Got unexpected error", err) | ||
8209 | return | ||
8210 | } | ||
8211 | |||
8212 | // Create a source with invalid offset spec and check that | ||
8213 | // error is returned: | ||
8214 | // 1. Create the source object. | ||
8215 | const badSrcSize = 5 * 1024 * 1024 | ||
8216 | buf := bytes.Repeat([]byte("1"), badSrcSize) | ||
8217 | _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) | ||
8218 | if err != nil { | ||
8219 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
8220 | return | ||
8221 | } | ||
8222 | // 2. Set invalid range spec on the object (going beyond | ||
8223 | // object size) | ||
8224 | badSrc := minio.CopySrcOptions{ | ||
8225 | Bucket: bucketName, | ||
8226 | Object: "badObject", | ||
8227 | MatchRange: true, | ||
8228 | Start: 1, | ||
8229 | End: badSrcSize, | ||
8230 | } | ||
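| // Offsets are zero-based and inclusive: the last valid End is badSrcSize-1, so End == badSrcSize points past the object. | ||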
8231 | |||
8232 | // 3. ComposeObject call should fail. | ||
8233 | if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil { | ||
8234 | logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) | ||
8235 | return | ||
8236 | } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { | ||
8237 | logError(testName, function, args, startTime, "", "Got invalid error", err) | ||
8238 | return | ||
8239 | } | ||
8240 | |||
8241 | successLogger(testName, function, args, startTime).Info() | ||
8242 | } | ||
8243 | |||
8244 | // Test expected error cases | ||
8245 | func testComposeObjectErrorCasesV2() { | ||
8246 | // initialize logging params | ||
8247 | startTime := time.Now() | ||
8248 | testName := getFuncName() | ||
8249 | function := "ComposeObject(destination, sourceList)" | ||
8250 | args := map[string]interface{}{} | ||
8251 | |||
8252 | // Instantiate new minio client object | ||
8253 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8254 | &minio.Options{ | ||
8255 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8256 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8257 | }) | ||
8258 | if err != nil { | ||
8259 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
8260 | return | ||
8261 | } | ||
8262 | |||
8263 | testComposeObjectErrorCasesWrapper(c) | ||
8264 | } | ||
8265 | |||
8266 | func testComposeMultipleSources(c *minio.Client) { | ||
8267 | // initialize logging params | ||
8268 | startTime := time.Now() | ||
8269 | testName := getFuncName() | ||
8270 | function := "ComposeObject(destination, sourceList)" | ||
8271 | args := map[string]interface{}{ | ||
8272 | "destination": "", | ||
8273 | "sourceList": "", | ||
8274 | } | ||
8275 | |||
8276 | // Generate a new random bucket name. | ||
8277 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8278 | // Make a new bucket in 'us-east-1' (source bucket). | ||
8279 | err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
8280 | if err != nil { | ||
8281 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
8282 | return | ||
8283 | } | ||
8284 | |||
8285 | defer cleanupBucket(bucketName, c) | ||
8286 | |||
8287 | // Upload a small source object | ||
8288 | const srcSize = 1024 * 1024 * 5 | ||
8289 | buf := bytes.Repeat([]byte("1"), srcSize) | ||
8290 | _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
8291 | if err != nil { | ||
8292 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
8293 | return | ||
8294 | } | ||
8295 | |||
8296 | // We will append 10 copies of the object. | ||
8297 | srcs := []minio.CopySrcOptions{} | ||
8298 | for i := 0; i < 10; i++ { | ||
8299 | srcs = append(srcs, minio.CopySrcOptions{ | ||
8300 | Bucket: bucketName, | ||
8301 | Object: "srcObject", | ||
8302 | }) | ||
8303 | } | ||
8304 | |||
8305 | // Make the last part very small. | ||
8306 | srcs[9].MatchRange = true | ||
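| // With MatchRange set and Start/End left at zero, the copied range is bytes 0 through 0, i.e. a single byte; hence the expected size of 9*srcSize+1 below. | ||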
8307 | |||
8308 | args["sourceList"] = srcs | ||
8309 | |||
8310 | dst := minio.CopyDestOptions{ | ||
8311 | Bucket: bucketName, | ||
8312 | Object: "dstObject", | ||
8313 | } | ||
8314 | args["destination"] = dst | ||
8315 | |||
8316 | ui, err := c.ComposeObject(context.Background(), dst, srcs...) | ||
8317 | if err != nil { | ||
8318 | logError(testName, function, args, startTime, "", "ComposeObject failed", err) | ||
8319 | return | ||
8320 | } | ||
8321 | |||
8322 | if ui.Size != 9*srcSize+1 { | ||
8323 | logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err) | ||
8324 | return | ||
8325 | } | ||
8326 | |||
8327 | objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{}) | ||
8328 | if err != nil { | ||
8329 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
8330 | return | ||
8331 | } | ||
8332 | |||
8333 | if objProps.Size != 9*srcSize+1 { | ||
8334 | logError(testName, function, args, startTime, "", fmt.Sprintf("Size mismatched! Expected %d got %d", 9*srcSize+1, objProps.Size), err) | ||
8335 | return | ||
8336 | } | ||
8337 | |||
8338 | successLogger(testName, function, args, startTime).Info() | ||
8339 | } | ||
8340 | |||
8341 | // Test concatenating multiple source objects with a V2 client. | ||
8342 | func testCompose10KSourcesV2() { | ||
8343 | // initialize logging params | ||
8344 | startTime := time.Now() | ||
8345 | testName := getFuncName() | ||
8346 | function := "ComposeObject(destination, sourceList)" | ||
8347 | args := map[string]interface{}{} | ||
8348 | |||
8349 | // Instantiate new minio client object | ||
8350 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8351 | &minio.Options{ | ||
8352 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8353 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8354 | }) | ||
8355 | if err != nil { | ||
8356 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
8357 | return | ||
8358 | } | ||
8359 | |||
8360 | testComposeMultipleSources(c) | ||
8361 | } | ||
8362 | |||
8363 | func testEncryptedEmptyObject() { | ||
8364 | // initialize logging params | ||
8365 | startTime := time.Now() | ||
8366 | testName := getFuncName() | ||
8367 | function := "PutObject(bucketName, objectName, reader, objectSize, opts)" | ||
8368 | args := map[string]interface{}{} | ||
8369 | |||
8370 | // Instantiate new minio client object | ||
8371 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8372 | &minio.Options{ | ||
8373 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8374 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8375 | }) | ||
8376 | if err != nil { | ||
8377 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8378 | return | ||
8379 | } | ||
8380 | |||
8381 | // Generate a new random bucket name. | ||
8382 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8383 | args["bucketName"] = bucketName | ||
8384 | // Make a new bucket in 'us-east-1' (source bucket). | ||
8385 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
8386 | if err != nil { | ||
8387 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
8388 | return | ||
8389 | } | ||
8390 | |||
8391 | defer cleanupBucket(bucketName, c) | ||
8392 | |||
8393 | sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object")) | ||
8394 | |||
8395 | // 1. create an sse-c encrypted object to copy by uploading | ||
8396 | const srcSize = 0 | ||
8397 | var buf []byte // Empty buffer | ||
8398 | args["objectName"] = "object" | ||
8399 | _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) | ||
8400 | if err != nil { | ||
8401 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
8402 | return | ||
8403 | } | ||
8404 | |||
8405 | // 2. Test CopyObject for an empty object | ||
8406 | src := minio.CopySrcOptions{ | ||
8407 | Bucket: bucketName, | ||
8408 | Object: "object", | ||
8409 | Encryption: sse, | ||
8410 | } | ||
8411 | |||
8412 | dst := minio.CopyDestOptions{ | ||
8413 | Bucket: bucketName, | ||
8414 | Object: "new-object", | ||
8415 | Encryption: sse, | ||
8416 | } | ||
8417 | |||
8418 | if _, err = c.CopyObject(context.Background(), dst, src); err != nil { | ||
8419 | function = "CopyObject(dst, src)" | ||
8420 | logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err) | ||
8421 | return | ||
8422 | } | ||
8423 | |||
8424 | // 3. Test Key rotation | ||
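| // Key rotation is a server-side copy of the object onto itself: the source is decrypted with the old SSE-C key and re-encrypted with the new one. | ||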
8425 | newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) | ||
8426 | src = minio.CopySrcOptions{ | ||
8427 | Bucket: bucketName, | ||
8428 | Object: "new-object", | ||
8429 | Encryption: sse, | ||
8430 | } | ||
8431 | |||
8432 | dst = minio.CopyDestOptions{ | ||
8433 | Bucket: bucketName, | ||
8434 | Object: "new-object", | ||
8435 | Encryption: newSSE, | ||
8436 | } | ||
8437 | |||
8438 | if _, err = c.CopyObject(context.Background(), dst, src); err != nil { | ||
8439 | function = "CopyObject(dst, src)" | ||
8440 | logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) | ||
8441 | return | ||
8442 | } | ||
8443 | |||
8444 | // 4. Download the object. | ||
8445 | reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) | ||
8446 | if err != nil { | ||
8447 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
8448 | return | ||
8449 | } | ||
8450 | defer reader.Close() | ||
8451 | |||
8452 | decBytes, err := io.ReadAll(reader) | ||
8453 | if err != nil { | ||
8454 | logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) | ||
8455 | return | ||
8456 | } | ||
8457 | if !bytes.Equal(decBytes, buf) { | ||
8458 | logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) | ||
8459 | return | ||
8460 | } | ||
8461 | |||
8462 | delete(args, "objectName") | ||
8463 | successLogger(testName, function, args, startTime).Info() | ||
8464 | } | ||
8465 | |||
8466 | func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { | ||
8467 | // initialize logging params | ||
8468 | startTime := time.Now() | ||
8469 | testName := getFuncNameLoc(2) | ||
8470 | function := "CopyObject(destination, source)" | ||
8471 | args := map[string]interface{}{} | ||
8472 | var srcEncryption, dstEncryption encrypt.ServerSide | ||
8473 | |||
8474 | // Make a new bucket in 'us-east-1' (source bucket). | ||
8475 | err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
8476 | if err != nil { | ||
8477 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
8478 | return | ||
8479 | } | ||
8480 | |||
8481 | defer cleanupBucket(bucketName, c) | ||
8482 | |||
8483 | // 1. create an sse-c encrypted object to copy by uploading | ||
8484 | const srcSize = 1024 * 1024 | ||
8485 | buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB | ||
8486 | _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ | ||
8487 | ServerSideEncryption: sseSrc, | ||
8488 | }) | ||
8489 | if err != nil { | ||
8490 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
8491 | return | ||
8492 | } | ||
8493 | |||
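| // Only SSE-C sources need copy-source decryption headers; SSE-S3 objects are decrypted by the server automatically, so srcEncryption stays nil for them. | ||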
8494 | if sseSrc != nil && sseSrc.Type() != encrypt.S3 { | ||
8495 | srcEncryption = sseSrc | ||
8496 | } | ||
8497 | |||
8498 | // 2. copy object and change encryption key | ||
8499 | src := minio.CopySrcOptions{ | ||
8500 | Bucket: bucketName, | ||
8501 | Object: "srcObject", | ||
8502 | Encryption: srcEncryption, | ||
8503 | } | ||
8504 | args["source"] = src | ||
8505 | |||
8506 | dst := minio.CopyDestOptions{ | ||
8507 | Bucket: bucketName, | ||
8508 | Object: "dstObject", | ||
8509 | Encryption: sseDst, | ||
8510 | } | ||
8511 | args["destination"] = dst | ||
8512 | |||
8513 | _, err = c.CopyObject(context.Background(), dst, src) | ||
8514 | if err != nil { | ||
8515 | logError(testName, function, args, startTime, "", "CopyObject failed", err) | ||
8516 | return | ||
8517 | } | ||
8518 | |||
8519 | if sseDst != nil && sseDst.Type() != encrypt.S3 { | ||
8520 | dstEncryption = sseDst | ||
8521 | } | ||
8522 | // 3. get copied object and check if content is equal | ||
8523 | coreClient := minio.Core{Client: c} | ||
8524 | reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) | ||
8525 | if err != nil { | ||
8526 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
8527 | return | ||
8528 | } | ||
8529 | |||
8530 | decBytes, err := io.ReadAll(reader) | ||
8531 | if err != nil { | ||
8532 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
8533 | return | ||
8534 | } | ||
8535 | if !bytes.Equal(decBytes, buf) { | ||
8536 | logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) | ||
8537 | return | ||
8538 | } | ||
8539 | reader.Close() | ||
8540 | |||
8541 | // Test key rotation for source object in-place. | ||
8542 | var newSSE encrypt.ServerSide | ||
8543 | if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { | ||
8544 | newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key | ||
8545 | } | ||
8546 | if sseSrc != nil && sseSrc.Type() == encrypt.S3 { | ||
8547 | newSSE = encrypt.NewSSE() | ||
8548 | } | ||
8549 | if newSSE != nil { | ||
8550 | dst = minio.CopyDestOptions{ | ||
8551 | Bucket: bucketName, | ||
8552 | Object: "srcObject", | ||
8553 | Encryption: newSSE, | ||
8554 | } | ||
8555 | args["destination"] = dst | ||
8556 | |||
8557 | _, err = c.CopyObject(context.Background(), dst, src) | ||
8558 | if err != nil { | ||
8559 | logError(testName, function, args, startTime, "", "CopyObject failed", err) | ||
8560 | return | ||
8561 | } | ||
8562 | |||
8563 | // Get copied object and check if content is equal | ||
8564 | reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) | ||
8565 | if err != nil { | ||
8566 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
8567 | return | ||
8568 | } | ||
8569 | |||
8570 | decBytes, err = io.ReadAll(reader) | ||
8571 | if err != nil { | ||
8572 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
8573 | return | ||
8574 | } | ||
8575 | if !bytes.Equal(decBytes, buf) { | ||
8576 | logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) | ||
8577 | return | ||
8578 | } | ||
8579 | reader.Close() | ||
8580 | |||
8581 | // Test in-place decryption. | ||
8582 | dst = minio.CopyDestOptions{ | ||
8583 | Bucket: bucketName, | ||
8584 | Object: "srcObject", | ||
8585 | } | ||
8586 | args["destination"] = dst | ||
8587 | |||
8588 | src = minio.CopySrcOptions{ | ||
8589 | Bucket: bucketName, | ||
8590 | Object: "srcObject", | ||
8591 | Encryption: newSSE, | ||
8592 | } | ||
8593 | args["source"] = src | ||
8594 | _, err = c.CopyObject(context.Background(), dst, src) | ||
8595 | if err != nil { | ||
8596 | logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) | ||
8597 | return | ||
8598 | } | ||
8599 | } | ||
8600 | |||
8601 | // Get copied decrypted object and check if content is equal | ||
8602 | reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{}) | ||
8603 | if err != nil { | ||
8604 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
8605 | return | ||
8606 | } | ||
8607 | defer reader.Close() | ||
8608 | |||
8609 | decBytes, err = io.ReadAll(reader) | ||
8610 | if err != nil { | ||
8611 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
8612 | return | ||
8613 | } | ||
8614 | if !bytes.Equal(decBytes, buf) { | ||
8615 | logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) | ||
8616 | return | ||
8617 | } | ||
8618 | |||
8619 | successLogger(testName, function, args, startTime).Info() | ||
8620 | } | ||
8621 | |||
8622 | // Test encrypted copy object | ||
8623 | func testUnencryptedToSSECCopyObject() { | ||
8624 | // initialize logging params | ||
8625 | startTime := time.Now() | ||
8626 | testName := getFuncName() | ||
8627 | function := "CopyObject(destination, source)" | ||
8628 | args := map[string]interface{}{} | ||
8629 | |||
8630 | // Instantiate new minio client object | ||
8631 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8632 | &minio.Options{ | ||
8633 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8634 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8635 | }) | ||
8636 | if err != nil { | ||
8637 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8638 | return | ||
8639 | } | ||
8640 | // Generate a new random bucket name. | ||
8641 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8642 | |||
8643 | sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) | ||
8644 | // c.TraceOn(os.Stderr) | ||
8645 | testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) | ||
8646 | } | ||
8647 | |||
8648 | // Test encrypted copy object | ||
8649 | func testUnencryptedToSSES3CopyObject() { | ||
8650 | // initialize logging params | ||
8651 | startTime := time.Now() | ||
8652 | testName := getFuncName() | ||
8653 | function := "CopyObject(destination, source)" | ||
8654 | args := map[string]interface{}{} | ||
8655 | |||
8656 | // Instantiate new minio client object | ||
8657 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8658 | &minio.Options{ | ||
8659 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8660 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8661 | }) | ||
8662 | if err != nil { | ||
8663 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8664 | return | ||
8665 | } | ||
8666 | // Generate a new random bucket name. | ||
8667 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8668 | |||
8669 | var sseSrc encrypt.ServerSide | ||
8670 | sseDst := encrypt.NewSSE() | ||
8671 | // c.TraceOn(os.Stderr) | ||
8672 | testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) | ||
8673 | } | ||
8674 | |||
8675 | // Test encrypted copy object | ||
8676 | func testUnencryptedToUnencryptedCopyObject() { | ||
8677 | // initialize logging params | ||
8678 | startTime := time.Now() | ||
8679 | testName := getFuncName() | ||
8680 | function := "CopyObject(destination, source)" | ||
8681 | args := map[string]interface{}{} | ||
8682 | |||
8683 | // Instantiate new minio client object | ||
8684 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8685 | &minio.Options{ | ||
8686 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8687 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8688 | }) | ||
8689 | if err != nil { | ||
8690 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8691 | return | ||
8692 | } | ||
8693 | // Generate a new random bucket name. | ||
8694 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8695 | |||
8696 | var sseSrc, sseDst encrypt.ServerSide | ||
8697 | // c.TraceOn(os.Stderr) | ||
8698 | testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) | ||
8699 | } | ||
8700 | |||
8701 | // Test encrypted copy object | ||
8702 | func testEncryptedSSECToSSECCopyObject() { | ||
8703 | // initialize logging params | ||
8704 | startTime := time.Now() | ||
8705 | testName := getFuncName() | ||
8706 | function := "CopyObject(destination, source)" | ||
8707 | args := map[string]interface{}{} | ||
8708 | |||
8709 | // Instantiate new minio client object | ||
8710 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8711 | &minio.Options{ | ||
8712 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8713 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8714 | }) | ||
8715 | if err != nil { | ||
8716 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8717 | return | ||
8718 | } | ||
8719 | // Generate a new random bucket name. | ||
8720 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8721 | |||
8722 | sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) | ||
8723 | sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) | ||
8724 | // c.TraceOn(os.Stderr) | ||
8725 | testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) | ||
8726 | } | ||
8727 | |||
8728 | // Test encrypted copy object | ||
8729 | func testEncryptedSSECToSSES3CopyObject() { | ||
8730 | // initialize logging params | ||
8731 | startTime := time.Now() | ||
8732 | testName := getFuncName() | ||
8733 | function := "CopyObject(destination, source)" | ||
8734 | args := map[string]interface{}{} | ||
8735 | |||
8736 | // Instantiate new minio client object | ||
8737 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8738 | &minio.Options{ | ||
8739 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8740 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8741 | }) | ||
8742 | if err != nil { | ||
8743 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8744 | return | ||
8745 | } | ||
8746 | // Generate a new random bucket name. | ||
8747 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8748 | |||
8749 | sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) | ||
8750 | sseDst := encrypt.NewSSE() | ||
8751 | // c.TraceOn(os.Stderr) | ||
8752 | testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) | ||
8753 | } | ||
8754 | |||
8755 | // Test encrypted copy object | ||
8756 | func testEncryptedSSECToUnencryptedCopyObject() { | ||
8757 | // initialize logging params | ||
8758 | startTime := time.Now() | ||
8759 | testName := getFuncName() | ||
8760 | function := "CopyObject(destination, source)" | ||
8761 | args := map[string]interface{}{} | ||
8762 | |||
8763 | // Instantiate new minio client object | ||
8764 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8765 | &minio.Options{ | ||
8766 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8767 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8768 | }) | ||
8769 | if err != nil { | ||
8770 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8771 | return | ||
8772 | } | ||
8773 | // Generate a new random bucket name. | ||
8774 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8775 | |||
8776 | sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) | ||
8777 | var sseDst encrypt.ServerSide | ||
8778 | // c.TraceOn(os.Stderr) | ||
8779 | testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) | ||
8780 | } | ||
8781 | |||
8782 | // Test encrypted copy object | ||
8783 | func testEncryptedSSES3ToSSECCopyObject() { | ||
8784 | // initialize logging params | ||
8785 | startTime := time.Now() | ||
8786 | testName := getFuncName() | ||
8787 | function := "CopyObject(destination, source)" | ||
8788 | args := map[string]interface{}{} | ||
8789 | |||
8790 | // Instantiate new minio client object | ||
8791 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8792 | &minio.Options{ | ||
8793 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8794 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8795 | }) | ||
8796 | if err != nil { | ||
8797 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8798 | return | ||
8799 | } | ||
8800 | // Generate a new random bucket name. | ||
8801 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8802 | |||
8803 | sseSrc := encrypt.NewSSE() | ||
8804 | sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) | ||
8805 | // c.TraceOn(os.Stderr) | ||
8806 | testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) | ||
8807 | } | ||
8808 | |||
8809 | // Test encrypted copy object | ||
8810 | func testEncryptedSSES3ToSSES3CopyObject() { | ||
8811 | // initialize logging params | ||
8812 | startTime := time.Now() | ||
8813 | testName := getFuncName() | ||
8814 | function := "CopyObject(destination, source)" | ||
8815 | args := map[string]interface{}{} | ||
8816 | |||
8817 | // Instantiate new minio client object | ||
8818 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8819 | &minio.Options{ | ||
8820 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8821 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8822 | }) | ||
8823 | if err != nil { | ||
8824 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8825 | return | ||
8826 | } | ||
8827 | // Generate a new random bucket name. | ||
8828 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8829 | |||
8830 | sseSrc := encrypt.NewSSE() | ||
8831 | sseDst := encrypt.NewSSE() | ||
8832 | // c.TraceOn(os.Stderr) | ||
8833 | testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) | ||
8834 | } | ||
8835 | |||
8836 | // Test encrypted copy object | ||
8837 | func testEncryptedSSES3ToUnencryptedCopyObject() { | ||
8838 | // initialize logging params | ||
8839 | startTime := time.Now() | ||
8840 | testName := getFuncName() | ||
8841 | function := "CopyObject(destination, source)" | ||
8842 | args := map[string]interface{}{} | ||
8843 | |||
8844 | // Instantiate new minio client object | ||
8845 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8846 | &minio.Options{ | ||
8847 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8848 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8849 | }) | ||
8850 | if err != nil { | ||
8851 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8852 | return | ||
8853 | } | ||
8854 | // Generate a new random bucket name. | ||
8855 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8856 | |||
8857 | sseSrc := encrypt.NewSSE() | ||
8858 | var sseDst encrypt.ServerSide | ||
8859 | // c.TraceOn(os.Stderr) | ||
8860 | testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) | ||
8861 | } | ||
8862 | |||
8863 | // Test encrypted copy object | ||
8864 | func testEncryptedCopyObjectV2() { | ||
8865 | // initialize logging params | ||
8866 | startTime := time.Now() | ||
8867 | testName := getFuncName() | ||
8868 | function := "CopyObject(destination, source)" | ||
8869 | args := map[string]interface{}{} | ||
8870 | |||
8871 | // Instantiate new minio client object | ||
8872 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8873 | &minio.Options{ | ||
8874 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8875 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8876 | }) | ||
8877 | if err != nil { | ||
8878 | logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) | ||
8879 | return | ||
8880 | } | ||
8881 | // Generate a new random bucket name. | ||
8882 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
8883 | |||
8884 | sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) | ||
8885 | sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) | ||
8886 | // c.TraceOn(os.Stderr) | ||
8887 | testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) | ||
8888 | } | ||
8889 | |||
8890 | func testDecryptedCopyObject() { | ||
8891 | // initialize logging params | ||
8892 | startTime := time.Now() | ||
8893 | testName := getFuncName() | ||
8894 | function := "CopyObject(destination, source)" | ||
8895 | args := map[string]interface{}{} | ||
8896 | |||
8897 | // Instantiate new minio client object | ||
8898 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
8899 | &minio.Options{ | ||
8900 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8901 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8902 | }) | ||
8903 | if err != nil { | ||
8904 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8905 | return | ||
8906 | } | ||
8907 | |||
8908 | bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" | ||
8909 | if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil { | ||
8910 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
8911 | return | ||
8912 | } | ||
8913 | |||
8914 | defer cleanupBucket(bucketName, c) | ||
8915 | |||
8916 | encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) | ||
8917 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ | ||
8918 | ServerSideEncryption: encryption, | ||
8919 | }) | ||
8920 | if err != nil { | ||
8921 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
8922 | return | ||
8923 | } | ||
8924 | |||
8925 | src := minio.CopySrcOptions{ | ||
8926 | Bucket: bucketName, | ||
8927 | Object: objectName, | ||
8928 | Encryption: encrypt.SSECopy(encryption), | ||
8929 | } | ||
8930 | args["source"] = src | ||
8931 | |||
8932 | dst := minio.CopyDestOptions{ | ||
8933 | Bucket: bucketName, | ||
8934 | Object: "decrypted-" + objectName, | ||
8935 | } | ||
8936 | args["destination"] = dst | ||
8937 | |||
8938 | if _, err = c.CopyObject(context.Background(), dst, src); err != nil { | ||
8939 | logError(testName, function, args, startTime, "", "CopyObject failed", err) | ||
8940 | return | ||
8941 | } | ||
8942 | if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { | ||
8943 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
8944 | return | ||
8945 | } | ||
8946 | successLogger(testName, function, args, startTime).Info() | ||
8947 | } | ||
8948 | |||
8949 | func testSSECMultipartEncryptedToSSECCopyObjectPart() { | ||
8950 | // initialize logging params | ||
8951 | startTime := time.Now() | ||
8952 | testName := getFuncName() | ||
8953 | function := "CopyObjectPart(destination, source)" | ||
8954 | args := map[string]interface{}{} | ||
8955 | |||
8956 | // Instantiate new minio client object | ||
8957 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
8958 | &minio.Options{ | ||
8959 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
8960 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
8961 | }) | ||
8962 | if err != nil { | ||
8963 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
8964 | return | ||
8965 | } | ||
8966 | |||
8967 | // Instantiate new core client object. | ||
8968 | c := minio.Core{Client: client} | ||
8969 | |||
8970 | // Enable tracing, write to stderr. | ||
8971 | // c.TraceOn(os.Stderr) | ||
8972 | |||
8973 | // Set user agent. | ||
8974 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
8975 | |||
8976 | // Generate a new random bucket name. | ||
8977 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
8978 | |||
8979 | // Make a new bucket. | ||
8980 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
8981 | if err != nil { | ||
8982 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
8983 | return | ||
8984 | } | ||
8985 | defer cleanupBucket(bucketName, client) | ||
8986 | // Make a buffer with 6MB of data | ||
8987 | buf := bytes.Repeat([]byte("abcdef"), 1024*1024) | ||
8988 | |||
8989 | // Save the data | ||
8990 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
8991 | password := "correct horse battery staple" | ||
8992 | srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) | ||
8993 | |||
8994 | // Upload a 6MB object using multipart mechanism | ||
8995 | uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) | ||
8996 | if err != nil { | ||
8997 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
8998 | return | ||
8999 | } | ||
9000 | |||
9001 | var completeParts []minio.CompletePart | ||
9002 | |||
9003 | part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, | ||
9004 | bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, | ||
9005 | minio.PutObjectPartOptions{SSE: srcencryption}, | ||
9006 | ) | ||
9007 | if err != nil { | ||
9008 | logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) | ||
9009 | return | ||
9010 | } | ||
9011 | completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) | ||
9012 | |||
9013 | part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, | ||
9014 | bytes.NewReader(buf[5*1024*1024:]), 1024*1024, | ||
9015 | minio.PutObjectPartOptions{SSE: srcencryption}, | ||
9016 | ) | ||
9017 | if err != nil { | ||
9018 | logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) | ||
9019 | return | ||
9020 | } | ||
9021 | completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) | ||
9022 | |||
9023 | // Complete the multipart upload | ||
9024 | _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{}) | ||
9025 | if err != nil { | ||
9026 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
9027 | return | ||
9028 | } | ||
9029 | |||
9030 | // Stat the object and check its length matches | ||
9031 | objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) | ||
9032 | if err != nil { | ||
9033 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9034 | return | ||
9035 | } | ||
9036 | |||
9037 | destBucketName := bucketName | ||
9038 | destObjectName := objectName + "-dest" | ||
9039 | dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) | ||
9040 | |||
9041 | uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) | ||
9042 | if err != nil { | ||
9043 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
9044 | return | ||
9045 | } | ||
9046 | |||
9047 | // Content of the destination object will be two copies of the | ||
9048 | // source object concatenated, followed by the first byte of the | ||
9049 | // source object. | ||
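| // Marshal both the SSE-C copy-source headers (to decrypt the source) and the destination SSE-C headers into the flat metadata map CopyObjectPart expects. | ||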
9050 | metadata := make(map[string]string) | ||
9051 | header := make(http.Header) | ||
9052 | encrypt.SSECopy(srcencryption).Marshal(header) | ||
9053 | dstencryption.Marshal(header) | ||
9054 | for k, v := range header { | ||
9055 | metadata[k] = v[0] | ||
9056 | } | ||
9057 | |||
9058 | metadata["x-amz-copy-source-if-match"] = objInfo.ETag | ||
9059 | |||
9060 | // First of three parts | ||
9061 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
9062 | if err != nil { | ||
9063 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9064 | return | ||
9065 | } | ||
9066 | |||
9067 | // Second of three parts | ||
9068 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
9069 | if err != nil { | ||
9070 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9071 | return | ||
9072 | } | ||
9073 | |||
9074 | // Last of three parts | ||
9075 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
9076 | if err != nil { | ||
9077 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9078 | return | ||
9079 | } | ||
9080 | |||
9081 | // Complete the multipart upload | ||
9082 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
9083 | if err != nil { | ||
9084 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
9085 | return | ||
9086 | } | ||
9087 | |||
9088 | // Stat the object and check its length matches | ||
9089 | objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) | ||
9090 | if err != nil { | ||
9091 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9092 | return | ||
9093 | } | ||
9094 | |||
9095 | if objInfo.Size != (6*1024*1024)*2+1 { | ||
9096 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
9097 | return | ||
9098 | } | ||
9099 | |||
9100 | // Now we read the data back | ||
9101 | getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} | ||
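| // SetRange bounds are inclusive, so this requests exactly the first 6 MiB (bytes 0 through 6*1024*1024-1). | ||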
9102 | getOpts.SetRange(0, 6*1024*1024-1) | ||
9103 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9104 | if err != nil { | ||
9105 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9106 | return | ||
9107 | } | ||
9108 | getBuf := make([]byte, 6*1024*1024) | ||
9109 | _, err = readFull(r, getBuf) | ||
9110 | if err != nil { | ||
9111 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9112 | return | ||
9113 | } | ||
9114 | if !bytes.Equal(getBuf, buf) { | ||
9115 | logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) | ||
9116 | return | ||
9117 | } | ||
9118 | |||
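| // An end offset of 0 with a non-zero start means "from start to the end of the object". | ||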
9119 | getOpts.SetRange(6*1024*1024, 0) | ||
9120 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9121 | if err != nil { | ||
9122 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9123 | return | ||
9124 | } | ||
9125 | getBuf = make([]byte, 6*1024*1024+1) | ||
9126 | _, err = readFull(r, getBuf) | ||
9127 | if err != nil { | ||
9128 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9129 | return | ||
9130 | } | ||
9131 | if !bytes.Equal(getBuf[:6*1024*1024], buf) { | ||
9132 | logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) | ||
9133 | return | ||
9134 | } | ||
9135 | if getBuf[6*1024*1024] != buf[0] { | ||
9136 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
9137 | return | ||
9138 | } | ||
9139 | |||
9140 | successLogger(testName, function, args, startTime).Info() | ||
9141 | |||
9142 | // No need to remove destBucketName; it is the same bucket as bucketName. | ||
9143 | } | ||
9144 | |||
9145 | // Test Core CopyObjectPart implementation | ||
9146 | func testSSECEncryptedToSSECCopyObjectPart() { | ||
9147 | // initialize logging params | ||
9148 | startTime := time.Now() | ||
9149 | testName := getFuncName() | ||
9150 | function := "CopyObjectPart(destination, source)" | ||
9151 | args := map[string]interface{}{} | ||
9152 | |||
9153 | // Instantiate new minio client object | ||
9154 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
9155 | &minio.Options{ | ||
9156 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
9157 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
9158 | }) | ||
9159 | if err != nil { | ||
9160 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
9161 | return | ||
9162 | } | ||
9163 | |||
9164 | // Instantiate new core client object. | ||
9165 | c := minio.Core{Client: client} | ||
9166 | |||
9167 | // Enable tracing, write to stderr. | ||
9168 | // c.TraceOn(os.Stderr) | ||
9169 | |||
9170 | // Set user agent. | ||
9171 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
9172 | |||
9173 | // Generate a new random bucket name. | ||
9174 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
9175 | |||
9176 | // Make a new bucket. | ||
9177 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
9178 | if err != nil { | ||
9179 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
9180 | return | ||
9181 | } | ||
9182 | defer cleanupBucket(bucketName, client) | ||
9183 | // Make a buffer with 5MB of data | ||
9184 | buf := bytes.Repeat([]byte("abcde"), 1024*1024) | ||
9185 | |||
9186 | // Save the data | ||
9187 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
9188 | password := "correct horse battery staple" | ||
9189 | srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) | ||
9190 | putmetadata := map[string]string{ | ||
9191 | "Content-Type": "binary/octet-stream", | ||
9192 | } | ||
9193 | opts := minio.PutObjectOptions{ | ||
9194 | UserMetadata: putmetadata, | ||
9195 | ServerSideEncryption: srcencryption, | ||
9196 | } | ||
9197 | uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) | ||
9198 | if err != nil { | ||
9199 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
9200 | return | ||
9201 | } | ||
9202 | |||
9203 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) | ||
9204 | if err != nil { | ||
9205 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9206 | return | ||
9207 | } | ||
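// For SSE-C objects even a HEAD request must present the customer key,
// which is why StatObject above is given the ServerSideEncryption option.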
9208 | |||
9209 | if st.Size != int64(len(buf)) { | ||
9210 | logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) | ||
9211 | return | ||
9212 | } | ||
9213 | |||
9214 | destBucketName := bucketName | ||
9215 | destObjectName := objectName + "-dest" | ||
9216 | dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) | ||
9217 | |||
9218 | uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) | ||
9219 | if err != nil { | ||
9220 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
9221 | return | ||
9222 | } | ||
9223 | |||
9224 | // Content of the destination object will be two copies of | ||
9225 | // the data in `objectName` concatenated, followed by the | ||
9226 | // first byte of that data. | ||
9227 | metadata := make(map[string]string) | ||
9228 | header := make(http.Header) | ||
9229 | encrypt.SSECopy(srcencryption).Marshal(header) | ||
9230 | dstencryption.Marshal(header) | ||
9231 | for k, v := range header { | ||
9232 | metadata[k] = v[0] | ||
9233 | } | ||
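// An SSE-C to SSE-C copy carries two sets of headers: encrypt.SSECopy
// marshals the x-amz-copy-source-server-side-encryption-customer-* headers
// that let the server decrypt the source, while dstencryption marshals the
// x-amz-server-side-encryption-customer-* headers that encrypt the written
// part. CopyObjectPart takes them flattened into an ordinary string map.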
9234 | |||
9235 | metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag | ||
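// Pinning the copy to the ETag returned by PutObject makes each part copy
// conditional: the server rejects it with a precondition failure if the
// source object has changed in the meantime.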
9236 | |||
9237 | // First of three parts | ||
9238 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
9239 | if err != nil { | ||
9240 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9241 | return | ||
9242 | } | ||
9243 | |||
9244 | // Second of three parts | ||
9245 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
9246 | if err != nil { | ||
9247 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9248 | return | ||
9249 | } | ||
9250 | |||
9251 | // Last of three parts | ||
9252 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
9253 | if err != nil { | ||
9254 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9255 | return | ||
9256 | } | ||
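// CopyObjectPart(..., partNumber, startOffset, length, ...) copies the whole
// source object when length is -1, so parts 1 and 2 are full 5MiB copies,
// while part 3 (offset 0, length 1) copies a single byte. The completed
// object is therefore 2*5MiB+1 bytes, which the size check below verifies.
// Only the final part of a multipart upload may be smaller than 5MiB, which
// is why the source buffer is exactly the minimum part size.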
9257 | |||
9258 | // Complete the multipart upload | ||
9259 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
9260 | if err != nil { | ||
9261 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
9262 | return | ||
9263 | } | ||
9264 | |||
9265 | // Stat the object and check its length matches | ||
9266 | objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) | ||
9267 | if err != nil { | ||
9268 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9269 | return | ||
9270 | } | ||
9271 | |||
9272 | if objInfo.Size != (5*1024*1024)*2+1 { | ||
9273 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
9274 | return | ||
9275 | } | ||
9276 | |||
9277 | // Now we read the data back | ||
9278 | getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} | ||
9279 | getOpts.SetRange(0, 5*1024*1024-1) | ||
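// SetRange takes an inclusive byte range, so (0, 5MiB-1) reads back exactly
// the first part; the SetRange(offset, 0) form used further down means
// "from offset to the end of the object".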
9280 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9281 | if err != nil { | ||
9282 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9283 | return | ||
9284 | } | ||
9285 | getBuf := make([]byte, 5*1024*1024) | ||
9286 | _, err = readFull(r, getBuf) | ||
9287 | if err != nil { | ||
9288 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9289 | return | ||
9290 | } | ||
9291 | if !bytes.Equal(getBuf, buf) { | ||
9292 | logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) | ||
9293 | return | ||
9294 | } | ||
9295 | |||
9296 | getOpts.SetRange(5*1024*1024, 0) | ||
9297 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9298 | if err != nil { | ||
9299 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9300 | return | ||
9301 | } | ||
9302 | getBuf = make([]byte, 5*1024*1024+1) | ||
9303 | _, err = readFull(r, getBuf) | ||
9304 | if err != nil { | ||
9305 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9306 | return | ||
9307 | } | ||
9308 | if !bytes.Equal(getBuf[:5*1024*1024], buf) { | ||
9309 | logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) | ||
9310 | return | ||
9311 | } | ||
9312 | if getBuf[5*1024*1024] != buf[0] { | ||
9313 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
9314 | return | ||
9315 | } | ||
9316 | |||
9317 | successLogger(testName, function, args, startTime).Info() | ||
9318 | |||
9319 | // No need to remove destBucketName; it is the same bucket as bucketName. | ||
9320 | } | ||
9321 | |||
9322 | // Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy | ||
9323 | func testSSECEncryptedToUnencryptedCopyPart() { | ||
9324 | // initialize logging params | ||
9325 | startTime := time.Now() | ||
9326 | testName := getFuncName() | ||
9327 | function := "CopyObjectPart(destination, source)" | ||
9328 | args := map[string]interface{}{} | ||
9329 | |||
9330 | // Instantiate new minio client object | ||
9331 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
9332 | &minio.Options{ | ||
9333 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
9334 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
9335 | }) | ||
9336 | if err != nil { | ||
9337 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
9338 | return | ||
9339 | } | ||
9340 | |||
9341 | // Instantiate new core client object. | ||
9342 | c := minio.Core{client} | ||
9343 | |||
9344 | // Enable tracing, write to stderr. | ||
9345 | // c.TraceOn(os.Stderr) | ||
9346 | |||
9347 | // Set user agent. | ||
9348 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
9349 | |||
9350 | // Generate a new random bucket name. | ||
9351 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
9352 | |||
9353 | // Make a new bucket. | ||
9354 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
9355 | if err != nil { | ||
9356 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
9357 | return | ||
9358 | } | ||
9359 | defer cleanupBucket(bucketName, client) | ||
9360 | // Make a buffer with 5MB of data | ||
9361 | buf := bytes.Repeat([]byte("abcde"), 1024*1024) | ||
9362 | |||
9363 | // Save the data | ||
9364 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
9365 | password := "correct horse battery staple" | ||
9366 | srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) | ||
9367 | |||
9368 | opts := minio.PutObjectOptions{ | ||
9369 | UserMetadata: map[string]string{ | ||
9370 | "Content-Type": "binary/octet-stream", | ||
9371 | }, | ||
9372 | ServerSideEncryption: srcencryption, | ||
9373 | } | ||
9374 | uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) | ||
9375 | if err != nil { | ||
9376 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
9377 | return | ||
9378 | } | ||
9379 | |||
9380 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) | ||
9381 | if err != nil { | ||
9382 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9383 | return | ||
9384 | } | ||
9385 | |||
9386 | if st.Size != int64(len(buf)) { | ||
9387 | logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) | ||
9388 | return | ||
9389 | } | ||
9390 | |||
9391 | destBucketName := bucketName | ||
9392 | destObjectName := objectName + "-dest" | ||
9393 | var dstencryption encrypt.ServerSide | ||
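// The zero value of the encrypt.ServerSide interface is nil, so the
// multipart upload below is created unencrypted and only the source-side
// SSECopy headers are attached to the part copies.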
9394 | |||
9395 | uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) | ||
9396 | if err != nil { | ||
9397 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
9398 | return | ||
9399 | } | ||
9400 | |||
9401 | // Content of the destination object will be two copies of | ||
9402 | // the data in `objectName` concatenated, followed by the | ||
9403 | // first byte of that data. | ||
9404 | metadata := make(map[string]string) | ||
9405 | header := make(http.Header) | ||
9406 | encrypt.SSECopy(srcencryption).Marshal(header) | ||
9407 | for k, v := range header { | ||
9408 | metadata[k] = v[0] | ||
9409 | } | ||
9410 | |||
9411 | metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag | ||
9412 | |||
9413 | // First of three parts | ||
9414 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
9415 | if err != nil { | ||
9416 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9417 | return | ||
9418 | } | ||
9419 | |||
9420 | // Second of three parts | ||
9421 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
9422 | if err != nil { | ||
9423 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9424 | return | ||
9425 | } | ||
9426 | |||
9427 | // Last of three parts | ||
9428 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
9429 | if err != nil { | ||
9430 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9431 | return | ||
9432 | } | ||
9433 | |||
9434 | // Complete the multipart upload | ||
9435 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
9436 | if err != nil { | ||
9437 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
9438 | return | ||
9439 | } | ||
9440 | |||
9441 | // Stat the object and check its length matches | ||
9442 | objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) | ||
9443 | if err != nil { | ||
9444 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9445 | return | ||
9446 | } | ||
9447 | |||
9448 | if objInfo.Size != (5*1024*1024)*2+1 { | ||
9449 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
9450 | return | ||
9451 | } | ||
9452 | |||
9453 | // Now we read the data back | ||
9454 | getOpts := minio.GetObjectOptions{} | ||
9455 | getOpts.SetRange(0, 5*1024*1024-1) | ||
9456 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9457 | if err != nil { | ||
9458 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9459 | return | ||
9460 | } | ||
9461 | getBuf := make([]byte, 5*1024*1024) | ||
9462 | _, err = readFull(r, getBuf) | ||
9463 | if err != nil { | ||
9464 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9465 | return | ||
9466 | } | ||
9467 | if !bytes.Equal(getBuf, buf) { | ||
9468 | logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) | ||
9469 | return | ||
9470 | } | ||
9471 | |||
9472 | getOpts.SetRange(5*1024*1024, 0) | ||
9473 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9474 | if err != nil { | ||
9475 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9476 | return | ||
9477 | } | ||
9478 | getBuf = make([]byte, 5*1024*1024+1) | ||
9479 | _, err = readFull(r, getBuf) | ||
9480 | if err != nil { | ||
9481 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9482 | return | ||
9483 | } | ||
9484 | if !bytes.Equal(getBuf[:5*1024*1024], buf) { | ||
9485 | logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) | ||
9486 | return | ||
9487 | } | ||
9488 | if getBuf[5*1024*1024] != buf[0] { | ||
9489 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
9490 | return | ||
9491 | } | ||
9492 | |||
9493 | successLogger(testName, function, args, startTime).Info() | ||
9494 | |||
9495 | // No need to remove destBucketName; it is the same bucket as bucketName. | ||
9496 | } | ||
9497 | |||
9498 | // Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy | ||
9499 | func testSSECEncryptedToSSES3CopyObjectPart() { | ||
9500 | // initialize logging params | ||
9501 | startTime := time.Now() | ||
9502 | testName := getFuncName() | ||
9503 | function := "CopyObjectPart(destination, source)" | ||
9504 | args := map[string]interface{}{} | ||
9505 | |||
9506 | // Instantiate new minio client object | ||
9507 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
9508 | &minio.Options{ | ||
9509 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
9510 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
9511 | }) | ||
9512 | if err != nil { | ||
9513 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
9514 | return | ||
9515 | } | ||
9516 | |||
9517 | // Instantiate new core client object. | ||
9518 | c := minio.Core{client} | ||
9519 | |||
9520 | // Enable tracing, write to stderr. | ||
9521 | // c.TraceOn(os.Stderr) | ||
9522 | |||
9523 | // Set user agent. | ||
9524 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
9525 | |||
9526 | // Generate a new random bucket name. | ||
9527 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
9528 | |||
9529 | // Make a new bucket. | ||
9530 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
9531 | if err != nil { | ||
9532 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
9533 | return | ||
9534 | } | ||
9535 | defer cleanupBucket(bucketName, client) | ||
9536 | // Make a buffer with 5MB of data | ||
9537 | buf := bytes.Repeat([]byte("abcde"), 1024*1024) | ||
9538 | |||
9539 | // Save the data | ||
9540 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
9541 | password := "correct horse battery staple" | ||
9542 | srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) | ||
9543 | putmetadata := map[string]string{ | ||
9544 | "Content-Type": "binary/octet-stream", | ||
9545 | } | ||
9546 | opts := minio.PutObjectOptions{ | ||
9547 | UserMetadata: putmetadata, | ||
9548 | ServerSideEncryption: srcencryption, | ||
9549 | } | ||
9550 | |||
9551 | uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) | ||
9552 | if err != nil { | ||
9553 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
9554 | return | ||
9555 | } | ||
9556 | |||
9557 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) | ||
9558 | if err != nil { | ||
9559 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9560 | return | ||
9561 | } | ||
9562 | |||
9563 | if st.Size != int64(len(buf)) { | ||
9564 | logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) | ||
9565 | return | ||
9566 | } | ||
9567 | |||
9568 | destBucketName := bucketName | ||
9569 | destObjectName := objectName + "-dest" | ||
9570 | dstencryption := encrypt.NewSSE() | ||
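// encrypt.NewSSE() selects SSE-S3, i.e. server-managed keys: it marshals
// only the x-amz-server-side-encryption: AES256 header, and no key material
// is needed to read the object back, which is why the Stat and Get calls
// below use empty options.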
9571 | |||
9572 | uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) | ||
9573 | if err != nil { | ||
9574 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
9575 | return | ||
9576 | } | ||
9577 | |||
9578 | // Content of the destination object will be two copies of | ||
9579 | // the data in `objectName` concatenated, followed by the | ||
9580 | // first byte of that data. | ||
9581 | metadata := make(map[string]string) | ||
9582 | header := make(http.Header) | ||
9583 | encrypt.SSECopy(srcencryption).Marshal(header) | ||
9584 | dstencryption.Marshal(header) | ||
9585 | |||
9586 | for k, v := range header { | ||
9587 | metadata[k] = v[0] | ||
9588 | } | ||
9589 | |||
9590 | metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag | ||
9591 | |||
9592 | // First of three parts | ||
9593 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
9594 | if err != nil { | ||
9595 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9596 | return | ||
9597 | } | ||
9598 | |||
9599 | // Second of three parts | ||
9600 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
9601 | if err != nil { | ||
9602 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9603 | return | ||
9604 | } | ||
9605 | |||
9606 | // Last of three parts | ||
9607 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
9608 | if err != nil { | ||
9609 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9610 | return | ||
9611 | } | ||
9612 | |||
9613 | // Complete the multipart upload | ||
9614 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
9615 | if err != nil { | ||
9616 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
9617 | return | ||
9618 | } | ||
9619 | |||
9620 | // Stat the object and check its length matches | ||
9621 | objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) | ||
9622 | if err != nil { | ||
9623 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9624 | return | ||
9625 | } | ||
9626 | |||
9627 | if objInfo.Size != (5*1024*1024)*2+1 { | ||
9628 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
9629 | return | ||
9630 | } | ||
9631 | |||
9632 | // Now we read the data back | ||
9633 | getOpts := minio.GetObjectOptions{} | ||
9634 | getOpts.SetRange(0, 5*1024*1024-1) | ||
9635 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9636 | if err != nil { | ||
9637 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9638 | return | ||
9639 | } | ||
9640 | getBuf := make([]byte, 5*1024*1024) | ||
9641 | _, err = readFull(r, getBuf) | ||
9642 | if err != nil { | ||
9643 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9644 | return | ||
9645 | } | ||
9646 | if !bytes.Equal(getBuf, buf) { | ||
9647 | logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) | ||
9648 | return | ||
9649 | } | ||
9650 | |||
9651 | getOpts.SetRange(5*1024*1024, 0) | ||
9652 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9653 | if err != nil { | ||
9654 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9655 | return | ||
9656 | } | ||
9657 | getBuf = make([]byte, 5*1024*1024+1) | ||
9658 | _, err = readFull(r, getBuf) | ||
9659 | if err != nil { | ||
9660 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9661 | return | ||
9662 | } | ||
9663 | if !bytes.Equal(getBuf[:5*1024*1024], buf) { | ||
9664 | logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) | ||
9665 | return | ||
9666 | } | ||
9667 | if getBuf[5*1024*1024] != buf[0] { | ||
9668 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
9669 | return | ||
9670 | } | ||
9671 | |||
9672 | successLogger(testName, function, args, startTime).Info() | ||
9673 | |||
9674 | // No need to remove destBucketName; it is the same bucket as bucketName. | ||
9675 | } | ||
9676 | |||
9677 | // Test Core CopyObjectPart implementation for unencrypted to SSEC encrypted copy part | ||
9678 | func testUnencryptedToSSECCopyObjectPart() { | ||
9679 | // initialize logging params | ||
9680 | startTime := time.Now() | ||
9681 | testName := getFuncName() | ||
9682 | function := "CopyObjectPart(destination, source)" | ||
9683 | args := map[string]interface{}{} | ||
9684 | |||
9685 | // Instantiate new minio client object | ||
9686 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
9687 | &minio.Options{ | ||
9688 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
9689 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
9690 | }) | ||
9691 | if err != nil { | ||
9692 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
9693 | return | ||
9694 | } | ||
9695 | |||
9696 | // Instantiate new core client object. | ||
9697 | c := minio.Core{client} | ||
9698 | |||
9699 | // Enable tracing, write to stderr. | ||
9700 | // c.TraceOn(os.Stderr) | ||
9701 | |||
9702 | // Set user agent. | ||
9703 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
9704 | |||
9705 | // Generate a new random bucket name. | ||
9706 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
9707 | |||
9708 | // Make a new bucket. | ||
9709 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
9710 | if err != nil { | ||
9711 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
9712 | return | ||
9713 | } | ||
9714 | defer cleanupBucket(bucketName, client) | ||
9715 | // Make a buffer with 5MB of data | ||
9716 | buf := bytes.Repeat([]byte("abcde"), 1024*1024) | ||
9717 | |||
9718 | // Save the data | ||
9719 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
9720 | password := "correct horse battery staple" | ||
9721 | putmetadata := map[string]string{ | ||
9722 | "Content-Type": "binary/octet-stream", | ||
9723 | } | ||
9724 | opts := minio.PutObjectOptions{ | ||
9725 | UserMetadata: putmetadata, | ||
9726 | } | ||
9727 | uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) | ||
9728 | if err != nil { | ||
9729 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
9730 | return | ||
9731 | } | ||
9732 | |||
9733 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
9734 | if err != nil { | ||
9735 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9736 | return | ||
9737 | } | ||
9738 | |||
9739 | if st.Size != int64(len(buf)) { | ||
9740 | logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) | ||
9741 | return | ||
9742 | } | ||
9743 | |||
9744 | destBucketName := bucketName | ||
9745 | destObjectName := objectName + "-dest" | ||
9746 | dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) | ||
9747 | |||
9748 | uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) | ||
9749 | if err != nil { | ||
9750 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
9751 | return | ||
9752 | } | ||
9753 | |||
9754 | // Content of the destination object will be two copies of | ||
9755 | // the data in `objectName` concatenated, followed by the | ||
9756 | // first byte of that data. | ||
9757 | metadata := make(map[string]string) | ||
9758 | header := make(http.Header) | ||
9759 | dstencryption.Marshal(header) | ||
9760 | for k, v := range header { | ||
9761 | metadata[k] = v[0] | ||
9762 | } | ||
9763 | |||
9764 | metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag | ||
9765 | |||
9766 | // First of three parts | ||
9767 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
9768 | if err != nil { | ||
9769 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9770 | return | ||
9771 | } | ||
9772 | |||
9773 | // Second of three parts | ||
9774 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
9775 | if err != nil { | ||
9776 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9777 | return | ||
9778 | } | ||
9779 | |||
9780 | // Last of three parts | ||
9781 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
9782 | if err != nil { | ||
9783 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9784 | return | ||
9785 | } | ||
9786 | |||
9787 | // Complete the multipart upload | ||
9788 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
9789 | if err != nil { | ||
9790 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
9791 | return | ||
9792 | } | ||
9793 | |||
9794 | // Stat the object and check its length matches | ||
9795 | objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) | ||
9796 | if err != nil { | ||
9797 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9798 | return | ||
9799 | } | ||
9800 | |||
9801 | if objInfo.Size != (5*1024*1024)*2+1 { | ||
9802 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
9803 | return | ||
9804 | } | ||
9805 | |||
9806 | // Now we read the data back | ||
9807 | getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} | ||
9808 | getOpts.SetRange(0, 5*1024*1024-1) | ||
9809 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9810 | if err != nil { | ||
9811 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9812 | return | ||
9813 | } | ||
9814 | getBuf := make([]byte, 5*1024*1024) | ||
9815 | _, err = readFull(r, getBuf) | ||
9816 | if err != nil { | ||
9817 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9818 | return | ||
9819 | } | ||
9820 | if !bytes.Equal(getBuf, buf) { | ||
9821 | logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) | ||
9822 | return | ||
9823 | } | ||
9824 | |||
9825 | getOpts.SetRange(5*1024*1024, 0) | ||
9826 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9827 | if err != nil { | ||
9828 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9829 | return | ||
9830 | } | ||
9831 | getBuf = make([]byte, 5*1024*1024+1) | ||
9832 | _, err = readFull(r, getBuf) | ||
9833 | if err != nil { | ||
9834 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9835 | return | ||
9836 | } | ||
9837 | if !bytes.Equal(getBuf[:5*1024*1024], buf) { | ||
9838 | logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) | ||
9839 | return | ||
9840 | } | ||
9841 | if getBuf[5*1024*1024] != buf[0] { | ||
9842 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
9843 | return | ||
9844 | } | ||
9845 | |||
9846 | successLogger(testName, function, args, startTime).Info() | ||
9847 | |||
9848 | // No need to remove destBucketName; it is the same bucket as bucketName. | ||
9849 | } | ||
9850 | |||
9851 | // Test Core CopyObjectPart implementation for unencrypted to unencrypted copy | ||
9852 | func testUnencryptedToUnencryptedCopyPart() { | ||
9853 | // initialize logging params | ||
9854 | startTime := time.Now() | ||
9855 | testName := getFuncName() | ||
9856 | function := "CopyObjectPart(destination, source)" | ||
9857 | args := map[string]interface{}{} | ||
9858 | |||
9859 | // Instantiate new minio client object | ||
9860 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
9861 | &minio.Options{ | ||
9862 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
9863 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
9864 | }) | ||
9865 | if err != nil { | ||
9866 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
9867 | return | ||
9868 | } | ||
9869 | |||
9870 | // Instantiate new core client object. | ||
9871 | c := minio.Core{client} | ||
9872 | |||
9873 | // Enable tracing, write to stderr. | ||
9874 | // c.TraceOn(os.Stderr) | ||
9875 | |||
9876 | // Set user agent. | ||
9877 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
9878 | |||
9879 | // Generate a new random bucket name. | ||
9880 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
9881 | |||
9882 | // Make a new bucket. | ||
9883 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
9884 | if err != nil { | ||
9885 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
9886 | return | ||
9887 | } | ||
9888 | defer cleanupBucket(bucketName, client) | ||
9889 | // Make a buffer with 5MB of data | ||
9890 | buf := bytes.Repeat([]byte("abcde"), 1024*1024) | ||
9891 | |||
9892 | // Save the data | ||
9893 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
9894 | putmetadata := map[string]string{ | ||
9895 | "Content-Type": "binary/octet-stream", | ||
9896 | } | ||
9897 | opts := minio.PutObjectOptions{ | ||
9898 | UserMetadata: putmetadata, | ||
9899 | } | ||
9900 | uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) | ||
9901 | if err != nil { | ||
9902 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
9903 | return | ||
9904 | } | ||
9905 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
9906 | if err != nil { | ||
9907 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9908 | return | ||
9909 | } | ||
9910 | |||
9911 | if st.Size != int64(len(buf)) { | ||
9912 | logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) | ||
9913 | return | ||
9914 | } | ||
9915 | |||
9916 | destBucketName := bucketName | ||
9917 | destObjectName := objectName + "-dest" | ||
9918 | |||
9919 | uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) | ||
9920 | if err != nil { | ||
9921 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
9922 | return | ||
9923 | } | ||
9924 | |||
9925 | // Content of the destination object will be two copies of | ||
9926 | // the data in `objectName` concatenated, followed by the | ||
9927 | // first byte of that data. | ||
9928 | metadata := make(map[string]string) | ||
9929 | header := make(http.Header) | ||
9930 | for k, v := range header { | ||
9931 | metadata[k] = v[0] | ||
9932 | } | ||
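// With no encryption on either side there are no headers to marshal, so the
// loop above copies nothing; it is kept only for symmetry with the encrypted
// variants of this test.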
9933 | |||
9934 | metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag | ||
9935 | |||
9936 | // First of three parts | ||
9937 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
9938 | if err != nil { | ||
9939 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9940 | return | ||
9941 | } | ||
9942 | |||
9943 | // Second of three parts | ||
9944 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
9945 | if err != nil { | ||
9946 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9947 | return | ||
9948 | } | ||
9949 | |||
9950 | // Last of three parts | ||
9951 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
9952 | if err != nil { | ||
9953 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
9954 | return | ||
9955 | } | ||
9956 | |||
9957 | // Complete the multipart upload | ||
9958 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
9959 | if err != nil { | ||
9960 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
9961 | return | ||
9962 | } | ||
9963 | |||
9964 | // Stat the object and check its length matches | ||
9965 | objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) | ||
9966 | if err != nil { | ||
9967 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
9968 | return | ||
9969 | } | ||
9970 | |||
9971 | if objInfo.Size != (5*1024*1024)*2+1 { | ||
9972 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
9973 | return | ||
9974 | } | ||
9975 | |||
9976 | // Now we read the data back | ||
9977 | getOpts := minio.GetObjectOptions{} | ||
9978 | getOpts.SetRange(0, 5*1024*1024-1) | ||
9979 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9980 | if err != nil { | ||
9981 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9982 | return | ||
9983 | } | ||
9984 | getBuf := make([]byte, 5*1024*1024) | ||
9985 | _, err = readFull(r, getBuf) | ||
9986 | if err != nil { | ||
9987 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
9988 | return | ||
9989 | } | ||
9990 | if !bytes.Equal(getBuf, buf) { | ||
9991 | logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) | ||
9992 | return | ||
9993 | } | ||
9994 | |||
9995 | getOpts.SetRange(5*1024*1024, 0) | ||
9996 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
9997 | if err != nil { | ||
9998 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
9999 | return | ||
10000 | } | ||
10001 | getBuf = make([]byte, 5*1024*1024+1) | ||
10002 | _, err = readFull(r, getBuf) | ||
10003 | if err != nil { | ||
10004 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
10005 | return | ||
10006 | } | ||
10007 | if !bytes.Equal(getBuf[:5*1024*1024], buf) { | ||
10008 | logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) | ||
10009 | return | ||
10010 | } | ||
10011 | if getBuf[5*1024*1024] != buf[0] { | ||
10012 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
10013 | return | ||
10014 | } | ||
10015 | |||
10016 | successLogger(testName, function, args, startTime).Info() | ||
10017 | |||
10018 | // No need to remove destBucketName; it is the same bucket as bucketName. | ||
10019 | } | ||
10020 | |||
10021 | // Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy | ||
10022 | func testUnencryptedToSSES3CopyObjectPart() { | ||
10023 | // initialize logging params | ||
10024 | startTime := time.Now() | ||
10025 | testName := getFuncName() | ||
10026 | function := "CopyObjectPart(destination, source)" | ||
10027 | args := map[string]interface{}{} | ||
10028 | |||
10029 | // Instantiate new minio client object | ||
10030 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
10031 | &minio.Options{ | ||
10032 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
10033 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
10034 | }) | ||
10035 | if err != nil { | ||
10036 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
10037 | return | ||
10038 | } | ||
10039 | |||
10040 | // Instantiate new core client object. | ||
10041 | c := minio.Core{client} | ||
10042 | |||
10043 | // Enable tracing, write to stderr. | ||
10044 | // c.TraceOn(os.Stderr) | ||
10045 | |||
10046 | // Set user agent. | ||
10047 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
10048 | |||
10049 | // Generate a new random bucket name. | ||
10050 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
10051 | |||
10052 | // Make a new bucket. | ||
10053 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
10054 | if err != nil { | ||
10055 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
10056 | return | ||
10057 | } | ||
10058 | defer cleanupBucket(bucketName, client) | ||
10059 | // Make a buffer with 5MB of data | ||
10060 | buf := bytes.Repeat([]byte("abcde"), 1024*1024) | ||
10061 | |||
10062 | // Save the data | ||
10063 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
10064 | opts := minio.PutObjectOptions{ | ||
10065 | UserMetadata: map[string]string{ | ||
10066 | "Content-Type": "binary/octet-stream", | ||
10067 | }, | ||
10068 | } | ||
10069 | uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) | ||
10070 | if err != nil { | ||
10071 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
10072 | return | ||
10073 | } | ||
10074 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
10075 | if err != nil { | ||
10076 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
10077 | return | ||
10078 | } | ||
10079 | |||
10080 | if st.Size != int64(len(buf)) { | ||
10081 | logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) | ||
10082 | return | ||
10083 | } | ||
10084 | |||
10085 | destBucketName := bucketName | ||
10086 | destObjectName := objectName + "-dest" | ||
10087 | dstencryption := encrypt.NewSSE() | ||
10088 | |||
10089 | uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) | ||
10090 | if err != nil { | ||
10091 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
10092 | return | ||
10093 | } | ||
10094 | |||
10095 | // Content of the destination object will be two copies of | ||
10096 | // `objectName` concatenated, followed by first byte of | ||
10097 | // `objectName`. | ||
10098 | metadata := make(map[string]string) | ||
10099 | header := make(http.Header) | ||
10100 | dstencryption.Marshal(header) | ||
10101 | |||
10102 | for k, v := range header { | ||
10103 | metadata[k] = v[0] | ||
10104 | } | ||
10105 | |||
10106 | metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag | ||
10107 | |||
10108 | // First of three parts | ||
10109 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
10110 | if err != nil { | ||
10111 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10112 | return | ||
10113 | } | ||
10114 | |||
10115 | // Second of three parts | ||
10116 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
10117 | if err != nil { | ||
10118 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10119 | return | ||
10120 | } | ||
10121 | |||
10122 | // Last of three parts | ||
10123 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
10124 | if err != nil { | ||
10125 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10126 | return | ||
10127 | } | ||
10128 | |||
10129 | // Complete the multipart upload | ||
10130 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
10131 | if err != nil { | ||
10132 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
10133 | return | ||
10134 | } | ||
10135 | |||
10136 | // Stat the object and check its length matches | ||
10137 | objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) | ||
10138 | if err != nil { | ||
10139 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
10140 | return | ||
10141 | } | ||
10142 | |||
10143 | if objInfo.Size != (5*1024*1024)*2+1 { | ||
10144 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
10145 | return | ||
10146 | } | ||
10147 | |||
10148 | // Now we read the data back | ||
10149 | getOpts := minio.GetObjectOptions{} | ||
10150 | getOpts.SetRange(0, 5*1024*1024-1) | ||
10151 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
10152 | if err != nil { | ||
10153 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
10154 | return | ||
10155 | } | ||
10156 | getBuf := make([]byte, 5*1024*1024) | ||
10157 | _, err = readFull(r, getBuf) | ||
10158 | if err != nil { | ||
10159 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
10160 | return | ||
10161 | } | ||
10162 | if !bytes.Equal(getBuf, buf) { | ||
10163 | logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) | ||
10164 | return | ||
10165 | } | ||
10166 | |||
10167 | getOpts.SetRange(5*1024*1024, 0) | ||
10168 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
10169 | if err != nil { | ||
10170 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
10171 | return | ||
10172 | } | ||
10173 | getBuf = make([]byte, 5*1024*1024+1) | ||
10174 | _, err = readFull(r, getBuf) | ||
10175 | if err != nil { | ||
10176 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
10177 | return | ||
10178 | } | ||
10179 | if !bytes.Equal(getBuf[:5*1024*1024], buf) { | ||
10180 | logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) | ||
10181 | return | ||
10182 | } | ||
10183 | if getBuf[5*1024*1024] != buf[0] { | ||
10184 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
10185 | return | ||
10186 | } | ||
10187 | |||
10188 | successLogger(testName, function, args, startTime).Info() | ||
10189 | |||
10190 | // No need to remove destBucketName; it is the same bucket as bucketName. | ||
10191 | } | ||
10192 | |||
10193 | // Test Core CopyObjectPart implementation for SSE-S3 encrypted to SSEC encrypted copy part | ||
10194 | func testSSES3EncryptedToSSECCopyObjectPart() { | ||
10195 | // initialize logging params | ||
10196 | startTime := time.Now() | ||
10197 | testName := getFuncName() | ||
10198 | function := "CopyObjectPart(destination, source)" | ||
10199 | args := map[string]interface{}{} | ||
10200 | |||
10201 | // Instantiate new minio client object | ||
10202 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
10203 | &minio.Options{ | ||
10204 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
10205 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
10206 | }) | ||
10207 | if err != nil { | ||
10208 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
10209 | return | ||
10210 | } | ||
10211 | |||
10212 | // Instantiate new core client object. | ||
10213 | c := minio.Core{client} | ||
10214 | |||
10215 | // Enable tracing, write to stderr. | ||
10216 | // c.TraceOn(os.Stderr) | ||
10217 | |||
10218 | // Set user agent. | ||
10219 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
10220 | |||
10221 | // Generate a new random bucket name. | ||
10222 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
10223 | |||
10224 | // Make a new bucket. | ||
10225 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
10226 | if err != nil { | ||
10227 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
10228 | return | ||
10229 | } | ||
10230 | defer cleanupBucket(bucketName, client) | ||
10231 | // Make a buffer with 5MB of data | ||
10232 | buf := bytes.Repeat([]byte("abcde"), 1024*1024) | ||
10233 | |||
10234 | // Save the data | ||
10235 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
10236 | password := "correct horse battery staple" | ||
10237 | srcEncryption := encrypt.NewSSE() | ||
10238 | opts := minio.PutObjectOptions{ | ||
10239 | UserMetadata: map[string]string{ | ||
10240 | "Content-Type": "binary/octet-stream", | ||
10241 | }, | ||
10242 | ServerSideEncryption: srcEncryption, | ||
10243 | } | ||
10244 | uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) | ||
10245 | if err != nil { | ||
10246 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
10247 | return | ||
10248 | } | ||
10249 | |||
10250 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) | ||
10251 | if err != nil { | ||
10252 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
10253 | return | ||
10254 | } | ||
10255 | |||
10256 | if st.Size != int64(len(buf)) { | ||
10257 | logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) | ||
10258 | return | ||
10259 | } | ||
10260 | |||
10261 | destBucketName := bucketName | ||
10262 | destObjectName := objectName + "-dest" | ||
10263 | dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) | ||
10264 | |||
10265 | uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) | ||
10266 | if err != nil { | ||
10267 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
10268 | return | ||
10269 | } | ||
10270 | |||
10271 | // Content of the destination object will be two copies of | ||
10272 | // the data in `objectName` concatenated, followed by the | ||
10273 | // first byte of that data. | ||
10274 | metadata := make(map[string]string) | ||
10275 | header := make(http.Header) | ||
10276 | dstencryption.Marshal(header) | ||
10277 | for k, v := range header { | ||
10278 | metadata[k] = v[0] | ||
10279 | } | ||
10280 | |||
10281 | metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag | ||
10282 | |||
10283 | // First of three parts | ||
10284 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
10285 | if err != nil { | ||
10286 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10287 | return | ||
10288 | } | ||
10289 | |||
10290 | // Second of three parts | ||
10291 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
10292 | if err != nil { | ||
10293 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10294 | return | ||
10295 | } | ||
10296 | |||
10297 | // Last of three parts | ||
10298 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
10299 | if err != nil { | ||
10300 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10301 | return | ||
10302 | } | ||
10303 | |||
10304 | // Complete the multipart upload | ||
10305 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
10306 | if err != nil { | ||
10307 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
10308 | return | ||
10309 | } | ||
10310 | |||
10311 | // Stat the object and check its length matches | ||
10312 | objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) | ||
10313 | if err != nil { | ||
10314 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
10315 | return | ||
10316 | } | ||
10317 | |||
10318 | if objInfo.Size != (5*1024*1024)*2+1 { | ||
10319 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
10320 | return | ||
10321 | } | ||
10322 | |||
10323 | // Now we read the data back | ||
10324 | getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} | ||
10325 | getOpts.SetRange(0, 5*1024*1024-1) | ||
10326 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
10327 | if err != nil { | ||
10328 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
10329 | return | ||
10330 | } | ||
10331 | getBuf := make([]byte, 5*1024*1024) | ||
10332 | _, err = readFull(r, getBuf) | ||
10333 | if err != nil { | ||
10334 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
10335 | return | ||
10336 | } | ||
10337 | if !bytes.Equal(getBuf, buf) { | ||
10338 | logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) | ||
10339 | return | ||
10340 | } | ||
10341 | |||
10342 | getOpts.SetRange(5*1024*1024, 0) | ||
10343 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
10344 | if err != nil { | ||
10345 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
10346 | return | ||
10347 | } | ||
10348 | getBuf = make([]byte, 5*1024*1024+1) | ||
10349 | _, err = readFull(r, getBuf) | ||
10350 | if err != nil { | ||
10351 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
10352 | return | ||
10353 | } | ||
10354 | if !bytes.Equal(getBuf[:5*1024*1024], buf) { | ||
10355 | logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) | ||
10356 | return | ||
10357 | } | ||
10358 | if getBuf[5*1024*1024] != buf[0] { | ||
10359 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
10360 | return | ||
10361 | } | ||
10362 | |||
10363 | successLogger(testName, function, args, startTime).Info() | ||
10364 | |||
10365 | // No need to remove destBucketName; it is the same bucket as bucketName. | ||
10366 | } | ||
10367 | |||
10368 | // Test Core CopyObjectPart implementation for SSE-S3 encrypted to unencrypted copy | ||
10369 | func testSSES3EncryptedToUnencryptedCopyPart() { | ||
10370 | // initialize logging params | ||
10371 | startTime := time.Now() | ||
10372 | testName := getFuncName() | ||
10373 | function := "CopyObjectPart(destination, source)" | ||
10374 | args := map[string]interface{}{} | ||
10375 | |||
10376 | // Instantiate new minio client object | ||
10377 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
10378 | &minio.Options{ | ||
10379 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
10380 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
10381 | }) | ||
10382 | if err != nil { | ||
10383 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
10384 | return | ||
10385 | } | ||
10386 | |||
10387 | // Instantiate new core client object. | ||
10388 | c := minio.Core{client} | ||
10389 | |||
10390 | // Enable tracing, write to stderr. | ||
10391 | // c.TraceOn(os.Stderr) | ||
10392 | |||
10393 | // Set user agent. | ||
10394 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
10395 | |||
10396 | // Generate a new random bucket name. | ||
10397 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
10398 | |||
10399 | // Make a new bucket. | ||
10400 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
10401 | if err != nil { | ||
10402 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
10403 | return | ||
10404 | } | ||
10405 | defer cleanupBucket(bucketName, client) | ||
10406 | // Make a buffer with 5MB of data | ||
10407 | buf := bytes.Repeat([]byte("abcde"), 1024*1024) | ||
10408 | |||
10409 | // Save the data | ||
10410 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
10411 | srcEncryption := encrypt.NewSSE() | ||
10412 | opts := minio.PutObjectOptions{ | ||
10413 | UserMetadata: map[string]string{ | ||
10414 | "Content-Type": "binary/octet-stream", | ||
10415 | }, | ||
10416 | ServerSideEncryption: srcEncryption, | ||
10417 | } | ||
10418 | uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) | ||
10419 | if err != nil { | ||
10420 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
10421 | return | ||
10422 | } | ||
10423 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) | ||
10424 | if err != nil { | ||
10425 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
10426 | return | ||
10427 | } | ||
10428 | |||
10429 | if st.Size != int64(len(buf)) { | ||
10430 | logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) | ||
10431 | return | ||
10432 | } | ||
10433 | |||
10434 | destBucketName := bucketName | ||
10435 | destObjectName := objectName + "-dest" | ||
10436 | |||
10437 | uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) | ||
10438 | if err != nil { | ||
10439 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
10440 | return | ||
10441 | } | ||
10442 | |||
10443 | // Content of the destination object will be two copies of | ||
10444 | // `objectName` concatenated, followed by first byte of | ||
10445 | // `objectName`. | ||
10446 | metadata := make(map[string]string) | ||
10447 | header := make(http.Header) | ||
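| // The destination is unencrypted in this test, so nothing is marshalled into header; the loop below is a no-op kept for symmetry with the encrypted copy-part variants. | ||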
10448 | for k, v := range header { | ||
10449 | metadata[k] = v[0] | ||
10450 | } | ||
10451 | |||
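| // Passing x-amz-copy-source-if-match makes each part copy conditional on the source ETag still matching the original upload. | ||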
10452 | metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag | ||
10453 | |||
10454 | // First of three parts | ||
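| // Note: in the Core API, startOffset=0 with length=-1 selects the entire source object as this part. | ||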
10455 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
10456 | if err != nil { | ||
10457 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10458 | return | ||
10459 | } | ||
10460 | |||
10461 | // Second of three parts | ||
10462 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
10463 | if err != nil { | ||
10464 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10465 | return | ||
10466 | } | ||
10467 | |||
10468 | // Last of three parts | ||
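| // A length of 1 copies only the first byte of the source, giving the final +1 byte of the destination. | ||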
10469 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
10470 | if err != nil { | ||
10471 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10472 | return | ||
10473 | } | ||
10474 | |||
10475 | // Complete the multipart upload | ||
10476 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
10477 | if err != nil { | ||
10478 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
10479 | return | ||
10480 | } | ||
10481 | |||
10482 | // Stat the object and check its length matches | ||
10483 | objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) | ||
10484 | if err != nil { | ||
10485 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
10486 | return | ||
10487 | } | ||
10488 | |||
10489 | if objInfo.Size != (5*1024*1024)*2+1 { | ||
10490 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
10491 | return | ||
10492 | } | ||
10493 | |||
10494 | // Now we read the data back | ||
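| // SetRange takes inclusive byte offsets, so (0, 5MiB-1) requests exactly the first 5MiB part. | ||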
10495 | getOpts := minio.GetObjectOptions{} | ||
10496 | getOpts.SetRange(0, 5*1024*1024-1) | ||
10497 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
10498 | if err != nil { | ||
10499 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
10500 | return | ||
10501 | } | ||
10502 | getBuf := make([]byte, 5*1024*1024) | ||
10503 | _, err = readFull(r, getBuf) | ||
10504 | if err != nil { | ||
10505 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
10506 | return | ||
10507 | } | ||
10508 | if !bytes.Equal(getBuf, buf) { | ||
10509 | logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) | ||
10510 | return | ||
10511 | } | ||
10512 | |||
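| // An end offset of 0 means "read from the start offset to the end of the object". | ||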
10513 | getOpts.SetRange(5*1024*1024, 0) | ||
10514 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
10515 | if err != nil { | ||
10516 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
10517 | return | ||
10518 | } | ||
10519 | getBuf = make([]byte, 5*1024*1024+1) | ||
10520 | _, err = readFull(r, getBuf) | ||
10521 | if err != nil { | ||
10522 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
10523 | return | ||
10524 | } | ||
10525 | if !bytes.Equal(getBuf[:5*1024*1024], buf) { | ||
10526 | logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) | ||
10527 | return | ||
10528 | } | ||
10529 | if getBuf[5*1024*1024] != buf[0] { | ||
10530 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
10531 | return | ||
10532 | } | ||
10533 | |||
10534 | successLogger(testName, function, args, startTime).Info() | ||
10535 | |||
10536 | // No need to remove destBucketName; it is the same as bucketName. | ||
10537 | } | ||
10538 | |||
10539 | // Test Core CopyObjectPart implementation for SSE-S3 encrypted to SSE-S3 encrypted copy | ||
10540 | func testSSES3EncryptedToSSES3CopyObjectPart() { | ||
10541 | // initialize logging params | ||
10542 | startTime := time.Now() | ||
10543 | testName := getFuncName() | ||
10544 | function := "CopyObjectPart(destination, source)" | ||
10545 | args := map[string]interface{}{} | ||
10546 | |||
10547 | // Instantiate new minio client object | ||
10548 | client, err := minio.New(os.Getenv(serverEndpoint), | ||
10549 | &minio.Options{ | ||
10550 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
10551 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
10552 | }) | ||
10553 | if err != nil { | ||
10554 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
10555 | return | ||
10556 | } | ||
10557 | |||
10558 | // Instantiate new core client object. | ||
10559 | c := minio.Core{Client: client} | ||
10560 | |||
10561 | // Enable tracing, write to stderr. | ||
10562 | // c.TraceOn(os.Stderr) | ||
10563 | |||
10564 | // Set user agent. | ||
10565 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
10566 | |||
10567 | // Generate a new random bucket name. | ||
10568 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
10569 | |||
10570 | // Make a new bucket. | ||
10571 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
10572 | if err != nil { | ||
10573 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
10574 | return | ||
10575 | } | ||
10576 | defer cleanupBucket(bucketName, client) | ||
10577 | // Make a buffer with 5MB of data | ||
10578 | buf := bytes.Repeat([]byte("abcde"), 1024*1024) | ||
10579 | |||
10580 | // Save the data | ||
10581 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
10582 | srcEncryption := encrypt.NewSSE() | ||
10583 | opts := minio.PutObjectOptions{ | ||
10584 | UserMetadata: map[string]string{ | ||
10585 | "Content-Type": "binary/octet-stream", | ||
10586 | }, | ||
10587 | ServerSideEncryption: srcEncryption, | ||
10588 | } | ||
10589 | |||
10590 | uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) | ||
10591 | if err != nil { | ||
10592 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
10593 | return | ||
10594 | } | ||
10595 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) | ||
10596 | if err != nil { | ||
10597 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
10598 | return | ||
10599 | } | ||
10600 | if st.Size != int64(len(buf)) { | ||
10601 | logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) | ||
10602 | return | ||
10603 | } | ||
10604 | |||
10605 | destBucketName := bucketName | ||
10606 | destObjectName := objectName + "-dest" | ||
10607 | dstencryption := encrypt.NewSSE() | ||
10608 | |||
10609 | uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) | ||
10610 | if err != nil { | ||
10611 | logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) | ||
10612 | return | ||
10613 | } | ||
10614 | |||
10615 | // Content of the destination object will be two copies of | ||
10616 | // `objectName` concatenated, followed by first byte of | ||
10617 | // `objectName`. | ||
10618 | metadata := make(map[string]string) | ||
10619 | header := make(http.Header) | ||
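| // Marshal writes the SSE-S3 request header (x-amz-server-side-encryption: AES256) into header, so it is forwarded with each CopyObjectPart call below. | ||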
10620 | dstencryption.Marshal(header) | ||
10621 | |||
10622 | for k, v := range header { | ||
10623 | metadata[k] = v[0] | ||
10624 | } | ||
10625 | |||
10626 | metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag | ||
10627 | |||
10628 | // First of three parts | ||
10629 | fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) | ||
10630 | if err != nil { | ||
10631 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10632 | return | ||
10633 | } | ||
10634 | |||
10635 | // Second of three parts | ||
10636 | sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) | ||
10637 | if err != nil { | ||
10638 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10639 | return | ||
10640 | } | ||
10641 | |||
10642 | // Last of three parts | ||
10643 | lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) | ||
10644 | if err != nil { | ||
10645 | logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) | ||
10646 | return | ||
10647 | } | ||
10648 | |||
10649 | // Complete the multipart upload | ||
10650 | _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) | ||
10651 | if err != nil { | ||
10652 | logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) | ||
10653 | return | ||
10654 | } | ||
10655 | |||
10656 | // Stat the object and check its length matches | ||
10657 | objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) | ||
10658 | if err != nil { | ||
10659 | logError(testName, function, args, startTime, "", "StatObject call failed", err) | ||
10660 | return | ||
10661 | } | ||
10662 | |||
10663 | if objInfo.Size != (5*1024*1024)*2+1 { | ||
10664 | logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) | ||
10665 | return | ||
10666 | } | ||
10667 | |||
10668 | // Now we read the data back | ||
10669 | getOpts := minio.GetObjectOptions{} | ||
10670 | getOpts.SetRange(0, 5*1024*1024-1) | ||
10671 | r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
10672 | if err != nil { | ||
10673 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
10674 | return | ||
10675 | } | ||
10676 | getBuf := make([]byte, 5*1024*1024) | ||
10677 | _, err = readFull(r, getBuf) | ||
10678 | if err != nil { | ||
10679 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
10680 | return | ||
10681 | } | ||
10682 | if !bytes.Equal(getBuf, buf) { | ||
10683 | logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) | ||
10684 | return | ||
10685 | } | ||
10686 | |||
10687 | getOpts.SetRange(5*1024*1024, 0) | ||
10688 | r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) | ||
10689 | if err != nil { | ||
10690 | logError(testName, function, args, startTime, "", "GetObject call failed", err) | ||
10691 | return | ||
10692 | } | ||
10693 | getBuf = make([]byte, 5*1024*1024+1) | ||
10694 | _, err = readFull(r, getBuf) | ||
10695 | if err != nil { | ||
10696 | logError(testName, function, args, startTime, "", "Read buffer failed", err) | ||
10697 | return | ||
10698 | } | ||
10699 | if !bytes.Equal(getBuf[:5*1024*1024], buf) { | ||
10700 | logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) | ||
10701 | return | ||
10702 | } | ||
10703 | if getBuf[5*1024*1024] != buf[0] { | ||
10704 | logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) | ||
10705 | return | ||
10706 | } | ||
10707 | |||
10708 | successLogger(testName, function, args, startTime).Info() | ||
10709 | |||
10710 | // No need to remove destBucketName; it is the same as bucketName. | ||
10711 | } | ||
10712 | |||
10713 | func testUserMetadataCopying() { | ||
10714 | // initialize logging params | ||
10715 | startTime := time.Now() | ||
10716 | testName := getFuncName() | ||
10717 | function := "CopyObject(destination, source)" | ||
10718 | args := map[string]interface{}{} | ||
10719 | |||
10720 | // Instantiate new minio client object | ||
10721 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
10722 | &minio.Options{ | ||
10723 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
10724 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
10725 | }) | ||
10726 | if err != nil { | ||
10727 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
10728 | return | ||
10729 | } | ||
10730 | |||
10731 | // c.TraceOn(os.Stderr) | ||
10732 | testUserMetadataCopyingWrapper(c) | ||
10733 | } | ||
10734 | |||
10735 | func testUserMetadataCopyingWrapper(c *minio.Client) { | ||
10736 | // initialize logging params | ||
10737 | startTime := time.Now() | ||
10738 | testName := getFuncName() | ||
10739 | function := "CopyObject(destination, source)" | ||
10740 | args := map[string]interface{}{} | ||
10741 | |||
10742 | // Generate a new random bucket name. | ||
10743 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
10744 | // Make a new bucket in 'us-east-1' (source bucket). | ||
10745 | err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
10746 | if err != nil { | ||
10747 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
10748 | return | ||
10749 | } | ||
10750 | |||
10751 | defer cleanupBucket(bucketName, c) | ||
10752 | |||
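| // fetchMeta collects only the x-amz-meta-* user metadata of an object, so results compare cleanly against http.Header fixtures via reflect.DeepEqual. | ||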
10753 | fetchMeta := func(object string) (h http.Header) { | ||
10754 | objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) | ||
10755 | if err != nil { | ||
10756 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
10757 | return | ||
10758 | } | ||
10759 | h = make(http.Header) | ||
10760 | for k, vs := range objInfo.Metadata { | ||
10761 | if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { | ||
10762 | h.Add(k, vs[0]) | ||
10763 | } | ||
10764 | } | ||
10765 | return h | ||
10766 | } | ||
10767 | |||
10768 | // 1. create an object to copy by uploading it with user metadata set | ||
10769 | const srcSize = 1024 * 1024 | ||
10770 | buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB | ||
10771 | metadata := make(http.Header) | ||
10772 | metadata.Set("x-amz-meta-myheader", "myvalue") | ||
10773 | m := make(map[string]string) | ||
10774 | m["x-amz-meta-myheader"] = "myvalue" | ||
10775 | _, err = c.PutObject(context.Background(), bucketName, "srcObject", | ||
10776 | bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) | ||
10777 | if err != nil { | ||
10778 | logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) | ||
10779 | return | ||
10780 | } | ||
10781 | if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { | ||
10782 | logError(testName, function, args, startTime, "", "Metadata match failed", err) | ||
10783 | return | ||
10784 | } | ||
10785 | |||
10786 | // 2. create source | ||
10787 | src := minio.CopySrcOptions{ | ||
10788 | Bucket: bucketName, | ||
10789 | Object: "srcObject", | ||
10790 | } | ||
10791 | |||
10792 | // 2.1 create destination with metadata set | ||
10793 | dst1 := minio.CopyDestOptions{ | ||
10794 | Bucket: bucketName, | ||
10795 | Object: "dstObject-1", | ||
10796 | UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, | ||
10797 | ReplaceMetadata: true, | ||
10798 | } | ||
10799 | |||
10800 | // 3. Check that copying with ReplaceMetadata set replaces | ||
10801 | // the user metadata on the copy. | ||
10802 | args["source"] = src | ||
10803 | args["destination"] = dst1 | ||
10804 | _, err = c.CopyObject(context.Background(), dst1, src) | ||
10805 | if err != nil { | ||
10806 | logError(testName, function, args, startTime, "", "CopyObject failed", err) | ||
10807 | return | ||
10808 | } | ||
10809 | |||
10810 | expectedHeaders := make(http.Header) | ||
10811 | expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") | ||
10812 | if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { | ||
10813 | logError(testName, function, args, startTime, "", "Metadata match failed", err) | ||
10814 | return | ||
10815 | } | ||
10816 | |||
10817 | // 4. create destination with no metadata set and same source | ||
10818 | dst2 := minio.CopyDestOptions{ | ||
10819 | Bucket: bucketName, | ||
10820 | Object: "dstObject-2", | ||
10821 | } | ||
10822 | |||
10823 | // 5. Check that copying without replacement metadata | ||
10824 | // carries over the source metadata. | ||
10825 | args["source"] = src | ||
10826 | args["destination"] = dst2 | ||
10827 | _, err = c.CopyObject(context.Background(), dst2, src) | ||
10828 | if err != nil { | ||
10829 | logError(testName, function, args, startTime, "", "CopyObject failed", err) | ||
10830 | return | ||
10831 | } | ||
10832 | |||
10833 | expectedHeaders = metadata | ||
10834 | if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { | ||
10835 | logError(testName, function, args, startTime, "", "Metadata match failed", err) | ||
10836 | return | ||
10837 | } | ||
10838 | |||
10839 | // 6. Compose a pair of sources. | ||
10840 | dst3 := minio.CopyDestOptions{ | ||
10841 | Bucket: bucketName, | ||
10842 | Object: "dstObject-3", | ||
10843 | ReplaceMetadata: true, | ||
10844 | } | ||
10845 | |||
10846 | function = "ComposeObject(destination, sources)" | ||
10847 | args["source"] = []minio.CopySrcOptions{src, src} | ||
10848 | args["destination"] = dst3 | ||
10849 | _, err = c.ComposeObject(context.Background(), dst3, src, src) | ||
10850 | if err != nil { | ||
10851 | logError(testName, function, args, startTime, "", "ComposeObject failed", err) | ||
10852 | return | ||
10853 | } | ||
10854 | |||
10855 | // Check that no headers are copied in this case | ||
10856 | if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { | ||
10857 | logError(testName, function, args, startTime, "", "Metadata match failed", err) | ||
10858 | return | ||
10859 | } | ||
10860 | |||
10861 | // 7. Compose a pair of sources with dest user metadata set. | ||
10862 | dst4 := minio.CopyDestOptions{ | ||
10863 | Bucket: bucketName, | ||
10864 | Object: "dstObject-4", | ||
10865 | UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, | ||
10866 | ReplaceMetadata: true, | ||
10867 | } | ||
10868 | |||
10869 | function = "ComposeObject(destination, sources)" | ||
10870 | args["source"] = []minio.CopySrcOptions{src, src} | ||
10871 | args["destination"] = dst4 | ||
10872 | _, err = c.ComposeObject(context.Background(), dst4, src, src) | ||
10873 | if err != nil { | ||
10874 | logError(testName, function, args, startTime, "", "ComposeObject failed", err) | ||
10875 | return | ||
10876 | } | ||
10877 | |||
10878 | // Check that only the destination's replacement user metadata is present | ||
10879 | expectedHeaders = make(http.Header) | ||
10880 | expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") | ||
10881 | if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { | ||
10882 | logError(testName, function, args, startTime, "", "Metadata match failed", err) | ||
10883 | return | ||
10884 | } | ||
10885 | |||
10886 | successLogger(testName, function, args, startTime).Info() | ||
10887 | } | ||
10888 | |||
10889 | func testUserMetadataCopyingV2() { | ||
10890 | // initialize logging params | ||
10891 | startTime := time.Now() | ||
10892 | testName := getFuncName() | ||
10893 | function := "CopyObject(destination, source)" | ||
10894 | args := map[string]interface{}{} | ||
10895 | |||
10896 | // Instantiate new minio client object | ||
10897 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
10898 | &minio.Options{ | ||
10899 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
10900 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
10901 | }) | ||
10902 | if err != nil { | ||
10903 | logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) | ||
10904 | return | ||
10905 | } | ||
10906 | |||
10907 | // c.TraceOn(os.Stderr) | ||
10908 | testUserMetadataCopyingWrapper(c) | ||
10909 | } | ||
10910 | |||
10911 | func testStorageClassMetadataPutObject() { | ||
10912 | // initialize logging params | ||
10913 | startTime := time.Now() | ||
10914 | function := "testStorageClassMetadataPutObject()" | ||
10915 | args := map[string]interface{}{} | ||
10916 | testName := getFuncName() | ||
10917 | |||
10918 | // Instantiate new minio client object | ||
10919 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
10920 | &minio.Options{ | ||
10921 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
10922 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
10923 | }) | ||
10924 | if err != nil { | ||
10925 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
10926 | return | ||
10927 | } | ||
10928 | |||
10929 | // Generate a new random bucket name. | ||
10930 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
10931 | // Make a new bucket in 'us-east-1' (source bucket). | ||
10932 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
10933 | if err != nil { | ||
10934 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
10935 | return | ||
10936 | } | ||
10937 | |||
10938 | defer cleanupBucket(bucketName, c) | ||
10939 | |||
10940 | fetchMeta := func(object string) (h http.Header) { | ||
10941 | objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) | ||
10942 | if err != nil { | ||
10943 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
10944 | return | ||
10945 | } | ||
10946 | h = make(http.Header) | ||
10947 | for k, vs := range objInfo.Metadata { | ||
10948 | if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { | ||
10949 | for _, v := range vs { | ||
10950 | h.Add(k, v) | ||
10951 | } | ||
10952 | } | ||
10953 | } | ||
10954 | return h | ||
10955 | } | ||
10956 | |||
10957 | metadata := make(http.Header) | ||
10958 | metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") | ||
10959 | |||
10960 | emptyMetadata := make(http.Header) | ||
10961 | |||
10962 | const srcSize = 1024 * 1024 | ||
10963 | buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB | ||
10964 | |||
10965 | _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", | ||
10966 | bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) | ||
10967 | if err != nil { | ||
10968 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
10969 | return | ||
10970 | } | ||
10971 | |||
10972 | // Get the returned metadata | ||
10973 | returnedMeta := fetchMeta("srcObjectRRSClass") | ||
10974 | |||
10975 | // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) | ||
10976 | if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { | ||
10977 | logError(testName, function, args, startTime, "", "Metadata match failed", err) | ||
10978 | return | ||
10979 | } | ||
10980 | |||
10981 | metadata = make(http.Header) | ||
10982 | metadata.Set("x-amz-storage-class", "STANDARD") | ||
10983 | |||
10984 | _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", | ||
10985 | bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) | ||
10986 | if err != nil { | ||
10987 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
10988 | return | ||
10989 | } | ||
10990 | if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) { | ||
10991 | logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) | ||
10992 | return | ||
10993 | } | ||
10994 | |||
10995 | successLogger(testName, function, args, startTime).Info() | ||
10996 | } | ||
10997 | |||
10998 | func testStorageClassInvalidMetadataPutObject() { | ||
10999 | // initialize logging params | ||
11000 | startTime := time.Now() | ||
11001 | function := "testStorageClassInvalidMetadataPutObject()" | ||
11002 | args := map[string]interface{}{} | ||
11003 | testName := getFuncName() | ||
11004 | |||
11005 | // Instantiate new minio client object | ||
11006 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
11007 | &minio.Options{ | ||
11008 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
11009 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
11010 | }) | ||
11011 | if err != nil { | ||
11012 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
11013 | return | ||
11014 | } | ||
11015 | |||
11016 | // Generate a new random bucket name. | ||
11017 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
11018 | // Make a new bucket in 'us-east-1' (source bucket). | ||
11019 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
11020 | if err != nil { | ||
11021 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
11022 | return | ||
11023 | } | ||
11024 | |||
11025 | defer cleanupBucket(bucketName, c) | ||
11026 | |||
11027 | const srcSize = 1024 * 1024 | ||
11028 | buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB | ||
11029 | |||
11030 | _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", | ||
11031 | bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"}) | ||
11032 | if err == nil { | ||
11033 | logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err) | ||
11034 | return | ||
11035 | } | ||
11036 | |||
11037 | successLogger(testName, function, args, startTime).Info() | ||
11038 | } | ||
11039 | |||
11040 | func testStorageClassMetadataCopyObject() { | ||
11041 | // initialize logging params | ||
11042 | startTime := time.Now() | ||
11043 | function := "testStorageClassMetadataCopyObject()" | ||
11044 | args := map[string]interface{}{} | ||
11045 | testName := getFuncName() | ||
11046 | |||
11047 | // Instantiate new minio client object | ||
11048 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
11049 | &minio.Options{ | ||
11050 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
11051 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
11052 | }) | ||
11053 | if err != nil { | ||
11054 | logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) | ||
11055 | return | ||
11056 | } | ||
11057 | |||
11058 | // Generate a new random bucket name. | ||
11059 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") | ||
11060 | // Make a new bucket in 'us-east-1' (source bucket). | ||
11061 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
11062 | if err != nil { | ||
11063 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
11064 | return | ||
11065 | } | ||
11066 | |||
11067 | defer cleanupBucket(bucketName, c) | ||
11068 | |||
11069 | fetchMeta := func(object string) (h http.Header) { | ||
11070 | objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) | ||
11071 | args["bucket"] = bucketName | ||
11072 | args["object"] = object | ||
11073 | if err != nil { | ||
11074 | logError(testName, function, args, startTime, "", "Stat failed", err) | ||
11075 | return | ||
11076 | } | ||
11077 | h = make(http.Header) | ||
11078 | for k, vs := range objInfo.Metadata { | ||
11079 | if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { | ||
11080 | for _, v := range vs { | ||
11081 | h.Add(k, v) | ||
11082 | } | ||
11083 | } | ||
11084 | } | ||
11085 | return h | ||
11086 | } | ||
11087 | |||
11088 | metadata := make(http.Header) | ||
11089 | metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") | ||
11090 | |||
11091 | emptyMetadata := make(http.Header) | ||
11092 | |||
11093 | const srcSize = 1024 * 1024 | ||
11094 | buf := bytes.Repeat([]byte("abcde"), srcSize) | ||
11095 | |||
11096 | // Put an object with RRS Storage class | ||
11097 | _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", | ||
11098 | bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) | ||
11099 | if err != nil { | ||
11100 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
11101 | return | ||
11102 | } | ||
11103 | |||
11104 | // Make server side copy of object uploaded in previous step | ||
11105 | src := minio.CopySrcOptions{ | ||
11106 | Bucket: bucketName, | ||
11107 | Object: "srcObjectRRSClass", | ||
11108 | } | ||
11109 | dst := minio.CopyDestOptions{ | ||
11110 | Bucket: bucketName, | ||
11111 | Object: "srcObjectRRSClassCopy", | ||
11112 | } | ||
11113 | if _, err = c.CopyObject(context.Background(), dst, src); err != nil { | ||
11114 | logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err) | ||
11115 | return | ||
11116 | } | ||
11117 | |||
11118 | // Get the returned metadata | ||
11119 | returnedMeta := fetchMeta("srcObjectRRSClassCopy") | ||
11120 | |||
11121 | // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) | ||
11122 | if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { | ||
11123 | logError(testName, function, args, startTime, "", "Metadata match failed", err) | ||
11124 | return | ||
11125 | } | ||
11126 | |||
11127 | metadata = make(http.Header) | ||
11128 | metadata.Set("x-amz-storage-class", "STANDARD") | ||
11129 | |||
11130 | // Put an object with Standard Storage class | ||
11131 | _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", | ||
11132 | bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) | ||
11133 | if err != nil { | ||
11134 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
11135 | return | ||
11136 | } | ||
11137 | |||
11138 | // Make server side copy of object uploaded in previous step | ||
11139 | src = minio.CopySrcOptions{ | ||
11140 | Bucket: bucketName, | ||
11141 | Object: "srcObjectSSClass", | ||
11142 | } | ||
11143 | dst = minio.CopyDestOptions{ | ||
11144 | Bucket: bucketName, | ||
11145 | Object: "srcObjectSSClassCopy", | ||
11146 | } | ||
11147 | if _, err = c.CopyObject(context.Background(), dst, src); err != nil { | ||
11148 | logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) | ||
11149 | return | ||
11150 | } | ||
11151 | // Fetch the metadata of the copied object | ||
11152 | if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { | ||
11153 | logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) | ||
11154 | return | ||
11155 | } | ||
11156 | |||
11157 | successLogger(testName, function, args, startTime).Info() | ||
11158 | } | ||
11159 | |||
11160 | // Test put object with size -1 (unknown length). | ||
11161 | func testPutObjectNoLengthV2() { | ||
11162 | // initialize logging params | ||
11163 | startTime := time.Now() | ||
11164 | testName := getFuncName() | ||
11165 | function := "PutObject(bucketName, objectName, reader, size, opts)" | ||
11166 | args := map[string]interface{}{ | ||
11167 | "bucketName": "", | ||
11168 | "objectName": "", | ||
11169 | "size": -1, | ||
11170 | "opts": "", | ||
11171 | } | ||
11172 | |||
11173 | // Seed random based on current time. | ||
11174 | rand.Seed(time.Now().Unix()) | ||
11175 | |||
11176 | // Instantiate new minio client object. | ||
11177 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
11178 | &minio.Options{ | ||
11179 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
11180 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
11181 | }) | ||
11182 | if err != nil { | ||
11183 | logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) | ||
11184 | return | ||
11185 | } | ||
11186 | |||
11187 | // Enable tracing, write to stderr. | ||
11188 | // c.TraceOn(os.Stderr) | ||
11189 | |||
11190 | // Set user agent. | ||
11191 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
11192 | |||
11193 | // Generate a new random bucket name. | ||
11194 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
11195 | args["bucketName"] = bucketName | ||
11196 | |||
11197 | // Make a new bucket. | ||
11198 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
11199 | if err != nil { | ||
11200 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
11201 | return | ||
11202 | } | ||
11203 | |||
11204 | defer cleanupBucket(bucketName, c) | ||
11205 | |||
11206 | objectName := bucketName + "unique" | ||
11207 | args["objectName"] = objectName | ||
11208 | |||
11209 | bufSize := dataFileMap["datafile-129-MB"] | ||
11210 | reader := getDataReader("datafile-129-MB") | ||
11211 | defer reader.Close() | ||
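| // The 129MB payload appears chosen to exceed the client's default 128MiB part size, so the unknown-length upload below spans multiple parts. | ||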
11212 | args["size"] = bufSize | ||
11213 | |||
11214 | // Upload an object. | ||
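| // Passing size -1 tells the client the length is unknown, so it falls back to its streaming multipart upload path. | ||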
11215 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{}) | ||
11216 | if err != nil { | ||
11217 | logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) | ||
11218 | return | ||
11219 | } | ||
11220 | |||
11221 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
11222 | if err != nil { | ||
11223 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
11224 | return | ||
11225 | } | ||
11226 | |||
11227 | if st.Size != int64(bufSize) { | ||
11228 | logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", bufSize, st.Size), err) | ||
11229 | return | ||
11230 | } | ||
11231 | |||
11232 | successLogger(testName, function, args, startTime).Info() | ||
11233 | } | ||
11234 | |||
11235 | // Test put objects of unknown size. | ||
11236 | func testPutObjectsUnknownV2() { | ||
11237 | // initialize logging params | ||
11238 | startTime := time.Now() | ||
11239 | testName := getFuncName() | ||
11240 | function := "PutObject(bucketName, objectName, reader,size,opts)" | ||
11241 | args := map[string]interface{}{ | ||
11242 | "bucketName": "", | ||
11243 | "objectName": "", | ||
11244 | "size": "", | ||
11245 | "opts": "", | ||
11246 | } | ||
11247 | |||
11248 | // Seed random based on current time. | ||
11249 | rand.Seed(time.Now().Unix()) | ||
11250 | |||
11251 | // Instantiate new minio client object. | ||
11252 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
11253 | &minio.Options{ | ||
11254 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
11255 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
11256 | }) | ||
11257 | if err != nil { | ||
11258 | logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) | ||
11259 | return | ||
11260 | } | ||
11261 | |||
11262 | // Enable tracing, write to stderr. | ||
11263 | // c.TraceOn(os.Stderr) | ||
11264 | |||
11265 | // Set user agent. | ||
11266 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
11267 | |||
11268 | // Generate a new random bucket name. | ||
11269 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
11270 | args["bucketName"] = bucketName | ||
11271 | |||
11272 | // Make a new bucket. | ||
11273 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
11274 | if err != nil { | ||
11275 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
11276 | return | ||
11277 | } | ||
11278 | |||
11279 | defer cleanupBucket(bucketName, c) | ||
11280 | |||
11281 | // Uploading multiple objects of unknown size sequentially has | ||
11282 | // revealed memory issues in the past (on 4GB machines) | ||
11283 | for i := 1; i <= 4; i++ { | ||
11284 | // Simulate that we could be receiving byte slices of data that we want | ||
11285 | // to upload as a file | ||
11286 | rpipe, wpipe := io.Pipe() | ||
11287 | defer rpipe.Close() | ||
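| // The goroutine below writes 4 bytes and closes the writer; that EOF lets PutObject (called with size -1) finish the stream. | ||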
11288 | go func() { | ||
11289 | b := []byte("test") | ||
11290 | wpipe.Write(b) | ||
11291 | wpipe.Close() | ||
11292 | }() | ||
11293 | |||
11294 | // Upload the object. | ||
11295 | objectName := fmt.Sprintf("%sunique%d", bucketName, i) | ||
11296 | args["objectName"] = objectName | ||
11297 | |||
11298 | ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) | ||
11299 | if err != nil { | ||
11300 | logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) | ||
11301 | return | ||
11302 | } | ||
11303 | |||
11304 | if ui.Size != 4 { | ||
11305 | logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4 got %d", ui.Size), nil) | ||
11306 | return | ||
11307 | } | ||
11308 | |||
11309 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
11310 | if err != nil { | ||
11311 | logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err) | ||
11312 | return | ||
11313 | } | ||
11314 | |||
11315 | if st.Size != int64(4) { | ||
11316 | logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4 got %d", st.Size), err) | ||
11317 | return | ||
11318 | } | ||
11319 | |||
11320 | } | ||
11321 | |||
11322 | successLogger(testName, function, args, startTime).Info() | ||
11323 | } | ||
11324 | |||
11325 | // Test put object with 0 byte object. | ||
11326 | func testPutObject0ByteV2() { | ||
11327 | // initialize logging params | ||
11328 | startTime := time.Now() | ||
11329 | testName := getFuncName() | ||
11330 | function := "PutObject(bucketName, objectName, reader, size, opts)" | ||
11331 | args := map[string]interface{}{ | ||
11332 | "bucketName": "", | ||
11333 | "objectName": "", | ||
11334 | "size": 0, | ||
11335 | "opts": "", | ||
11336 | } | ||
11337 | |||
11338 | // Seed random based on current time. | ||
11339 | rand.Seed(time.Now().Unix()) | ||
11340 | |||
11341 | // Instantiate new minio client object. | ||
11342 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
11343 | &minio.Options{ | ||
11344 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
11345 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
11346 | }) | ||
11347 | if err != nil { | ||
11348 | logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) | ||
11349 | return | ||
11350 | } | ||
11351 | |||
11352 | // Enable tracing, write to stderr. | ||
11353 | // c.TraceOn(os.Stderr) | ||
11354 | |||
11355 | // Set user agent. | ||
11356 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
11357 | |||
11358 | // Generate a new random bucket name. | ||
11359 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
11360 | args["bucketName"] = bucketName | ||
11361 | |||
11362 | // Make a new bucket. | ||
11363 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
11364 | if err != nil { | ||
11365 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
11366 | return | ||
11367 | } | ||
11368 | |||
11369 | defer cleanupBucket(bucketName, c) | ||
11370 | |||
11371 | objectName := bucketName + "unique" | ||
11372 | args["objectName"] = objectName | ||
11373 | args["opts"] = minio.PutObjectOptions{} | ||
11374 | |||
11375 | // Upload an object. | ||
11376 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) | ||
11377 | if err != nil { | ||
11378 | logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) | ||
11379 | return | ||
11380 | } | ||
11381 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
11382 | if err != nil { | ||
11383 | logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err) | ||
11384 | return | ||
11385 | } | ||
11386 | if st.Size != 0 { | ||
11387 | logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 0 but got %d", st.Size), err) | ||
11388 | return | ||
11389 | } | ||
11390 | |||
11391 | successLogger(testName, function, args, startTime).Info() | ||
11392 | } | ||
11393 | |||
11394 | // Test expected error cases | ||
11395 | func testComposeObjectErrorCases() { | ||
11396 | // initialize logging params | ||
11397 | startTime := time.Now() | ||
11398 | testName := getFuncName() | ||
11399 | function := "ComposeObject(destination, sourceList)" | ||
11400 | args := map[string]interface{}{} | ||
11401 | |||
11402 | // Instantiate new minio client object | ||
11403 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
11404 | &minio.Options{ | ||
11405 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
11406 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
11407 | }) | ||
11408 | if err != nil { | ||
11409 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
11410 | return | ||
11411 | } | ||
11412 | |||
11413 | testComposeObjectErrorCasesWrapper(c) | ||
11414 | } | ||
11415 | |||
11416 | // Test concatenating multiple 10K objects V4 | ||
11417 | func testCompose10KSources() { | ||
11418 | // initialize logging params | ||
11419 | startTime := time.Now() | ||
11420 | testName := getFuncName() | ||
11421 | function := "ComposeObject(destination, sourceList)" | ||
11422 | args := map[string]interface{}{} | ||
11423 | |||
11424 | // Instantiate new minio client object | ||
11425 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
11426 | &minio.Options{ | ||
11427 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
11428 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
11429 | }) | ||
11430 | if err != nil { | ||
11431 | logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) | ||
11432 | return | ||
11433 | } | ||
11434 | |||
11435 | testComposeMultipleSources(c) | ||
11436 | } | ||
11437 | |||
11438 | // Tests a comprehensive list of all methods. | ||
11439 | func testFunctionalV2() { | ||
11440 | // initialize logging params | ||
11441 | startTime := time.Now() | ||
11442 | testName := getFuncName() | ||
11443 | function := "testFunctionalV2()" | ||
11444 | functionAll := "" | ||
11445 | args := map[string]interface{}{} | ||
11446 | |||
11447 | // Seed random based on current time. | ||
11448 | rand.Seed(time.Now().Unix()) | ||
11449 | |||
11450 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
11451 | &minio.Options{ | ||
11452 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
11453 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
11454 | }) | ||
11455 | if err != nil { | ||
11456 | logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) | ||
11457 | return | ||
11458 | } | ||
11459 | |||
11460 | // Enable to debug | ||
11461 | // c.TraceOn(os.Stderr) | ||
11462 | |||
11463 | // Set user agent. | ||
11464 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
11465 | |||
11466 | // Generate a new random bucket name. | ||
11467 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
11468 | location := "us-east-1" | ||
11469 | // Make a new bucket. | ||
11470 | function = "MakeBucket(bucketName, location)" | ||
11471 | functionAll = "MakeBucket(bucketName, location)" | ||
11472 | args = map[string]interface{}{ | ||
11473 | "bucketName": bucketName, | ||
11474 | "location": location, | ||
11475 | } | ||
11476 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) | ||
11477 | if err != nil { | ||
11478 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
11479 | return | ||
11480 | } | ||
11481 | |||
11482 | defer cleanupBucket(bucketName, c) | ||
11483 | |||
11484 | // Generate a random file name. | ||
11485 | fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
11486 | file, err := os.Create(fileName) | ||
11487 | if err != nil { | ||
11488 | logError(testName, function, args, startTime, "", "file create failed", err) | ||
11489 | return | ||
11490 | } | ||
11491 | for i := 0; i < 3; i++ { | ||
11492 | buf := make([]byte, rand.Intn(1<<19)) | ||
11493 | _, err = file.Write(buf) | ||
11494 | if err != nil { | ||
11495 | logError(testName, function, args, startTime, "", "file write failed", err) | ||
11496 | return | ||
11497 | } | ||
11498 | } | ||
11499 | file.Close() | ||
11500 | |||
11501 | // Verify the bucket exists and you have access. | ||
11502 | var exists bool | ||
11503 | function = "BucketExists(bucketName)" | ||
11504 | functionAll += ", " + function | ||
11505 | args = map[string]interface{}{ | ||
11506 | "bucketName": bucketName, | ||
11507 | } | ||
11508 | exists, err = c.BucketExists(context.Background(), bucketName) | ||
11509 | if err != nil { | ||
11510 | logError(testName, function, args, startTime, "", "BucketExists failed", err) | ||
11511 | return | ||
11512 | } | ||
11513 | if !exists { | ||
11514 | logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err) | ||
11515 | return | ||
11516 | } | ||
11517 | |||
11518 | // Attach a bucket policy granting anonymous list access. | ||
11519 | function = "SetBucketPolicy(bucketName, bucketPolicy)" | ||
11520 | functionAll += ", " + function | ||
11521 | |||
11522 | readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}` | ||
11523 | |||
11524 | args = map[string]interface{}{ | ||
11525 | "bucketName": bucketName, | ||
11526 | "bucketPolicy": readWritePolicy, | ||
11527 | } | ||
11528 | err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) | ||
11529 | |||
11530 | if err != nil { | ||
11531 | logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) | ||
11532 | return | ||
11533 | } | ||
11534 | |||
11535 | // List all buckets. | ||
11536 | function = "ListBuckets()" | ||
11537 | functionAll += ", " + function | ||
11538 | args = nil | ||
11539 | buckets, err := c.ListBuckets(context.Background()) | ||
11540 | if err != nil { | ||
11541 | logError(testName, function, args, startTime, "", "ListBuckets failed", err) | ||
11542 | return | ||
11543 | } | ||
11544 | if len(buckets) == 0 { | ||
11545 | logError(testName, function, args, startTime, "", "List buckets cannot be empty", err) | ||
11546 | return | ||
11547 | } | ||
11548 | |||
11549 | // Verify if previously created bucket is listed in list buckets. | ||
11550 | bucketFound := false | ||
11551 | for _, bucket := range buckets { | ||
11552 | if bucket.Name == bucketName { | ||
11553 | bucketFound = true | ||
11554 | } | ||
11555 | } | ||
11556 | |||
11557 | // If the bucket was not found, error out. | ||
11558 | if !bucketFound { | ||
11559 | logError(testName, function, args, startTime, "", "Bucket "+bucketName+" not found", err) | ||
11560 | return | ||
11561 | } | ||
11562 | |||
11563 | objectName := bucketName + "unique" | ||
11564 | |||
11565 | // Generate data | ||
11566 | buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19)) | ||
11567 | |||
11568 | args = map[string]interface{}{ | ||
11569 | "bucketName": bucketName, | ||
11570 | "objectName": objectName, | ||
11571 | "contentType": "", | ||
11572 | } | ||
11573 | _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) | ||
11574 | if err != nil { | ||
11575 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
11576 | return | ||
11577 | } | ||
11578 | |||
11579 | st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) | ||
11580 | if err != nil { | ||
11581 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
11582 | return | ||
11583 | } | ||
11584 | if st.Size != int64(len(buf)) { | ||
11585 | logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d got %d", len(buf), st.Size), err) | ||
11586 | return | ||
11587 | } | ||
11588 | |||
11589 | objectNameNoLength := objectName + "-nolength" | ||
11590 | args["objectName"] = objectNameNoLength | ||
11591 | _, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
11592 | if err != nil { | ||
11593 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
11594 | return | ||
11595 | } | ||
11596 | st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{}) | ||
11597 | if err != nil { | ||
11598 | logError(testName, function, args, startTime, "", "StatObject failed", err) | ||
11599 | return | ||
11600 | } | ||
11601 | if st.Size != int64(len(buf)) { | ||
11602 | logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d got %d", len(buf), st.Size), err) | ||
11603 | return | ||
11604 | } | ||
11605 | |||
11606 | // Instantiate a done channel to close all listing. | ||
11607 | doneCh := make(chan struct{}) | ||
11608 | defer close(doneCh) | ||
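| // doneCh is a holdover from the older channel-based listing API; it is never passed to the v7 calls below, which are canceled via context instead. | ||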
11609 | |||
11610 | objFound := false | ||
11611 | isRecursive := true // Recursive is true. | ||
11612 | function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" | ||
11613 | functionAll += ", " + function | ||
11614 | args = map[string]interface{}{ | ||
11615 | "bucketName": bucketName, | ||
11616 | "objectName": objectName, | ||
11617 | "isRecursive": isRecursive, | ||
11618 | } | ||
11619 | for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) { | ||
11620 | if obj.Key == objectName { | ||
11621 | objFound = true | ||
11622 | break | ||
11623 | } | ||
11624 | } | ||
11625 | if !objFound { | ||
11626 | logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) | ||
11627 | return | ||
11628 | } | ||
11629 | |||
11630 | incompObjNotFound := true | ||
11631 | function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" | ||
11632 | functionAll += ", " + function | ||
11633 | args = map[string]interface{}{ | ||
11634 | "bucketName": bucketName, | ||
11635 | "objectName": objectName, | ||
11636 | "isRecursive": isRecursive, | ||
11637 | } | ||
11638 | for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { | ||
11639 | if objIncompl.Key != "" { | ||
11640 | incompObjNotFound = false | ||
11641 | break | ||
11642 | } | ||
11643 | } | ||
11644 | if !incompObjNotFound { | ||
11645 | logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) | ||
11646 | return | ||
11647 | } | ||
11648 | |||
11649 | function = "GetObject(bucketName, objectName)" | ||
11650 | functionAll += ", " + function | ||
11651 | args = map[string]interface{}{ | ||
11652 | "bucketName": bucketName, | ||
11653 | "objectName": objectName, | ||
11654 | } | ||
11655 | newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) | ||
11656 | if err != nil { | ||
11657 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
11658 | return | ||
11659 | } | ||
11660 | |||
11661 | newReadBytes, err := io.ReadAll(newReader) | ||
11662 | if err != nil { | ||
11663 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
11664 | return | ||
11665 | } | ||
11666 | newReader.Close() | ||
11667 | |||
11668 | if !bytes.Equal(newReadBytes, buf) { | ||
11669 | logError(testName, function, args, startTime, "", "Bytes mismatch", err) | ||
11670 | return | ||
11671 | } | ||
11672 | |||
11673 | function = "FGetObject(bucketName, objectName, fileName)" | ||
11674 | functionAll += ", " + function | ||
11675 | args = map[string]interface{}{ | ||
11676 | "bucketName": bucketName, | ||
11677 | "objectName": objectName, | ||
11678 | "fileName": fileName + "-f", | ||
11679 | } | ||
11680 | err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) | ||
11681 | if err != nil { | ||
11682 | logError(testName, function, args, startTime, "", "FgetObject failed", err) | ||
11683 | return | ||
11684 | } | ||
11685 | |||
11686 | // Generate presigned HEAD object url. | ||
11687 | function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" | ||
11688 | functionAll += ", " + function | ||
11689 | args = map[string]interface{}{ | ||
11690 | "bucketName": bucketName, | ||
11691 | "objectName": objectName, | ||
11692 | "expires": 3600 * time.Second, | ||
11693 | } | ||
11694 | presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) | ||
11695 | if err != nil { | ||
11696 | logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) | ||
11697 | return | ||
11698 | } | ||
11699 | |||
11700 | transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) | ||
11701 | if err != nil { | ||
11702 | logError(testName, function, args, startTime, "", "DefaultTransport failed", err) | ||
11703 | return | ||
11704 | } | ||
11705 | |||
11706 | httpClient := &http.Client{ | ||
11707 | // Set a sensible 30-second timeout to wait for response | ||
11708 | // headers; the request is proactively canceled if no | ||
11709 | // response arrives in that time. | ||
11710 | Timeout: 30 * time.Second, | ||
11711 | Transport: transport, | ||
11712 | } | ||
11713 | |||
11714 | req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) | ||
11715 | if err != nil { | ||
11716 | logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) | ||
11717 | return | ||
11718 | } | ||
11719 | |||
11720 | // Verify if presigned url works. | ||
11721 | resp, err := httpClient.Do(req) | ||
11722 | if err != nil { | ||
11723 | logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) | ||
11724 | return | ||
11725 | } | ||
11726 | if resp.StatusCode != http.StatusOK { | ||
11727 | logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+strconv.Itoa(resp.StatusCode), err) | ||
11728 | return | ||
11729 | } | ||
11730 | if resp.Header.Get("ETag") == "" { | ||
11731 | logError(testName, function, args, startTime, "", "Got empty ETag", err) | ||
11732 | return | ||
11733 | } | ||
11734 | resp.Body.Close() | ||
11735 | |||
11736 | // Generate presigned GET object url. | ||
11737 | function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" | ||
11738 | functionAll += ", " + function | ||
11739 | args = map[string]interface{}{ | ||
11740 | "bucketName": bucketName, | ||
11741 | "objectName": objectName, | ||
11742 | "expires": 3600 * time.Second, | ||
11743 | } | ||
11744 | presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) | ||
11745 | if err != nil { | ||
11746 | logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) | ||
11747 | return | ||
11748 | } | ||
11749 | |||
11750 | // Verify if presigned url works. | ||
11751 | req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) | ||
11752 | if err != nil { | ||
11753 | logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) | ||
11754 | return | ||
11755 | } | ||
11756 | |||
11757 | resp, err = httpClient.Do(req) | ||
11758 | if err != nil { | ||
11759 | logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) | ||
11760 | return | ||
11761 | } | ||
11762 | |||
11763 | if resp.StatusCode != http.StatusOK { | ||
11764 | logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+strconv.Itoa(resp.StatusCode), err) | ||
11765 | return | ||
11766 | } | ||
11767 | newPresignedBytes, err := io.ReadAll(resp.Body) | ||
11768 | if err != nil { | ||
11769 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
11770 | return | ||
11771 | } | ||
11772 | resp.Body.Close() | ||
11773 | if !bytes.Equal(newPresignedBytes, buf) { | ||
11774 | logError(testName, function, args, startTime, "", "Bytes mismatch", err) | ||
11775 | return | ||
11776 | } | ||
11777 | |||
11778 | // Set request parameters. | ||
11779 | reqParams := make(url.Values) | ||
11780 | reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") | ||
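// The response-content-disposition query parameter makes the server
// override the Content-Disposition header on the GET response, which is
// verified further below.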
11781 | // Generate presigned GET object url. | ||
11782 | args["reqParams"] = reqParams | ||
11783 | presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) | ||
11784 | if err != nil { | ||
11785 | logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) | ||
11786 | return | ||
11787 | } | ||
11788 | |||
11789 | // Verify if presigned url works. | ||
11790 | req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) | ||
11791 | if err != nil { | ||
11792 | logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) | ||
11793 | return | ||
11794 | } | ||
11795 | |||
11796 | resp, err = httpClient.Do(req) | ||
11797 | if err != nil { | ||
11798 | logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) | ||
11799 | return | ||
11800 | } | ||
11801 | |||
11802 | if resp.StatusCode != http.StatusOK { | ||
11803 | logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+strconv.Itoa(resp.StatusCode), err) | ||
11804 | return | ||
11805 | } | ||
11806 | newPresignedBytes, err = io.ReadAll(resp.Body) | ||
11807 | if err != nil { | ||
11808 | logError(testName, function, args, startTime, "", "ReadAll failed", err) | ||
11809 | return | ||
11810 | } | ||
11811 | if !bytes.Equal(newPresignedBytes, buf) { | ||
11812 | logError(testName, function, args, startTime, "", "Bytes mismatch", err) | ||
11813 | return | ||
11814 | } | ||
11815 | // Verify content disposition. | ||
11816 | if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { | ||
11817 | logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) | ||
11818 | return | ||
11819 | } | ||
11820 | |||
11821 | function = "PresignedPutObject(bucketName, objectName, expires)" | ||
11822 | functionAll += ", " + function | ||
11823 | args = map[string]interface{}{ | ||
11824 | "bucketName": bucketName, | ||
11825 | "objectName": objectName + "-presigned", | ||
11826 | "expires": 3600 * time.Second, | ||
11827 | } | ||
11828 | presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) | ||
11829 | if err != nil { | ||
11830 | logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) | ||
11831 | return | ||
11832 | } | ||
11833 | |||
11834 | // Generate more than 32 KiB of data | ||
11835 | buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) | ||
11836 | |||
11837 | req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) | ||
11838 | if err != nil { | ||
11839 | logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) | ||
11840 | return | ||
11841 | } | ||
11842 | |||
11843 | resp, err = httpClient.Do(req) | ||
11844 | if err != nil { | ||
11845 | logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) | ||
11846 | return | ||
11847 | } | ||
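// Added check (not in the upstream test): mirror the other presigned
// verifications; a successful presigned PUT returns 200 OK.
if resp.StatusCode != http.StatusOK {
	logError(testName, function, args, startTime, "", "PresignedPutObject URL returns status "+strconv.Itoa(resp.StatusCode), err)
	return
}
resp.Body.Close()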
11848 | |||
11849 | // Download the uploaded object to verify | ||
11850 | args = map[string]interface{}{ | ||
11851 | "bucketName": bucketName, | ||
11852 | "objectName": objectName + "-presigned", | ||
11853 | } | ||
11854 | newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) | ||
11855 | if err != nil { | ||
11856 | logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err) | ||
11857 | return | ||
11858 | } | ||
11859 | |||
11860 | newReadBytes, err = io.ReadAll(newReader) | ||
11861 | if err != nil { | ||
11862 | logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) | ||
11863 | return | ||
11864 | } | ||
11865 | newReader.Close() | ||
11866 | |||
11867 | if !bytes.Equal(newReadBytes, buf) { | ||
11868 | logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err) | ||
11869 | return | ||
11870 | } | ||
11871 | |||
11872 | function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" | ||
11873 | functionAll += ", " + function | ||
11874 | presignExtraHeaders := map[string][]string{ | ||
11875 | "mysecret": {"abcxxx"}, | ||
11876 | } | ||
11877 | args = map[string]interface{}{ | ||
11878 | "method": "PUT", | ||
11879 | "bucketName": bucketName, | ||
11880 | "objectName": objectName + "-presign-custom", | ||
11881 | "expires": 3600 * time.Second, | ||
11882 | "extraHeaders": presignExtraHeaders, | ||
11883 | } | ||
11884 | _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) | ||
11885 | if err == nil { | ||
11886 | logError(testName, function, args, startTime, "", "PresignHeader with extra headers unexpectedly succeeded", err) | ||
11887 | return | ||
11888 | } | ||
11889 | |||
11890 | os.Remove(fileName) | ||
11891 | os.Remove(fileName + "-f") | ||
11892 | successLogger(testName, functionAll, args, startTime).Info() | ||
11893 | } | ||
11894 | |||
11895 | // Test get object with GetObject with context | ||
11896 | func testGetObjectContext() { | ||
11897 | // initialize logging params | ||
11898 | startTime := time.Now() | ||
11899 | testName := getFuncName() | ||
11900 | function := "GetObject(ctx, bucketName, objectName)" | ||
11901 | args := map[string]interface{}{ | ||
11902 | "ctx": "", | ||
11903 | "bucketName": "", | ||
11904 | "objectName": "", | ||
11905 | } | ||
11906 | // Seed random based on current time. | ||
11907 | rand.Seed(time.Now().Unix()) | ||
11908 | |||
11909 | // Instantiate new minio client object. | ||
11910 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
11911 | &minio.Options{ | ||
11912 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
11913 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
11914 | }) | ||
11915 | if err != nil { | ||
11916 | logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) | ||
11917 | return | ||
11918 | } | ||
11919 | |||
11920 | // Enable tracing, write to stderr. | ||
11921 | // c.TraceOn(os.Stderr) | ||
11922 | |||
11923 | // Set user agent. | ||
11924 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
11925 | |||
11926 | // Generate a new random bucket name. | ||
11927 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
11928 | args["bucketName"] = bucketName | ||
11929 | |||
11930 | // Make a new bucket. | ||
11931 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
11932 | if err != nil { | ||
11933 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
11934 | return | ||
11935 | } | ||
11936 | |||
11937 | defer cleanupBucket(bucketName, c) | ||
11938 | |||
11939 | bufSize := dataFileMap["datafile-33-kB"] | ||
11940 | reader := getDataReader("datafile-33-kB") | ||
11941 | defer reader.Close() | ||
11942 | // Save the data | ||
11943 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
11944 | args["objectName"] = objectName | ||
11945 | |||
11946 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
11947 | if err != nil { | ||
11948 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
11949 | return | ||
11950 | } | ||
11951 | |||
11952 | ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) | ||
11953 | args["ctx"] = ctx | ||
11954 | cancel() | ||
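// GetObject is lazy: no request is issued until the first read, so the
// expired context must surface at Stat() below rather than here.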
11955 | |||
11956 | r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) | ||
11957 | if err != nil { | ||
11958 | logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err) | ||
11959 | return | ||
11960 | } | ||
11961 | |||
11962 | if _, err = r.Stat(); err == nil { | ||
11963 | logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err) | ||
11964 | return | ||
11965 | } | ||
11966 | r.Close() | ||
11967 | |||
11968 | ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) | ||
11969 | args["ctx"] = ctx | ||
11970 | defer cancel() | ||
11971 | |||
11972 | // Read the data back | ||
11973 | r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) | ||
11974 | if err != nil { | ||
11975 | logError(testName, function, args, startTime, "", "GetObject failed", err) | ||
11976 | return | ||
11977 | } | ||
11978 | |||
11979 | st, err := r.Stat() | ||
11980 | if err != nil { | ||
11981 | logError(testName, function, args, startTime, "", "object Stat call failed", err) | ||
11982 | return | ||
11983 | } | ||
11984 | if st.Size != int64(bufSize) { | ||
11985 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match: want %d, got %d", bufSize, st.Size), err) | ||
11986 | return | ||
11987 | } | ||
11988 | if err := r.Close(); err != nil { | ||
11989 | logError(testName, function, args, startTime, "", "object Close() call failed", err) | ||
11990 | return | ||
11991 | } | ||
11992 | |||
11993 | successLogger(testName, function, args, startTime).Info() | ||
11994 | } | ||
11995 | |||
11996 | // Test get object with FGetObject with a user provided context | ||
11997 | func testFGetObjectContext() { | ||
11998 | // initialize logging params | ||
11999 | startTime := time.Now() | ||
12000 | testName := getFuncName() | ||
12001 | function := "FGetObject(ctx, bucketName, objectName, fileName)" | ||
12002 | args := map[string]interface{}{ | ||
12003 | "ctx": "", | ||
12004 | "bucketName": "", | ||
12005 | "objectName": "", | ||
12006 | "fileName": "", | ||
12007 | } | ||
12008 | // Seed random based on current time. | ||
12009 | rand.Seed(time.Now().Unix()) | ||
12010 | |||
12011 | // Instantiate new minio client object. | ||
12012 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
12013 | &minio.Options{ | ||
12014 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
12015 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
12016 | }) | ||
12017 | if err != nil { | ||
12018 | logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) | ||
12019 | return | ||
12020 | } | ||
12021 | |||
12022 | // Enable tracing, write to stderr. | ||
12023 | // c.TraceOn(os.Stderr) | ||
12024 | |||
12025 | // Set user agent. | ||
12026 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
12027 | |||
12028 | // Generate a new random bucket name. | ||
12029 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
12030 | args["bucketName"] = bucketName | ||
12031 | |||
12032 | // Make a new bucket. | ||
12033 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
12034 | if err != nil { | ||
12035 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
12036 | return | ||
12037 | } | ||
12038 | |||
12039 | defer cleanupBucket(bucketName, c) | ||
12040 | |||
12041 | bufSize := dataFileMap["datafile-1-MB"] | ||
12042 | reader := getDataReader("datafile-1-MB") | ||
12043 | defer reader.Close() | ||
12044 | // Save the data | ||
12045 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
12046 | args["objectName"] = objectName | ||
12047 | |||
12048 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
12049 | if err != nil { | ||
12050 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
12051 | return | ||
12052 | } | ||
12053 | |||
12054 | ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) | ||
12055 | args["ctx"] = ctx | ||
12056 | defer cancel() | ||
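// Unlike the lazy GetObject, FGetObject performs the full download, so
// an already-expired context must fail the call itself.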
12057 | |||
12058 | fileName := "tempfile-context" | ||
12059 | args["fileName"] = fileName | ||
12060 | // Read the data back | ||
12061 | err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) | ||
12062 | if err == nil { | ||
12063 | logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) | ||
12064 | return | ||
12065 | } | ||
12066 | ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) | ||
12067 | defer cancel() | ||
12068 | |||
12069 | // Read the data back | ||
12070 | err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) | ||
12071 | if err != nil { | ||
12072 | logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) | ||
12073 | return | ||
12074 | } | ||
12075 | if err = os.Remove(fileName + "-fcontext"); err != nil { | ||
12076 | logError(testName, function, args, startTime, "", "Remove file failed", err) | ||
12077 | return | ||
12078 | } | ||
12079 | |||
12080 | successLogger(testName, function, args, startTime).Info() | ||
12081 | } | ||
12082 | |||
12083 | // Test GetObject with a set of different byte ranges | ||
12084 | func testGetObjectRanges() { | ||
12085 | // initialize logging params | ||
12086 | startTime := time.Now() | ||
12087 | testName := getFuncName() | ||
12088 | function := "GetObject(ctx, bucketName, objectName, opts)" | ||
12089 | args := map[string]interface{}{ | ||
12090 | "ctx": "", | ||
12091 | "bucketName": "", | ||
12092 | "objectName": "", | ||
12093 | "fileName": "", | ||
12094 | } | ||
12095 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) | ||
12096 | defer cancel() | ||
12097 | |||
12098 | rng := rand.NewSource(time.Now().UnixNano()) | ||
12099 | // Instantiate new minio client object. | ||
12100 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
12101 | &minio.Options{ | ||
12102 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
12103 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
12104 | }) | ||
12105 | if err != nil { | ||
12106 | logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) | ||
12107 | return | ||
12108 | } | ||
12109 | |||
12110 | // Enable tracing, write to stderr. | ||
12111 | // c.TraceOn(os.Stderr) | ||
12112 | |||
12113 | // Set user agent. | ||
12114 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
12115 | |||
12116 | // Generate a new random bucket name. | ||
12117 | bucketName := randString(60, rng, "minio-go-test-") | ||
12118 | args["bucketName"] = bucketName | ||
12119 | |||
12120 | // Make a new bucket. | ||
12121 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
12122 | if err != nil { | ||
12123 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
12124 | return | ||
12125 | } | ||
12126 | |||
12127 | defer cleanupBucket(bucketName, c) | ||
12128 | |||
12129 | bufSize := dataFileMap["datafile-129-MB"] | ||
12130 | reader := getDataReader("datafile-129-MB") | ||
12131 | defer reader.Close() | ||
12132 | // Save the data | ||
12133 | objectName := randString(60, rng, "") | ||
12134 | args["objectName"] = objectName | ||
12135 | |||
12136 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
12137 | if err != nil { | ||
12138 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
12139 | return | ||
12140 | } | ||
12141 | |||
12142 | // Read the data back | ||
12143 | tests := []struct { | ||
12144 | start int64 | ||
12145 | end int64 | ||
12146 | }{ | ||
12147 | { | ||
12148 | start: 1024, | ||
12149 | end: 1024 + 1<<20, | ||
12150 | }, | ||
12151 | { | ||
12152 | start: 20e6, | ||
12153 | end: 20e6 + 10000, | ||
12154 | }, | ||
12155 | { | ||
12156 | start: 40e6, | ||
12157 | end: 40e6 + 10000, | ||
12158 | }, | ||
12159 | { | ||
12160 | start: 60e6, | ||
12161 | end: 60e6 + 10000, | ||
12162 | }, | ||
12163 | { | ||
12164 | start: 80e6, | ||
12165 | end: 80e6 + 10000, | ||
12166 | }, | ||
12167 | { | ||
12168 | start: 120e6, | ||
12169 | end: int64(bufSize), | ||
12170 | }, | ||
12171 | } | ||
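// SetRange uses inclusive offsets (HTTP Range semantics), hence the +1
// when sizing the expected reader below.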
12172 | for _, test := range tests { | ||
12173 | wantRC := getDataReader("datafile-129-MB") | ||
12174 | io.CopyN(io.Discard, wantRC, test.start) | ||
12175 | want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1)) | ||
12176 | opts := minio.GetObjectOptions{} | ||
12177 | opts.SetRange(test.start, test.end) | ||
12178 | args["opts"] = fmt.Sprintf("%+v", test) | ||
12179 | obj, err := c.GetObject(ctx, bucketName, objectName, opts) | ||
12180 | if err != nil { | ||
12181 | logError(testName, function, args, startTime, "", "GetObject with range failed", err) | ||
12182 | return | ||
12183 | } | ||
12184 | err = crcMatches(obj, want) | ||
12185 | if err != nil { | ||
12186 | logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err) | ||
12187 | return | ||
12188 | } | ||
12189 | } | ||
12190 | |||
12191 | successLogger(testName, function, args, startTime).Info() | ||
12192 | } | ||
12193 | |||
12194 | // Test get object ACLs with GetObjectACL with custom provided context | ||
12195 | func testGetObjectACLContext() { | ||
12196 | // initialize logging params | ||
12197 | startTime := time.Now() | ||
12198 | testName := getFuncName() | ||
12199 | function := "GetObjectACL(ctx, bucketName, objectName)" | ||
12200 | args := map[string]interface{}{ | ||
12201 | "ctx": "", | ||
12202 | "bucketName": "", | ||
12203 | "objectName": "", | ||
12204 | } | ||
12205 | // Seed random based on current time. | ||
12206 | rand.Seed(time.Now().Unix()) | ||
12207 | |||
12208 | // Instantiate new minio client object. | ||
12209 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
12210 | &minio.Options{ | ||
12211 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
12212 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
12213 | }) | ||
12214 | if err != nil { | ||
12215 | logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) | ||
12216 | return | ||
12217 | } | ||
12218 | |||
12219 | // Enable tracing, write to stderr. | ||
12220 | // c.TraceOn(os.Stderr) | ||
12221 | |||
12222 | // Set user agent. | ||
12223 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
12224 | |||
12225 | // Generate a new random bucket name. | ||
12226 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
12227 | args["bucketName"] = bucketName | ||
12228 | |||
12229 | // Make a new bucket. | ||
12230 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
12231 | if err != nil { | ||
12232 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
12233 | return | ||
12234 | } | ||
12235 | |||
12236 | defer cleanupBucket(bucketName, c) | ||
12237 | |||
12238 | bufSize := dataFileMap["datafile-1-MB"] | ||
12239 | reader := getDataReader("datafile-1-MB") | ||
12240 | defer reader.Close() | ||
12241 | // Save the data | ||
12242 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
12243 | args["objectName"] = objectName | ||
12244 | |||
12245 | // Add meta data to add a canned acl | ||
12246 | metaData := map[string]string{ | ||
12247 | "X-Amz-Acl": "public-read-write", | ||
12248 | } | ||
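// UserMetadata keys with a recognized x-amz- prefix (such as X-Amz-Acl)
// are passed through as request headers, so this sets the canned ACL.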
12249 | |||
12250 | _, err = c.PutObject(context.Background(), bucketName, | ||
12251 | objectName, reader, int64(bufSize), | ||
12252 | minio.PutObjectOptions{ | ||
12253 | ContentType: "binary/octet-stream", | ||
12254 | UserMetadata: metaData, | ||
12255 | }) | ||
12256 | |||
12257 | if err != nil { | ||
12258 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
12259 | return | ||
12260 | } | ||
12261 | |||
12262 | ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) | ||
12263 | args["ctx"] = ctx | ||
12264 | defer cancel() | ||
12265 | |||
12266 | // Read the data back | ||
12267 | objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName) | ||
12268 | if getObjectACLErr != nil { | ||
12269 | logError(testName, function, args, startTime, "", "GetObjectACL failed. ", getObjectACLErr) | ||
12270 | return | ||
12271 | } | ||
12272 | |||
12273 | s, ok := objectInfo.Metadata["X-Amz-Acl"] | ||
12274 | if !ok { | ||
12275 | logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil) | ||
12276 | return | ||
12277 | } | ||
12278 | |||
12279 | if len(s) != 1 { | ||
12280 | logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) | ||
12281 | return | ||
12282 | } | ||
12283 | |||
12284 | // Do a very limited testing if this is not AWS S3 | ||
12285 | if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { | ||
12286 | if s[0] != "private" { | ||
12287 | logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got "+fmt.Sprintf("%q", s[0]), nil) | ||
12288 | return | ||
12289 | } | ||
12290 | |||
12291 | successLogger(testName, function, args, startTime).Info() | ||
12292 | return | ||
12293 | } | ||
12294 | |||
12295 | if s[0] != "public-read-write" { | ||
12296 | logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got "+fmt.Sprintf("%q", s[0]), nil) | ||
12297 | return | ||
12298 | } | ||
12299 | |||
12300 | bufSize = dataFileMap["datafile-1-MB"] | ||
12301 | reader2 := getDataReader("datafile-1-MB") | ||
12302 | defer reader2.Close() | ||
12303 | // Save the data | ||
12304 | objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
12305 | args["objectName"] = objectName | ||
12306 | |||
12307 | // Add meta data to add a canned acl | ||
12308 | metaData = map[string]string{ | ||
12309 | "X-Amz-Grant-Read": "[email protected]", | ||
12310 | "X-Amz-Grant-Write": "[email protected]", | ||
12311 | } | ||
12312 | |||
12313 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) | ||
12314 | if err != nil { | ||
12315 | logError(testName, function, args, startTime, "", "PutObject failed", err) | ||
12316 | return | ||
12317 | } | ||
12318 | |||
12319 | ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) | ||
12320 | args["ctx"] = ctx | ||
12321 | defer cancel() | ||
12322 | |||
12323 | // Read the data back | ||
12324 | objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName) | ||
12325 | if getObjectACLErr != nil { | ||
12326 | logError(testName, function, args, startTime, "", "GetObjectACL failed", getObjectACLErr) | ||
12327 | return | ||
12328 | } | ||
12329 | |||
12330 | if len(objectInfo.Metadata) != 3 { | ||
12331 | logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil) | ||
12332 | return | ||
12333 | } | ||
12334 | |||
12335 | s, ok = objectInfo.Metadata["X-Amz-Grant-Read"] | ||
12336 | if !ok { | ||
12337 | logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil) | ||
12338 | return | ||
12339 | } | ||
12340 | |||
12341 | if len(s) != 1 { | ||
12342 | logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) | ||
12343 | return | ||
12344 | } | ||
12345 | |||
12346 | if s[0] != "[email protected]" { | ||
12347 | logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"[email protected]\" got "+fmt.Sprintf("%q", s), nil) | ||
12348 | return | ||
12349 | } | ||
12350 | |||
12351 | s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] | ||
12352 | if !ok { | ||
12353 | logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil) | ||
12354 | return | ||
12355 | } | ||
12356 | |||
12357 | if len(s) != 1 { | ||
12358 | logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) | ||
12359 | return | ||
12360 | } | ||
12361 | |||
12362 | if s[0] != "[email protected]" { | ||
12363 | logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"[email protected]\" got "+fmt.Sprintf("%q", s), nil) | ||
12364 | return | ||
12365 | } | ||
12366 | |||
12367 | successLogger(testName, function, args, startTime).Info() | ||
12368 | } | ||
12369 | |||
12370 | // Test validates putObject with context to see if request cancellation is honored for V2. | ||
12371 | func testPutObjectContextV2() { | ||
12372 | // initialize logging params | ||
12373 | startTime := time.Now() | ||
12374 | testName := getFuncName() | ||
12375 | function := "PutObject(ctx, bucketName, objectName, reader, size, opts)" | ||
12376 | args := map[string]interface{}{ | ||
12377 | "ctx": "", | ||
12378 | "bucketName": "", | ||
12379 | "objectName": "", | ||
12380 | "size": "", | ||
12381 | "opts": "", | ||
12382 | } | ||
12383 | // Instantiate new minio client object. | ||
12384 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
12385 | &minio.Options{ | ||
12386 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
12387 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
12388 | }) | ||
12389 | if err != nil { | ||
12390 | logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) | ||
12391 | return | ||
12392 | } | ||
12393 | |||
12394 | // Enable tracing, write to stderr. | ||
12395 | // c.TraceOn(os.Stderr) | ||
12396 | |||
12397 | // Set user agent. | ||
12398 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
12399 | |||
12400 | // Make a new bucket. | ||
12401 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
12402 | args["bucketName"] = bucketName | ||
12403 | |||
12404 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
12405 | if err != nil { | ||
12406 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
12407 | return | ||
12408 | } | ||
12409 | defer cleanupBucket(bucketName, c) | ||
12410 | bufSize := dataFileMap["datafile-33-kB"] | ||
12411 | reader := getDataReader("datafile-33-kB") | ||
12412 | defer reader.Close() | ||
12413 | |||
12414 | objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) | ||
12415 | args["objectName"] = objectName | ||
12416 | |||
12417 | ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) | ||
12418 | args["ctx"] = ctx | ||
12419 | args["size"] = bufSize | ||
12420 | defer cancel() | ||
12421 | |||
12422 | _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
12423 | if err != nil { | ||
12424 | logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err) | ||
12425 | return | ||
12426 | } | ||
12427 | |||
12428 | ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) | ||
12429 | args["ctx"] = ctx | ||
12430 | |||
12431 | defer cancel() | ||
12432 | reader = getDataReader("datafile-33-kB") | ||
12433 | defer reader.Close() | ||
12434 | _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
12435 | if err != nil { | ||
12436 | logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) | ||
12437 | return | ||
12438 | } | ||
12439 | |||
12440 | successLogger(testName, function, args, startTime).Info() | ||
12441 | } | ||
12442 | |||
12443 | // Test get object with GetObject with custom context | ||
12444 | func testGetObjectContextV2() { | ||
12445 | // initialize logging params | ||
12446 | startTime := time.Now() | ||
12447 | testName := getFuncName() | ||
12448 | function := "GetObject(ctx, bucketName, objectName)" | ||
12449 | args := map[string]interface{}{ | ||
12450 | "ctx": "", | ||
12451 | "bucketName": "", | ||
12452 | "objectName": "", | ||
12453 | } | ||
12454 | // Seed random based on current time. | ||
12455 | rand.Seed(time.Now().Unix()) | ||
12456 | |||
12457 | // Instantiate new minio client object. | ||
12458 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
12459 | &minio.Options{ | ||
12460 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
12461 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
12462 | }) | ||
12463 | if err != nil { | ||
12464 | logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) | ||
12465 | return | ||
12466 | } | ||
12467 | |||
12468 | // Enable tracing, write to stderr. | ||
12469 | // c.TraceOn(os.Stderr) | ||
12470 | |||
12471 | // Set user agent. | ||
12472 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
12473 | |||
12474 | // Generate a new random bucket name. | ||
12475 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
12476 | args["bucketName"] = bucketName | ||
12477 | |||
12478 | // Make a new bucket. | ||
12479 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
12480 | if err != nil { | ||
12481 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
12482 | return | ||
12483 | } | ||
12484 | |||
12485 | defer cleanupBucket(bucketName, c) | ||
12486 | |||
12487 | bufSize := dataFileMap["datafile-33-kB"] | ||
12488 | reader := getDataReader("datafile-33-kB") | ||
12489 | defer reader.Close() | ||
12490 | // Save the data | ||
12491 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
12492 | args["objectName"] = objectName | ||
12493 | |||
12494 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
12495 | if err != nil { | ||
12496 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
12497 | return | ||
12498 | } | ||
12499 | |||
12500 | ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) | ||
12501 | args["ctx"] = ctx | ||
12502 | cancel() | ||
12503 | |||
12504 | r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) | ||
12505 | if err != nil { | ||
12506 | logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err) | ||
12507 | return | ||
12508 | } | ||
12509 | if _, err = r.Stat(); err == nil { | ||
12510 | logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err) | ||
12511 | return | ||
12512 | } | ||
12513 | r.Close() | ||
12514 | |||
12515 | ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) | ||
12516 | defer cancel() | ||
12517 | |||
12518 | // Read the data back | ||
12519 | r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) | ||
12520 | if err != nil { | ||
12521 | logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err) | ||
12522 | return | ||
12523 | } | ||
12524 | |||
12525 | st, err := r.Stat() | ||
12526 | if err != nil { | ||
12527 | logError(testName, function, args, startTime, "", "object Stat call failed", err) | ||
12528 | return | ||
12529 | } | ||
12530 | if st.Size != int64(bufSize) { | ||
12531 | logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match: want %d, got %d", bufSize, st.Size), err) | ||
12532 | return | ||
12533 | } | ||
12534 | if err := r.Close(); err != nil { | ||
12535 | logError(testName, function, args, startTime, "", "object Close() call failed", err) | ||
12536 | return | ||
12537 | } | ||
12538 | |||
12539 | successLogger(testName, function, args, startTime).Info() | ||
12540 | } | ||
12541 | |||
12542 | // Test get object with FGetObject with custom context | ||
12543 | func testFGetObjectContextV2() { | ||
12544 | // initialize logging params | ||
12545 | startTime := time.Now() | ||
12546 | testName := getFuncName() | ||
12547 | function := "FGetObject(ctx, bucketName, objectName, fileName)" | ||
12548 | args := map[string]interface{}{ | ||
12549 | "ctx": "", | ||
12550 | "bucketName": "", | ||
12551 | "objectName": "", | ||
12552 | "fileName": "", | ||
12553 | } | ||
12554 | // Seed random based on current time. | ||
12555 | rand.Seed(time.Now().Unix()) | ||
12556 | |||
12557 | // Instantiate new minio client object. | ||
12558 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
12559 | &minio.Options{ | ||
12560 | Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
12561 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
12562 | }) | ||
12563 | if err != nil { | ||
12564 | logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) | ||
12565 | return | ||
12566 | } | ||
12567 | |||
12568 | // Enable tracing, write to stderr. | ||
12569 | // c.TraceOn(os.Stderr) | ||
12570 | |||
12571 | // Set user agent. | ||
12572 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
12573 | |||
12574 | // Generate a new random bucket name. | ||
12575 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
12576 | args["bucketName"] = bucketName | ||
12577 | |||
12578 | // Make a new bucket. | ||
12579 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
12580 | if err != nil { | ||
12581 | logError(testName, function, args, startTime, "", "MakeBucket call failed", err) | ||
12582 | return | ||
12583 | } | ||
12584 | |||
12585 | defer cleanupBucket(bucketName, c) | ||
12586 | |||
12587 | bufSize := dataFileMap["datafile-1-MB"] | ||
12588 | reader := getDataReader("datafile-1-MB") | ||
12589 | defer reader.Close() | ||
12590 | // Save the data | ||
12591 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
12592 | args["objectName"] = objectName | ||
12593 | |||
12594 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) | ||
12595 | if err != nil { | ||
12596 | logError(testName, function, args, startTime, "", "PutObject call failed", err) | ||
12597 | return | ||
12598 | } | ||
12599 | |||
12600 | ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) | ||
12601 | args["ctx"] = ctx | ||
12602 | defer cancel() | ||
12603 | |||
12604 | fileName := "tempfile-context" | ||
12605 | args["fileName"] = fileName | ||
12606 | |||
12607 | // Read the data back | ||
12608 | err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) | ||
12609 | if err == nil { | ||
12610 | logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) | ||
12611 | return | ||
12612 | } | ||
12613 | ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) | ||
12614 | defer cancel() | ||
12615 | |||
12616 | // Read the data back | ||
12617 | err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) | ||
12618 | if err != nil { | ||
12619 | logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err) | ||
12620 | return | ||
12621 | } | ||
12622 | |||
12623 | if err = os.Remove(fileName + "-fcontext"); err != nil { | ||
12624 | logError(testName, function, args, startTime, "", "Remove file failed", err) | ||
12625 | return | ||
12626 | } | ||
12627 | |||
12628 | successLogger(testName, function, args, startTime).Info() | ||
12629 | } | ||
12630 | |||
12631 | // Test listing objects with ListObjects V1 and V2 | ||
12632 | func testListObjects() { | ||
12633 | // initialize logging params | ||
12634 | startTime := time.Now() | ||
12635 | testName := getFuncName() | ||
12636 | function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)" | ||
12637 | args := map[string]interface{}{ | ||
12638 | "bucketName": "", | ||
12639 | "objectPrefix": "", | ||
12640 | "recursive": "true", | ||
12641 | } | ||
12642 | // Seed random based on current time. | ||
12643 | rand.Seed(time.Now().Unix()) | ||
12644 | |||
12645 | // Instantiate new minio client object. | ||
12646 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
12647 | &minio.Options{ | ||
12648 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
12649 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
12650 | }) | ||
12651 | if err != nil { | ||
12652 | logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) | ||
12653 | return | ||
12654 | } | ||
12655 | |||
12656 | // Enable tracing, write to stderr. | ||
12657 | // c.TraceOn(os.Stderr) | ||
12658 | |||
12659 | // Set user agent. | ||
12660 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
12661 | |||
12662 | // Generate a new random bucket name. | ||
12663 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
12664 | args["bucketName"] = bucketName | ||
12665 | |||
12666 | // Make a new bucket. | ||
12667 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) | ||
12668 | if err != nil { | ||
12669 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
12670 | return | ||
12671 | } | ||
12672 | |||
12673 | defer cleanupBucket(bucketName, c) | ||
12674 | |||
12675 | testObjects := []struct { | ||
12676 | name string | ||
12677 | storageClass string | ||
12678 | }{ | ||
12679 | // Special characters | ||
12680 | {"foo bar", "STANDARD"}, | ||
12681 | {"foo-%", "STANDARD"}, | ||
12682 | {"random-object-1", "STANDARD"}, | ||
12683 | {"random-object-2", "REDUCED_REDUNDANCY"}, | ||
12684 | } | ||
12685 | |||
12686 | for i, object := range testObjects { | ||
12687 | bufSize := dataFileMap["datafile-33-kB"] | ||
12688 | reader := getDataReader("datafile-33-kB") | ||
12689 | defer reader.Close() | ||
12690 | _, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize), | ||
12691 | minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass}) | ||
12692 | if err != nil { | ||
12693 | logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err) | ||
12694 | return | ||
12695 | } | ||
12696 | } | ||
12697 | |||
12698 | testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) { | ||
12699 | var objCursor int | ||
12700 | |||
12701 | // check for object name and storage-class from listing object result | ||
12702 | for objInfo := range listFn(context.Background(), bucket, opts) { | ||
12703 | if objInfo.Err != nil { | ||
12704 | logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", objInfo.Err) | ||
12705 | return | ||
12706 | } | ||
12707 | if objInfo.Key != testObjects[objCursor].name { | ||
12708 | logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", err) | ||
12709 | return | ||
12710 | } | ||
12711 | if objInfo.StorageClass != testObjects[objCursor].storageClass { | ||
12712 | // Ignored, as gateways (Azure/GCS etc.) won't return storage class | ||
12713 | ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() | ||
12714 | } | ||
12715 | objCursor++ | ||
12716 | } | ||
12717 | |||
12718 | if objCursor != len(testObjects) { | ||
12719 | logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("")) | ||
12720 | return | ||
12721 | } | ||
12722 | } | ||
12723 | |||
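// Exercise all three listing paths: ListObjects V1, V2 (the default),
// and V2 with per-object metadata.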
12724 | testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true}) | ||
12725 | testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true}) | ||
12726 | testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true}) | ||
12727 | |||
12728 | successLogger(testName, function, args, startTime).Info() | ||
12729 | } | ||
12730 | |||
12731 | // Test deleting multiple objects with object retention set in Governance mode | ||
12732 | func testRemoveObjects() { | ||
12733 | // initialize logging params | ||
12734 | startTime := time.Now() | ||
12735 | testName := getFuncName() | ||
12736 | function := "RemoveObjects(bucketName, objectsCh, opts)" | ||
12737 | args := map[string]interface{}{ | ||
12738 | "bucketName": "", | ||
12739 | "objectPrefix": "", | ||
12740 | "recursive": "true", | ||
12741 | } | ||
12742 | // Seed random based on current time. | ||
12743 | rand.Seed(time.Now().Unix()) | ||
12744 | |||
12745 | // Instantiate new minio client object. | ||
12746 | c, err := minio.New(os.Getenv(serverEndpoint), | ||
12747 | &minio.Options{ | ||
12748 | Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), | ||
12749 | Secure: mustParseBool(os.Getenv(enableHTTPS)), | ||
12750 | }) | ||
12751 | if err != nil { | ||
12752 | logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) | ||
12753 | return | ||
12754 | } | ||
12755 | |||
12756 | // Enable tracing, write to stderr. | ||
12757 | // c.TraceOn(os.Stderr) | ||
12758 | |||
12759 | // Set user agent. | ||
12760 | c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") | ||
12761 | |||
12762 | // Generate a new random bucket name. | ||
12763 | bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") | ||
12764 | args["bucketName"] = bucketName | ||
12765 | objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") | ||
12766 | args["objectName"] = objectName | ||
12767 | |||
12768 | // Make a new bucket. | ||
12769 | err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) | ||
12770 | if err != nil { | ||
12771 | logError(testName, function, args, startTime, "", "MakeBucket failed", err) | ||
12772 | return | ||
12773 | } | ||
12774 | |||
12775 | bufSize := dataFileMap["datafile-129-MB"] | ||
12776 | reader := getDataReader("datafile-129-MB") | ||
12777 | defer reader.Close() | ||
12778 | |||
12779 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) | ||
12780 | if err != nil { | ||
12781 | logError(testName, function, args, startTime, "", "Error uploading object", err) | ||
12782 | return | ||
12783 | } | ||
12784 | |||
12785 | // Replace with smaller... | ||
12786 | bufSize = dataFileMap["datafile-10-kB"] | ||
12787 | reader = getDataReader("datafile-10-kB") | ||
12788 | defer reader.Close() | ||
12789 | |||
12790 | _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) | ||
12791 | if err != nil { | ||
12792 | logError(testName, function, args, startTime, "", "Error uploading object", err) | ||
return // added: abort the test on upload failure, matching the other checks
12793 | } | ||
12794 | |||
12795 | t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC) | ||
12796 | m := minio.RetentionMode(minio.Governance) | ||
12797 | opts := minio.PutObjectRetentionOptions{ | ||
12798 | GovernanceBypass: false, | ||
12799 | RetainUntilDate: &t, | ||
12800 | Mode: &m, | ||
12801 | } | ||
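// Governance-mode retention until 2030: deletes without GovernanceBypass
// must be rejected, which the first RemoveObjects pass below verifies.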
12802 | err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) | ||
12803 | if err != nil { | ||
12804 | logError(testName, function, args, startTime, "", "Error setting retention", err) | ||
12805 | return | ||
12806 | } | ||
12807 | |||
12808 | objectsCh := make(chan minio.ObjectInfo) | ||
12809 | // Send object names that are needed to be removed to objectsCh | ||
12810 | go func() { | ||
12811 | defer close(objectsCh) | ||
12812 | // List all objects from a bucket-name with a matching prefix. | ||
12813 | for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { | ||
12814 | if object.Err != nil { | ||
12815 | logError(testName, function, args, startTime, "", "Error listing objects", object.Err) | ||
12816 | return | ||
12817 | } | ||
12818 | objectsCh <- object | ||
12819 | } | ||
12820 | }() | ||
12821 | |||
12822 | for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) { | ||
12823 | // Error is expected here because Retention is set on the object | ||
12824 | // and RemoveObjects is called without Bypass Governance | ||
12825 | if rErr.Err == nil { | ||
12826 | logError(testName, function, args, startTime, "", "Expected error during deletion", nil) | ||
12827 | return | ||
12828 | } | ||
12829 | } | ||
12830 | |||
12831 | objectsCh1 := make(chan minio.ObjectInfo) | ||
12832 | |||
12833 | // Send object names that are needed to be removed to objectsCh | ||
12834 | go func() { | ||
12835 | defer close(objectsCh1) | ||
12836 | // List all objects from a bucket-name with a matching prefix. | ||
12837 | for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { | ||
12838 | if object.Err != nil { | ||
12839 | logError(testName, function, args, startTime, "", "Error listing objects", object.Err) | ||
12840 | return | ||
12841 | } | ||
12842 | objectsCh1 <- object | ||
12843 | } | ||
12844 | }() | ||
12845 | |||
12846 | opts1 := minio.RemoveObjectsOptions{ | ||
12847 | GovernanceBypass: true, | ||
12848 | } | ||
12849 | |||
12850 | for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) { | ||
12851 | // Error is not expected here because Retention is set on the object | ||
12852 | // and RemoveObjects is called with Bypass Governance | ||
12853 | logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err) | ||
12854 | return | ||
12855 | } | ||
12856 | |||
12857 | // Delete all objects and buckets | ||
12858 | if err = cleanupVersionedBucket(bucketName, c); err != nil { | ||
12859 | logError(testName, function, args, startTime, "", "CleanupBucket failed", err) | ||
12860 | return | ||
12861 | } | ||
12862 | |||
12863 | successLogger(testName, function, args, startTime).Info() | ||
12864 | } | ||
12865 | |||
12866 | // mustParseBool converts a string to a bool, returning false on any parse error | ||
12867 | func mustParseBool(str string) bool { | ||
12868 | b, err := strconv.ParseBool(str) | ||
12869 | if err != nil { | ||
12870 | return false | ||
12871 | } | ||
12872 | return b | ||
12873 | } | ||
12874 | |||
12875 | func main() { | ||
12876 | // Output to stdout instead of the default stderr | ||
12877 | log.SetOutput(os.Stdout) | ||
12878 | // create custom formatter | ||
12879 | mintFormatter := mintJSONFormatter{} | ||
12880 | // set custom formatter | ||
12881 | log.SetFormatter(&mintFormatter) | ||
12882 | // log Info or above -- success cases are Info level, failures are Fatal level | ||
12883 | log.SetLevel(log.InfoLevel) | ||
12884 | |||
12885 | tls := mustParseBool(os.Getenv(enableHTTPS)) | ||
12886 | kms := mustParseBool(os.Getenv(enableKMS)) | ||
12887 | if os.Getenv(enableKMS) == "" { | ||
12888 | // Default to KMS tests. | ||
12889 | kms = true | ||
12890 | } | ||
12891 | |||
12892 | // execute tests | ||
12893 | if isFullMode() { | ||
12894 | testMakeBucketErrorV2() | ||
12895 | testGetObjectClosedTwiceV2() | ||
12896 | testFPutObjectV2() | ||
12897 | testMakeBucketRegionsV2() | ||
12898 | testGetObjectReadSeekFunctionalV2() | ||
12899 | testGetObjectReadAtFunctionalV2() | ||
12900 | testGetObjectRanges() | ||
12901 | testCopyObjectV2() | ||
12902 | testFunctionalV2() | ||
12903 | testComposeObjectErrorCasesV2() | ||
12904 | testCompose10KSourcesV2() | ||
12905 | testUserMetadataCopyingV2() | ||
12906 | testPutObjectWithChecksums() | ||
12907 | testPutMultipartObjectWithChecksums() | ||
12908 | testPutObject0ByteV2() | ||
12909 | testPutObjectNoLengthV2() | ||
12910 | testPutObjectsUnknownV2() | ||
12911 | testGetObjectContextV2() | ||
12912 | testFPutObjectContextV2() | ||
12913 | testFGetObjectContextV2() | ||
12914 | testPutObjectContextV2() | ||
12915 | testPutObjectWithVersioning() | ||
12916 | testMakeBucketError() | ||
12917 | testMakeBucketRegions() | ||
12918 | testPutObjectWithMetadata() | ||
12919 | testPutObjectReadAt() | ||
12920 | testPutObjectStreaming() | ||
12921 | testGetObjectSeekEnd() | ||
12922 | testGetObjectClosedTwice() | ||
12923 | testGetObjectS3Zip() | ||
12924 | testRemoveMultipleObjects() | ||
12925 | testRemoveMultipleObjectsWithResult() | ||
12926 | testFPutObjectMultipart() | ||
12927 | testFPutObject() | ||
12928 | testGetObjectReadSeekFunctional() | ||
12929 | testGetObjectReadAtFunctional() | ||
12930 | testGetObjectReadAtWhenEOFWasReached() | ||
12931 | testPresignedPostPolicy() | ||
12932 | testCopyObject() | ||
12933 | testComposeObjectErrorCases() | ||
12934 | testCompose10KSources() | ||
12935 | testUserMetadataCopying() | ||
12936 | testBucketNotification() | ||
12937 | testFunctional() | ||
12938 | testGetObjectModified() | ||
12939 | testPutObjectUploadSeekedObject() | ||
12940 | testGetObjectContext() | ||
12941 | testFPutObjectContext() | ||
12942 | testFGetObjectContext() | ||
12943 | testGetObjectACLContext() | ||
12944 | testPutObjectContext() | ||
12945 | testStorageClassMetadataPutObject() | ||
12946 | testStorageClassInvalidMetadataPutObject() | ||
12947 | testStorageClassMetadataCopyObject() | ||
12948 | testPutObjectWithContentLanguage() | ||
12949 | testListObjects() | ||
12950 | testRemoveObjects() | ||
12951 | testListObjectVersions() | ||
12952 | testStatObjectWithVersioning() | ||
12953 | testGetObjectWithVersioning() | ||
12954 | testCopyObjectWithVersioning() | ||
12955 | testConcurrentCopyObjectWithVersioning() | ||
12956 | testComposeObjectWithVersioning() | ||
12957 | testRemoveObjectWithVersioning() | ||
12958 | testRemoveObjectsWithVersioning() | ||
12959 | testObjectTaggingWithVersioning() | ||
12960 | testTrailingChecksums() | ||
12961 | testPutObjectWithAutomaticChecksums() | ||
12962 | |||
12963 | // SSE-C tests will only work over TLS connection. | ||
12964 | if tls { | ||
12965 | testSSECEncryptionPutGet() | ||
12966 | testSSECEncryptionFPut() | ||
12967 | testSSECEncryptedGetObjectReadAtFunctional() | ||
12968 | testSSECEncryptedGetObjectReadSeekFunctional() | ||
12969 | testEncryptedCopyObjectV2() | ||
12970 | testEncryptedSSECToSSECCopyObject() | ||
12971 | testEncryptedSSECToUnencryptedCopyObject() | ||
12972 | testUnencryptedToSSECCopyObject() | ||
12973 | testUnencryptedToUnencryptedCopyObject() | ||
12974 | testEncryptedEmptyObject() | ||
12975 | testDecryptedCopyObject() | ||
12976 | testSSECEncryptedToSSECCopyObjectPart() | ||
12977 | testSSECMultipartEncryptedToSSECCopyObjectPart() | ||
12978 | testSSECEncryptedToUnencryptedCopyPart() | ||
12979 | testUnencryptedToSSECCopyObjectPart() | ||
12980 | testUnencryptedToUnencryptedCopyPart() | ||
12981 | testEncryptedSSECToSSES3CopyObject() | ||
12982 | testEncryptedSSES3ToSSECCopyObject() | ||
12983 | testSSECEncryptedToSSES3CopyObjectPart() | ||
12984 | testSSES3EncryptedToSSECCopyObjectPart() | ||
12985 | } | ||
12986 | |||
12987 | // KMS tests | ||
12988 | if kms { | ||
12989 | testSSES3EncryptionPutGet() | ||
12990 | testSSES3EncryptionFPut() | ||
12991 | testSSES3EncryptedGetObjectReadAtFunctional() | ||
12992 | testSSES3EncryptedGetObjectReadSeekFunctional() | ||
12993 | testEncryptedSSES3ToSSES3CopyObject() | ||
12994 | testEncryptedSSES3ToUnencryptedCopyObject() | ||
12995 | testUnencryptedToSSES3CopyObject() | ||
12996 | testUnencryptedToSSES3CopyObjectPart() | ||
12997 | testSSES3EncryptedToUnencryptedCopyPart() | ||
12998 | testSSES3EncryptedToSSES3CopyObjectPart() | ||
12999 | } | ||
13000 | } else { | ||
13001 | testFunctional() | ||
13002 | testFunctionalV2() | ||
13003 | } | ||
13004 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go new file mode 100644 index 0000000..07bc7db --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "fmt" | ||
22 | "io" | ||
23 | "sync" | ||
24 | ) | ||
25 | |||
26 | // hookReader hooks an additional reader into the source stream. It is | ||
27 | // useful for making progress bars: the hook reader is appropriately | ||
28 | // notified about the exact number of bytes read from the primary | ||
29 | // source on each Read operation. | ||
30 | type hookReader struct { | ||
31 | mu sync.RWMutex | ||
32 | source io.Reader | ||
33 | hook io.Reader | ||
34 | } | ||
35 | |||
36 | // Seek implements io.Seeker. It seeks the source first and, if the | ||
37 | // hook also implements io.Seeker, seeks the hook to the same position. | ||
38 | func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { | ||
39 | hr.mu.Lock() | ||
40 | defer hr.mu.Unlock() | ||
41 | |||
42 | // If the source implements io.Seeker, use it. | ||
43 | sourceSeeker, ok := hr.source.(io.Seeker) | ||
44 | if ok { | ||
45 | n, err = sourceSeeker.Seek(offset, whence) | ||
46 | if err != nil { | ||
47 | return 0, err | ||
48 | } | ||
49 | } | ||
50 | |||
51 | if hr.hook != nil { | ||
52 | // If the hook implements io.Seeker, use it. | ||
53 | hookSeeker, ok := hr.hook.(io.Seeker) | ||
54 | if ok { | ||
55 | var m int64 | ||
56 | m, err = hookSeeker.Seek(offset, whence) | ||
57 | if err != nil { | ||
58 | return 0, err | ||
59 | } | ||
60 | if n != m { | ||
61 | return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n) | ||
62 | } | ||
63 | } | ||
64 | } | ||
65 | |||
66 | return n, nil | ||
67 | } | ||
68 | |||
69 | // Read implements io.Reader. It always reads from the source; the 'n' | ||
70 | // bytes just read are then replayed through the hook. Any error other | ||
71 | // than io.EOF is returned to the caller. | ||
72 | func (hr *hookReader) Read(b []byte) (n int, err error) { | ||
73 | hr.mu.RLock() | ||
74 | defer hr.mu.RUnlock() | ||
75 | |||
76 | n, err = hr.source.Read(b) | ||
77 | if err != nil && err != io.EOF { | ||
78 | return n, err | ||
79 | } | ||
80 | if hr.hook != nil { | ||
81 | // Progress the hook with the total read bytes from the source. | ||
82 | if _, herr := hr.hook.Read(b[:n]); herr != nil { | ||
83 | if herr != io.EOF { | ||
84 | return n, herr | ||
85 | } | ||
86 | } | ||
87 | } | ||
88 | return n, err | ||
89 | } | ||
90 | |||
91 | // newHook returns an io.Reader wrapping the source in a hookReader | ||
92 | // that reports the data read from the source to the hook. | ||
93 | func newHook(source, hook io.Reader) io.Reader { | ||
94 | if hook == nil { | ||
95 | return &hookReader{source: source} | ||
96 | } | ||
97 | return &hookReader{ | ||
98 | source: source, | ||
99 | hook: hook, | ||
100 | } | ||
101 | } | ||
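For orientation, here is a minimal, self-contained sketch of the pattern hookReader implements: a secondary reader whose Read is handed exactly the bytes just read from the primary source, so it only needs to count them. Since newHook is unexported, the wiring below re-implements the read-and-replay loop rather than calling the library; progressHook is a hypothetical name.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// progressHook counts bytes in the style a hookReader hook would:
// its Read receives exactly the bytes just read from the source.
type progressHook struct{ total int64 }

func (p *progressHook) Read(b []byte) (int, error) {
	p.total += int64(len(b))
	return len(b), nil
}

func main() {
	src := strings.NewReader("hello, world")
	hook := &progressHook{}
	buf := make([]byte, 4)
	for {
		// Mirror hookReader.Read: read from the source, then replay
		// the same bytes into the hook.
		n, err := src.Read(buf)
		if n > 0 {
			hook.Read(buf[:n])
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
	fmt.Println("bytes read:", hook.total) // bytes read: 12
}
```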
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go new file mode 100644 index 0000000..800c4a2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go | |||
@@ -0,0 +1,242 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "crypto/sha256" | ||
23 | "encoding/hex" | ||
24 | "encoding/xml" | ||
25 | "errors" | ||
26 | "io" | ||
27 | "net/http" | ||
28 | "net/url" | ||
29 | "strconv" | ||
30 | "strings" | ||
31 | "time" | ||
32 | |||
33 | "github.com/minio/minio-go/v7/pkg/signer" | ||
34 | ) | ||
35 | |||
36 | // AssumeRoleResponse contains the result of successful AssumeRole request. | ||
37 | type AssumeRoleResponse struct { | ||
38 | XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"` | ||
39 | |||
40 | Result AssumeRoleResult `xml:"AssumeRoleResult"` | ||
41 | ResponseMetadata struct { | ||
42 | RequestID string `xml:"RequestId,omitempty"` | ||
43 | } `xml:"ResponseMetadata,omitempty"` | ||
44 | } | ||
45 | |||
46 | // AssumeRoleResult - Contains the response to a successful AssumeRole | ||
47 | // request, including temporary credentials that can be used to make | ||
48 | // MinIO API requests. | ||
49 | type AssumeRoleResult struct { | ||
50 | // The identifiers for the temporary security credentials that the operation | ||
51 | // returns. | ||
52 | AssumedRoleUser AssumedRoleUser `xml:",omitempty"` | ||
53 | |||
54 | // The temporary security credentials, which include an access key ID, a secret | ||
55 | // access key, and a security (or session) token. | ||
56 | // | ||
57 | // Note: The size of the security token that STS APIs return is not fixed. We | ||
58 | // strongly recommend that you make no assumptions about the maximum size. As | ||
59 | // of this writing, the typical size is less than 4096 bytes, but that can vary. | ||
60 | // Also, future updates to AWS might require larger sizes. | ||
61 | Credentials struct { | ||
62 | AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` | ||
63 | SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` | ||
64 | Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` | ||
65 | SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` | ||
66 | } `xml:",omitempty"` | ||
67 | |||
68 | // A percentage value that indicates the size of the policy in packed form. | ||
69 | // The service rejects any policy with a packed size greater than 100 percent, | ||
70 | // which means the policy exceeded the allowed space. | ||
71 | PackedPolicySize int `xml:",omitempty"` | ||
72 | } | ||
73 | |||
74 | // An STSAssumeRole retrieves credentials from the MinIO STS service, and keeps | ||
75 | // track of whether those credentials are expired. | ||
76 | type STSAssumeRole struct { | ||
77 | Expiry | ||
78 | |||
79 | // Required http Client to use when connecting to MinIO STS service. | ||
80 | Client *http.Client | ||
81 | |||
82 | // STS endpoint to fetch STS credentials. | ||
83 | STSEndpoint string | ||
84 | |||
85 | // various options for this request. | ||
86 | Options STSAssumeRoleOptions | ||
87 | } | ||
88 | |||
89 | // STSAssumeRoleOptions is a collection of various input options | ||
90 | // to obtain AssumeRole credentials. | ||
91 | type STSAssumeRoleOptions struct { | ||
92 | // Mandatory inputs. | ||
93 | AccessKey string | ||
94 | SecretKey string | ||
95 | |||
96 | SessionToken string // Optional; needed only when the initial credentials are temporary. | ||
97 | Policy string // Optional to assign a policy to the assumed role | ||
98 | |||
99 | Location string // Optional; commonly needed with AWS STS. | ||
100 | DurationSeconds int // Optional; defaults to 1 hour (3600 seconds). | ||
101 | |||
102 | // Optional; only valid when used with AWS STS. | ||
103 | RoleARN string | ||
104 | RoleSessionName string | ||
105 | ExternalID string | ||
106 | } | ||
107 | |||
108 | // NewSTSAssumeRole returns a pointer to a new | ||
109 | // Credentials object wrapping the STSAssumeRole. | ||
110 | func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) { | ||
111 | if stsEndpoint == "" { | ||
112 | return nil, errors.New("STS endpoint cannot be empty") | ||
113 | } | ||
114 | if opts.AccessKey == "" || opts.SecretKey == "" { | ||
115 | return nil, errors.New("AssumeRole credentials access/secretkey is mandatory") | ||
116 | } | ||
117 | return New(&STSAssumeRole{ | ||
118 | Client: &http.Client{ | ||
119 | Transport: http.DefaultTransport, | ||
120 | }, | ||
121 | STSEndpoint: stsEndpoint, | ||
122 | Options: opts, | ||
123 | }), nil | ||
124 | } | ||
125 | |||
126 | const defaultDurationSeconds = 3600 | ||
127 | |||
128 | // closeResponse closes a non-nil response and drains any remaining | ||
129 | // data from its Body; a convenience wrapper around response cleanup. | ||
130 | // | ||
131 | // Subsequently this allows golang http RoundTripper | ||
132 | // to re-use the same connection for future requests. | ||
133 | func closeResponse(resp *http.Response) { | ||
134 | // Callers should close resp.Body when done reading from it. | ||
135 | // If resp.Body is not closed, the Client's underlying RoundTripper | ||
136 | // (typically Transport) may not be able to re-use a persistent TCP | ||
137 | // connection to the server for a subsequent "keep-alive" request. | ||
138 | if resp != nil && resp.Body != nil { | ||
139 | // Drain any remaining Body and then close the connection. | ||
140 | // Without draining first, closing would prevent re-use of | ||
141 | // the same connection for future requests. | ||
142 | // - http://stackoverflow.com/a/17961593/4465767 | ||
143 | io.Copy(io.Discard, resp.Body) | ||
144 | resp.Body.Close() | ||
145 | } | ||
146 | } | ||
147 | |||
148 | func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) { | ||
149 | v := url.Values{} | ||
150 | v.Set("Action", "AssumeRole") | ||
151 | v.Set("Version", STSVersion) | ||
152 | if opts.RoleARN != "" { | ||
153 | v.Set("RoleArn", opts.RoleARN) | ||
154 | } | ||
155 | if opts.RoleSessionName != "" { | ||
156 | v.Set("RoleSessionName", opts.RoleSessionName) | ||
157 | } | ||
158 | if opts.DurationSeconds > defaultDurationSeconds { | ||
159 | v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds)) | ||
160 | } else { | ||
161 | v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds)) | ||
162 | } | ||
163 | if opts.Policy != "" { | ||
164 | v.Set("Policy", opts.Policy) | ||
165 | } | ||
166 | if opts.ExternalID != "" { | ||
167 | v.Set("ExternalId", opts.ExternalID) | ||
168 | } | ||
169 | |||
170 | u, err := url.Parse(endpoint) | ||
171 | if err != nil { | ||
172 | return AssumeRoleResponse{}, err | ||
173 | } | ||
174 | u.Path = "/" | ||
175 | |||
176 | postBody := strings.NewReader(v.Encode()) | ||
177 | hash := sha256.New() | ||
178 | if _, err = io.Copy(hash, postBody); err != nil { | ||
179 | return AssumeRoleResponse{}, err | ||
180 | } | ||
181 | postBody.Seek(0, 0) | ||
182 | |||
183 | req, err := http.NewRequest(http.MethodPost, u.String(), postBody) | ||
184 | if err != nil { | ||
185 | return AssumeRoleResponse{}, err | ||
186 | } | ||
187 | req.Header.Set("Content-Type", "application/x-www-form-urlencoded") | ||
188 | req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil))) | ||
189 | if opts.SessionToken != "" { | ||
190 | req.Header.Set("X-Amz-Security-Token", opts.SessionToken) | ||
191 | } | ||
192 | req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location) | ||
193 | |||
194 | resp, err := clnt.Do(req) | ||
195 | if err != nil { | ||
196 | return AssumeRoleResponse{}, err | ||
197 | } | ||
198 | defer closeResponse(resp) | ||
199 | if resp.StatusCode != http.StatusOK { | ||
200 | var errResp ErrorResponse | ||
201 | buf, err := io.ReadAll(resp.Body) | ||
202 | if err != nil { | ||
203 | return AssumeRoleResponse{}, err | ||
204 | } | ||
205 | _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) | ||
206 | if err != nil { | ||
207 | var s3Err Error | ||
208 | if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { | ||
209 | return AssumeRoleResponse{}, err | ||
210 | } | ||
211 | errResp.RequestID = s3Err.RequestID | ||
212 | errResp.STSError.Code = s3Err.Code | ||
213 | errResp.STSError.Message = s3Err.Message | ||
214 | } | ||
215 | return AssumeRoleResponse{}, errResp | ||
216 | } | ||
217 | |||
218 | a := AssumeRoleResponse{} | ||
219 | if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil { | ||
220 | return AssumeRoleResponse{}, err | ||
221 | } | ||
222 | return a, nil | ||
223 | } | ||
224 | |||
225 | // Retrieve retrieves credentials from the MinIO service. | ||
226 | // Error will be returned if the request fails. | ||
227 | func (m *STSAssumeRole) Retrieve() (Value, error) { | ||
228 | a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options) | ||
229 | if err != nil { | ||
230 | return Value{}, err | ||
231 | } | ||
232 | |||
233 | // Proactively expire ahead of the server-side expiry (see DefaultExpiryWindow). | ||
234 | m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) | ||
235 | |||
236 | return Value{ | ||
237 | AccessKeyID: a.Result.Credentials.AccessKey, | ||
238 | SecretAccessKey: a.Result.Credentials.SecretKey, | ||
239 | SessionToken: a.Result.Credentials.SessionToken, | ||
240 | SignerType: SignatureV4, | ||
241 | }, nil | ||
242 | } | ||
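A minimal usage sketch of the exported API above; the endpoint and keys are placeholders. Note that a DurationSeconds value is only sent as-is when it exceeds the 3600-second default.

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical STS endpoint and keys, for illustration only.
	creds, err := credentials.NewSTSAssumeRole("https://sts.example.com", credentials.STSAssumeRoleOptions{
		AccessKey:       "YOUR-ACCESSKEY",
		SecretKey:       "YOUR-SECRETKEY",
		Location:        "us-east-1",
		DurationSeconds: 7200, // values above the 3600s default are honored
	})
	if err != nil {
		log.Fatalln(err)
	}
	v, err := creds.Get() // triggers Retrieve() on first call
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(v.AccessKeyID, v.SessionToken != "")
}
```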
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go new file mode 100644 index 0000000..ddccfb1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go | |||
@@ -0,0 +1,88 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | // A Chain will search for a provider which returns credentials | ||
21 | // and cache that provider until Retrieve is called again. | ||
22 | // | ||
23 | // The Chain provides a way of chaining multiple providers together | ||
24 | // which will pick the first available using priority order of the | ||
25 | // Providers in the list. | ||
26 | // | ||
27 | // If none of the Providers retrieve valid credentials Value, ChainProvider's | ||
28 | // Retrieve() will return the no credentials value. | ||
29 | // | ||
30 | // If a Provider is found which returns valid credentials Value ChainProvider | ||
31 | // will cache that Provider for all calls to IsExpired(), until Retrieve is | ||
32 | // called again after IsExpired() is true. | ||
33 | // | ||
34 | // creds := credentials.NewChainCredentials( | ||
35 | // []credentials.Provider{ | ||
36 | // &credentials.EnvAWSS3{}, | ||
37 | // &credentials.EnvMinio{}, | ||
38 | // }) | ||
39 | // | ||
40 | // // Usage of ChainCredentials. | ||
41 | // mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") | ||
42 | // if err != nil { | ||
43 | // log.Fatalln(err) | ||
44 | // } | ||
45 | type Chain struct { | ||
46 | Providers []Provider | ||
47 | curr Provider | ||
48 | } | ||
49 | |||
50 | // NewChainCredentials returns a pointer to a new Credentials object | ||
51 | // wrapping a chain of providers. | ||
52 | func NewChainCredentials(providers []Provider) *Credentials { | ||
53 | return New(&Chain{ | ||
54 | Providers: append([]Provider{}, providers...), | ||
55 | }) | ||
56 | } | ||
57 | |||
58 | // Retrieve returns the credentials value, or anonymous (no) credentials | ||
59 | // if no provider in the chain returned any value. | ||
60 | // | ||
61 | // If a provider is found with credentials, it will be cached and any calls | ||
62 | // to IsExpired() will return the expired state of the cached provider. | ||
63 | func (c *Chain) Retrieve() (Value, error) { | ||
64 | for _, p := range c.Providers { | ||
65 | creds, _ := p.Retrieve() | ||
66 | // Always prioritize non-anonymous providers, if any. | ||
67 | if creds.AccessKeyID == "" && creds.SecretAccessKey == "" { | ||
68 | continue | ||
69 | } | ||
70 | c.curr = p | ||
71 | return creds, nil | ||
72 | } | ||
73 | // At this point we have exhausted all the providers and are | ||
74 | // left without any credentials; return anonymous credentials. | ||
75 | return Value{ | ||
76 | SignerType: SignatureAnonymous, | ||
77 | }, nil | ||
78 | } | ||
79 | |||
80 | // IsExpired returns the expired state of the currently cached provider | ||
81 | // if there is one. If there is no current provider, true is returned. | ||
82 | func (c *Chain) IsExpired() bool { | ||
83 | if c.curr != nil { | ||
84 | return c.curr.IsExpired() | ||
85 | } | ||
86 | |||
87 | return true | ||
88 | } | ||
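The doc comment above shows basic chaining; the sketch below additionally demonstrates the anonymous fallback Retrieve performs when no provider yields keys (assuming none of the relevant environment variables are set).

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvAWS{},
		&credentials.EnvMinio{},
	})
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	// With no matching env variables set, the chain falls through to
	// anonymous credentials rather than returning an error.
	fmt.Println(v.SignerType == credentials.SignatureAnonymous)
}
```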
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample new file mode 100644 index 0000000..d793c9e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample | |||
@@ -0,0 +1,17 @@ | |||
1 | { | ||
2 | "version": "8", | ||
3 | "hosts": { | ||
4 | "play": { | ||
5 | "url": "https://play.min.io", | ||
6 | "accessKey": "Q3AM3UQ867SPQQA43P2F", | ||
7 | "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", | ||
8 | "api": "S3v2" | ||
9 | }, | ||
10 | "s3": { | ||
11 | "url": "https://s3.amazonaws.com", | ||
12 | "accessKey": "accessKey", | ||
13 | "secretKey": "secret", | ||
14 | "api": "S3v4" | ||
15 | } | ||
16 | } | ||
17 | } \ No newline at end of file | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go new file mode 100644 index 0000000..af61049 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "sync" | ||
22 | "time" | ||
23 | ) | ||
24 | |||
25 | const ( | ||
26 | // STSVersion sts version string | ||
27 | STSVersion = "2011-06-15" | ||
28 | |||
29 | // Fraction of the credential lifetime after which a refresh is triggered (see SetExpiration). | ||
30 | defaultExpiryWindow = 0.8 | ||
31 | ) | ||
32 | |||
33 | // A Value is the AWS credentials value for individual credential fields. | ||
34 | type Value struct { | ||
35 | // AWS Access key ID | ||
36 | AccessKeyID string | ||
37 | |||
38 | // AWS Secret Access Key | ||
39 | SecretAccessKey string | ||
40 | |||
41 | // AWS Session Token | ||
42 | SessionToken string | ||
43 | |||
44 | // Signature Type. | ||
45 | SignerType SignatureType | ||
46 | } | ||
47 | |||
48 | // A Provider is the interface for any component which will provide credentials | ||
49 | // Value. A provider is required to manage its own expired state, and to define | ||
50 | // what being expired means. | ||
51 | type Provider interface { | ||
52 | // Retrieve returns nil if it successfully retrieved the value. | ||
53 | // Error is returned if the value was not obtainable, or is empty. | ||
54 | Retrieve() (Value, error) | ||
55 | |||
56 | // IsExpired returns if the credentials are no longer valid, and need | ||
57 | // to be retrieved. | ||
58 | IsExpired() bool | ||
59 | } | ||
60 | |||
61 | // An Expiry provides shared expiration logic to be used by credentials | ||
62 | // providers to implement expiry functionality. | ||
63 | // | ||
64 | // The best method to use this struct is as an anonymous field within the | ||
65 | // provider's struct. | ||
66 | // | ||
67 | // Example: | ||
68 | // | ||
69 | // type IAMCredentialProvider struct { | ||
70 | // Expiry | ||
71 | // ... | ||
72 | // } | ||
73 | type Expiry struct { | ||
74 | // The date/time at which the credentials expire. | ||
75 | expiration time.Time | ||
76 | |||
77 | // If set will be used by IsExpired to determine the current time. | ||
78 | // Defaults to time.Now if CurrentTime is not set. | ||
79 | CurrentTime func() time.Time | ||
80 | } | ||
81 | |||
82 | // SetExpiration sets the expiration IsExpired will check when called. | ||
83 | // | ||
84 | // If window is greater than 0 the expiration time will be reduced by the | ||
85 | // window value. | ||
86 | // | ||
87 | // Using a window is helpful to trigger credentials to expire sooner than | ||
88 | // the expiration time given to ensure no requests are made with expired | ||
89 | // tokens. | ||
90 | func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { | ||
91 | if e.CurrentTime == nil { | ||
92 | e.CurrentTime = time.Now | ||
93 | } | ||
94 | cut := window | ||
95 | if cut < 0 { | ||
96 | expireIn := expiration.Sub(e.CurrentTime()) | ||
97 | cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow)) | ||
98 | } | ||
99 | e.expiration = expiration.Add(-cut) | ||
100 | } | ||
101 | |||
102 | // IsExpired returns if the credentials are expired. | ||
103 | func (e *Expiry) IsExpired() bool { | ||
104 | if e.CurrentTime == nil { | ||
105 | e.CurrentTime = time.Now | ||
106 | } | ||
107 | return e.expiration.Before(e.CurrentTime()) | ||
108 | } | ||
109 | |||
110 | // Credentials - A container for synchronous safe retrieval of credentials Value. | ||
111 | // Credentials will cache the credentials value until they expire. Once the value | ||
112 | // expires the next Get will attempt to retrieve valid credentials. | ||
113 | // | ||
114 | // Credentials is safe to use across multiple goroutines and will manage the | ||
115 | // synchronous state so the Providers do not need to implement their own | ||
116 | // synchronization. | ||
117 | // | ||
118 | // The first Credentials.Get() will always call Provider.Retrieve() to get the | ||
119 | // first instance of the credentials Value. All calls to Get() after that | ||
120 | // will return the cached credentials Value until IsExpired() returns true. | ||
121 | type Credentials struct { | ||
122 | sync.Mutex | ||
123 | |||
124 | creds Value | ||
125 | forceRefresh bool | ||
126 | provider Provider | ||
127 | } | ||
128 | |||
129 | // New returns a pointer to a new Credentials with the provider set. | ||
130 | func New(provider Provider) *Credentials { | ||
131 | return &Credentials{ | ||
132 | provider: provider, | ||
133 | forceRefresh: true, | ||
134 | } | ||
135 | } | ||
136 | |||
137 | // Get returns the credentials value, or error if the credentials Value failed | ||
138 | // to be retrieved. | ||
139 | // | ||
140 | // Will return the cached credentials Value if it has not expired. If the | ||
141 | // credentials Value has expired the Provider's Retrieve() will be called | ||
142 | // to refresh the credentials. | ||
143 | // | ||
144 | // If Credentials.Expire() was called the credentials Value will be force | ||
145 | // expired, and the next call to Get() will cause them to be refreshed. | ||
146 | func (c *Credentials) Get() (Value, error) { | ||
147 | if c == nil { | ||
148 | return Value{}, nil | ||
149 | } | ||
150 | |||
151 | c.Lock() | ||
152 | defer c.Unlock() | ||
153 | |||
154 | if c.isExpired() { | ||
155 | creds, err := c.provider.Retrieve() | ||
156 | if err != nil { | ||
157 | return Value{}, err | ||
158 | } | ||
159 | c.creds = creds | ||
160 | c.forceRefresh = false | ||
161 | } | ||
162 | |||
163 | return c.creds, nil | ||
164 | } | ||
165 | |||
166 | // Expire expires the credentials and forces them to be retrieved on the | ||
167 | // next call to Get(). | ||
168 | // | ||
169 | // This will override the Provider's expired state, and force Credentials | ||
170 | // to call the Provider's Retrieve(). | ||
171 | func (c *Credentials) Expire() { | ||
172 | c.Lock() | ||
173 | defer c.Unlock() | ||
174 | |||
175 | c.forceRefresh = true | ||
176 | } | ||
177 | |||
178 | // IsExpired returns if the credentials are no longer valid, and need | ||
179 | // to be refreshed. | ||
180 | // | ||
181 | // If the Credentials were forced to be expired with Expire() this will | ||
182 | // reflect that override. | ||
183 | func (c *Credentials) IsExpired() bool { | ||
184 | c.Lock() | ||
185 | defer c.Unlock() | ||
186 | |||
187 | return c.isExpired() | ||
188 | } | ||
189 | |||
190 | // isExpired helper method wrapping the definition of expired credentials. | ||
191 | func (c *Credentials) isExpired() bool { | ||
192 | return c.forceRefresh || c.provider.IsExpired() | ||
193 | } | ||
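To illustrate how Expiry, New, and the Provider interface fit together, here is a hedged sketch of a custom provider; staticTTLProvider and its fixed keys are invented for the example.

```go
package main

import (
	"fmt"
	"time"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

// staticTTLProvider is a hypothetical provider issuing a fixed key pair
// valid for one minute, using the embedded Expiry for bookkeeping.
type staticTTLProvider struct {
	credentials.Expiry
}

func (s *staticTTLProvider) Retrieve() (credentials.Value, error) {
	// A positive window shortens the usable lifetime: expire 10s early.
	s.SetExpiration(time.Now().Add(1*time.Minute), 10*time.Second)
	return credentials.Value{
		AccessKeyID:     "static-access-key",
		SecretAccessKey: "static-secret-key",
		SignerType:      credentials.SignatureV4,
	}, nil
}

func main() {
	creds := credentials.New(&staticTTLProvider{})
	v, _ := creds.Get() // first Get always calls Retrieve
	fmt.Println(v.AccessKeyID, creds.IsExpired()) // static-access-key false
}
```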
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json new file mode 100644 index 0000000..afbfad5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json | |||
@@ -0,0 +1,7 @@ | |||
1 | { | ||
2 | "Version": 1, | ||
3 | "SessionToken": "token", | ||
4 | "AccessKeyId": "accessKey", | ||
5 | "SecretAccessKey": "secret", | ||
6 | "Expiration": "9999-04-27T16:02:25.000Z" | ||
7 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample new file mode 100644 index 0000000..e2dc1bf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample | |||
@@ -0,0 +1,15 @@ | |||
1 | [default] | ||
2 | aws_access_key_id = accessKey | ||
3 | aws_secret_access_key = secret | ||
4 | aws_session_token = token | ||
5 | |||
6 | [no_token] | ||
7 | aws_access_key_id = accessKey | ||
8 | aws_secret_access_key = secret | ||
9 | |||
10 | [with_colon] | ||
11 | aws_access_key_id: accessKey | ||
12 | aws_secret_access_key: secret | ||
13 | |||
14 | [with_process] | ||
15 | credential_process = /bin/cat credentials.json | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go new file mode 100644 index 0000000..fbfb105 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | // Package credentials provides credential retrieval and management | ||
19 | // for S3 compatible object storage. | ||
20 | // | ||
21 | // By default Credentials.Get() will cache the successful result of a | ||
22 | // Provider's Retrieve() until Provider.IsExpired() returns true, at which | ||
23 | // point Credentials will call the Provider's Retrieve() to get a new credential Value. | ||
24 | // | ||
25 | // The Provider is responsible for determining when credentials have expired. | ||
26 | // It is also important to note that Credentials will always call Retrieve the | ||
27 | // first time Credentials.Get() is called. | ||
28 | // | ||
29 | // Example of using the environment variable credentials. | ||
30 | // | ||
31 | // creds := NewFromEnv() | ||
32 | // // Retrieve the credentials value | ||
33 | // credValue, err := creds.Get() | ||
34 | // if err != nil { | ||
35 | // // handle error | ||
36 | // } | ||
37 | // | ||
38 | // Example of forcing credentials to expire and be refreshed on the next Get(). | ||
39 | // This may be helpful to proactively expire credentials and refresh them sooner | ||
40 | // than they would naturally expire on their own. | ||
41 | // | ||
42 | // creds := NewFromIAM("") | ||
43 | // creds.Expire() | ||
44 | // credsValue, err := creds.Get() | ||
45 | // // New credentials will be retrieved instead of from cache. | ||
46 | // | ||
47 | // # Custom Provider | ||
48 | // | ||
49 | // Each Provider built into this package also provides a helper method to generate | ||
50 | // a Credentials pointer setup with the provider. To use a custom Provider just | ||
51 | // create a type which satisfies the Provider interface and pass it to the | ||
52 | // NewCredentials method. | ||
53 | // | ||
54 | // type MyProvider struct{} | ||
55 | // func (m *MyProvider) Retrieve() (Value, error) {...} | ||
56 | // func (m *MyProvider) IsExpired() bool {...} | ||
57 | // | ||
58 | // creds := NewCredentials(&MyProvider{}) | ||
59 | // credValue, err := creds.Get() | ||
60 | package credentials | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go new file mode 100644 index 0000000..b6e60d0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import "os" | ||
21 | |||
22 | // An EnvAWS retrieves credentials from the environment variables of the | ||
23 | // running process. Environment credentials never expire. | ||
24 | // | ||
25 | // Environment variables used: | ||
26 | // | ||
27 | // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY. | ||
28 | // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY. | ||
29 | // * Session Token: AWS_SESSION_TOKEN. | ||
30 | type EnvAWS struct { | ||
31 | retrieved bool | ||
32 | } | ||
33 | |||
34 | // NewEnvAWS returns a pointer to a new Credentials object | ||
35 | // wrapping the environment variable provider. | ||
36 | func NewEnvAWS() *Credentials { | ||
37 | return New(&EnvAWS{}) | ||
38 | } | ||
39 | |||
40 | // Retrieve retrieves the keys from the environment. | ||
41 | func (e *EnvAWS) Retrieve() (Value, error) { | ||
42 | e.retrieved = false | ||
43 | |||
44 | id := os.Getenv("AWS_ACCESS_KEY_ID") | ||
45 | if id == "" { | ||
46 | id = os.Getenv("AWS_ACCESS_KEY") | ||
47 | } | ||
48 | |||
49 | secret := os.Getenv("AWS_SECRET_ACCESS_KEY") | ||
50 | if secret == "" { | ||
51 | secret = os.Getenv("AWS_SECRET_KEY") | ||
52 | } | ||
53 | |||
54 | signerType := SignatureV4 | ||
55 | if id == "" || secret == "" { | ||
56 | signerType = SignatureAnonymous | ||
57 | } | ||
58 | |||
59 | e.retrieved = true | ||
60 | return Value{ | ||
61 | AccessKeyID: id, | ||
62 | SecretAccessKey: secret, | ||
63 | SessionToken: os.Getenv("AWS_SESSION_TOKEN"), | ||
64 | SignerType: signerType, | ||
65 | }, nil | ||
66 | } | ||
67 | |||
68 | // IsExpired returns if the credentials have been retrieved. | ||
69 | func (e *EnvAWS) IsExpired() bool { | ||
70 | return !e.retrieved | ||
71 | } | ||
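A short usage sketch; the key values are placeholders.

```go
package main

import (
	"fmt"
	"os"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Illustrative values only.
	os.Setenv("AWS_ACCESS_KEY_ID", "AKIA-EXAMPLE")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret-example")

	creds := credentials.NewEnvAWS()
	v, err := creds.Get()
	if err != nil {
		fmt.Println(err)
		return
	}
	// With both variables set this prints true; with either missing,
	// Retrieve falls back to anonymous credentials.
	fmt.Println(v.AccessKeyID, v.SignerType == credentials.SignatureV4)
}
```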
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go new file mode 100644 index 0000000..5bfeab1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import "os" | ||
21 | |||
22 | // An EnvMinio retrieves credentials from the environment variables of the | ||
23 | // running process. Environment credentials never expire. | ||
24 | // | ||
25 | // Environment variables used: | ||
26 | // | ||
27 | // * Access Key ID: MINIO_ACCESS_KEY. | ||
28 | // * Secret Access Key: MINIO_SECRET_KEY. | ||
29 | // * Access Key ID: MINIO_ROOT_USER. | ||
30 | // * Secret Access Key: MINIO_ROOT_PASSWORD. | ||
31 | type EnvMinio struct { | ||
32 | retrieved bool | ||
33 | } | ||
34 | |||
35 | // NewEnvMinio returns a pointer to a new Credentials object | ||
36 | // wrapping the environment variable provider. | ||
37 | func NewEnvMinio() *Credentials { | ||
38 | return New(&EnvMinio{}) | ||
39 | } | ||
40 | |||
41 | // Retrieve retrieves the keys from the environment. | ||
42 | func (e *EnvMinio) Retrieve() (Value, error) { | ||
43 | e.retrieved = false | ||
44 | |||
45 | id := os.Getenv("MINIO_ROOT_USER") | ||
46 | secret := os.Getenv("MINIO_ROOT_PASSWORD") | ||
47 | |||
48 | signerType := SignatureV4 | ||
49 | if id == "" || secret == "" { | ||
50 | id = os.Getenv("MINIO_ACCESS_KEY") | ||
51 | secret = os.Getenv("MINIO_SECRET_KEY") | ||
52 | if id == "" || secret == "" { | ||
53 | signerType = SignatureAnonymous | ||
54 | } | ||
55 | } | ||
56 | |||
57 | e.retrieved = true | ||
58 | return Value{ | ||
59 | AccessKeyID: id, | ||
60 | SecretAccessKey: secret, | ||
61 | SignerType: signerType, | ||
62 | }, nil | ||
63 | } | ||
64 | |||
65 | // IsExpired returns if the credentials have been retrieved. | ||
66 | func (e *EnvMinio) IsExpired() bool { | ||
67 | return !e.retrieved | ||
68 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go new file mode 100644 index 0000000..07a9c2f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2021 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "encoding/xml" | ||
23 | "fmt" | ||
24 | "io" | ||
25 | ) | ||
26 | |||
27 | // ErrorResponse - Is the typed error returned. | ||
28 | // ErrorResponse struct should be comparable since it is compared inside | ||
29 | // golang http API (https://github.com/golang/go/issues/29768) | ||
30 | type ErrorResponse struct { | ||
31 | XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"` | ||
32 | STSError struct { | ||
33 | Type string `xml:"Type"` | ||
34 | Code string `xml:"Code"` | ||
35 | Message string `xml:"Message"` | ||
36 | } `xml:"Error"` | ||
37 | RequestID string `xml:"RequestId"` | ||
38 | } | ||
39 | |||
40 | // Error - Is the typed error returned by all API operations. | ||
41 | type Error struct { | ||
42 | XMLName xml.Name `xml:"Error" json:"-"` | ||
43 | Code string | ||
44 | Message string | ||
45 | BucketName string | ||
46 | Key string | ||
47 | Resource string | ||
48 | RequestID string `xml:"RequestId"` | ||
49 | HostID string `xml:"HostId"` | ||
50 | |||
51 | // Region where the bucket is located. This header is returned | ||
52 | // only in HEAD bucket and ListObjects response. | ||
53 | Region string | ||
54 | |||
55 | // Captures the server string returned in response header. | ||
56 | Server string | ||
57 | |||
58 | // Underlying HTTP status code for the returned error | ||
59 | StatusCode int `xml:"-" json:"-"` | ||
60 | } | ||
61 | |||
62 | // Error - Returns S3 error string. | ||
63 | func (e Error) Error() string { | ||
64 | if e.Message == "" { | ||
65 | return fmt.Sprintf("Error response code %s.", e.Code) | ||
66 | } | ||
67 | return e.Message | ||
68 | } | ||
69 | |||
70 | // Error - Returns STS error string. | ||
71 | func (e ErrorResponse) Error() string { | ||
72 | if e.STSError.Message == "" { | ||
73 | return fmt.Sprintf("Error response code %s.", e.STSError.Code) | ||
74 | } | ||
75 | return e.STSError.Message | ||
76 | } | ||
77 | |||
78 | // xmlDecoder decodes the XML read from body into v. | ||
79 | func xmlDecoder(body io.Reader, v interface{}) error { | ||
80 | d := xml.NewDecoder(body) | ||
81 | return d.Decode(v) | ||
82 | } | ||
83 | |||
84 | // xmlDecodeAndBody reads the whole body up to 1MB and | ||
85 | // tries to XML decode it into v. | ||
86 | // The body that was read and any error from reading or decoding is returned. | ||
87 | func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { | ||
88 | // read the whole body (up to 1MB) | ||
89 | const maxBodyLength = 1 << 20 | ||
90 | body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) | ||
91 | if err != nil { | ||
92 | return nil, err | ||
93 | } | ||
94 | return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) | ||
95 | } | ||
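Since xmlDecoder and xmlDecodeAndBody are unexported, the sketch below decodes an STS-style error body into the exported ErrorResponse directly with encoding/xml; the body is a trimmed, invented example showing the namespace the struct tag expects.

```go
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// A trimmed STS-style error body, namespace included, for illustration.
	body := `<ErrorResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
	  <Error><Type>Sender</Type><Code>AccessDenied</Code><Message>denied</Message></Error>
	  <RequestId>abc-123</RequestId>
	</ErrorResponse>`

	var errResp credentials.ErrorResponse
	if err := xml.Unmarshal([]byte(body), &errResp); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	// Error() returns the STS Message when present.
	fmt.Println(errResp.STSError.Code, errResp.Error()) // AccessDenied denied
}
```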
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go new file mode 100644 index 0000000..5b07376 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "encoding/json" | ||
22 | "errors" | ||
23 | "os" | ||
24 | "os/exec" | ||
25 | "path/filepath" | ||
26 | "strings" | ||
27 | "time" | ||
28 | |||
29 | ini "gopkg.in/ini.v1" | ||
30 | ) | ||
31 | |||
32 | // An externalProcessCredentials stores the output of a credential_process. | ||
33 | type externalProcessCredentials struct { | ||
34 | Version int | ||
35 | SessionToken string | ||
36 | AccessKeyID string `json:"AccessKeyId"` | ||
37 | SecretAccessKey string | ||
38 | Expiration time.Time | ||
39 | } | ||
40 | |||
41 | // A FileAWSCredentials retrieves credentials from the current user's home | ||
42 | // directory, and keeps track if those credentials are expired. | ||
43 | // | ||
44 | // Profile ini file example: $HOME/.aws/credentials | ||
45 | type FileAWSCredentials struct { | ||
46 | Expiry | ||
47 | |||
48 | // Path to the shared credentials file. | ||
49 | // | ||
50 | // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the | ||
51 | // env value is empty will default to current user's home directory. | ||
52 | // Linux/OSX: "$HOME/.aws/credentials" | ||
53 | // Windows: "%USERPROFILE%\.aws\credentials" | ||
54 | Filename string | ||
55 | |||
56 | // AWS Profile to extract credentials from the shared credentials file. If empty | ||
57 | // will default to environment variable "AWS_PROFILE" or "default" if | ||
58 | // environment variable is also not set. | ||
59 | Profile string | ||
60 | |||
61 | // retrieved states if the credentials have been successfully retrieved. | ||
62 | retrieved bool | ||
63 | } | ||
64 | |||
65 | // NewFileAWSCredentials returns a pointer to a new Credentials object | ||
66 | // wrapping the Profile file provider. | ||
67 | func NewFileAWSCredentials(filename, profile string) *Credentials { | ||
68 | return New(&FileAWSCredentials{ | ||
69 | Filename: filename, | ||
70 | Profile: profile, | ||
71 | }) | ||
72 | } | ||
73 | |||
74 | // Retrieve reads and extracts the shared credentials from the current | ||
75 | // user's home directory. | ||
76 | func (p *FileAWSCredentials) Retrieve() (Value, error) { | ||
77 | if p.Filename == "" { | ||
78 | p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") | ||
79 | if p.Filename == "" { | ||
80 | homeDir, err := os.UserHomeDir() | ||
81 | if err != nil { | ||
82 | return Value{}, err | ||
83 | } | ||
84 | p.Filename = filepath.Join(homeDir, ".aws", "credentials") | ||
85 | } | ||
86 | } | ||
87 | if p.Profile == "" { | ||
88 | p.Profile = os.Getenv("AWS_PROFILE") | ||
89 | if p.Profile == "" { | ||
90 | p.Profile = "default" | ||
91 | } | ||
92 | } | ||
93 | |||
94 | p.retrieved = false | ||
95 | |||
96 | iniProfile, err := loadProfile(p.Filename, p.Profile) | ||
97 | if err != nil { | ||
98 | return Value{}, err | ||
99 | } | ||
100 | |||
101 | // Default to empty string if not found. | ||
102 | id := iniProfile.Key("aws_access_key_id") | ||
103 | // Default to empty string if not found. | ||
104 | secret := iniProfile.Key("aws_secret_access_key") | ||
105 | // Default to empty string if not found. | ||
106 | token := iniProfile.Key("aws_session_token") | ||
107 | |||
108 | // If credential_process is defined, obtain credentials by executing | ||
109 | // the external process | ||
110 | credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String()) | ||
111 | if credentialProcess != "" { | ||
112 | args := strings.Fields(credentialProcess) | ||
113 | if len(args) <= 1 { | ||
114 | return Value{}, errors.New("invalid credential process args") | ||
115 | } | ||
116 | cmd := exec.Command(args[0], args[1:]...) | ||
117 | out, err := cmd.Output() | ||
118 | if err != nil { | ||
119 | return Value{}, err | ||
120 | } | ||
121 | var externalProcessCredentials externalProcessCredentials | ||
122 | err = json.Unmarshal([]byte(out), &externalProcessCredentials) | ||
123 | if err != nil { | ||
124 | return Value{}, err | ||
125 | } | ||
126 | p.retrieved = true | ||
127 | p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow) | ||
128 | return Value{ | ||
129 | AccessKeyID: externalProcessCredentials.AccessKeyID, | ||
130 | SecretAccessKey: externalProcessCredentials.SecretAccessKey, | ||
131 | SessionToken: externalProcessCredentials.SessionToken, | ||
132 | SignerType: SignatureV4, | ||
133 | }, nil | ||
134 | } | ||
135 | p.retrieved = true | ||
136 | return Value{ | ||
137 | AccessKeyID: id.String(), | ||
138 | SecretAccessKey: secret.String(), | ||
139 | SessionToken: token.String(), | ||
140 | SignerType: SignatureV4, | ||
141 | }, nil | ||
142 | } | ||
143 | |||
144 | // loadProfile loads the given profile from the shared credentials file. | ||
145 | // The credentials retrieved from the profile are returned, or an error | ||
146 | // if reading the file fails or the data is invalid. | ||
147 | func loadProfile(filename, profile string) (*ini.Section, error) { | ||
148 | config, err := ini.Load(filename) | ||
149 | if err != nil { | ||
150 | return nil, err | ||
151 | } | ||
152 | iniProfile, err := config.GetSection(profile) | ||
153 | if err != nil { | ||
154 | return nil, err | ||
155 | } | ||
156 | return iniProfile, nil | ||
157 | } | ||
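A usage sketch assuming a shared credentials file like the credentials.sample shown earlier, which defines a "no_token" profile; an empty filename falls back to AWS_SHARED_CREDENTIALS_FILE and then $HOME/.aws/credentials.

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Empty filename/profile fall back to the env variables and then
	// to $HOME/.aws/credentials and the "default" profile.
	creds := credentials.NewFileAWSCredentials("", "no_token")
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(v.AccessKeyID)
}
```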
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go new file mode 100644 index 0000000..eb77767 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go | |||
@@ -0,0 +1,139 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "os" | ||
22 | "path/filepath" | ||
23 | "runtime" | ||
24 | |||
25 | jsoniter "github.com/json-iterator/go" | ||
26 | ) | ||
27 | |||
28 | // A FileMinioClient retrieves credentials from the current user's home | ||
29 | // directory, and keeps track if those credentials are expired. | ||
30 | // | ||
31 | // Configuration file example: $HOME/.mc/config.json | ||
32 | type FileMinioClient struct { | ||
33 | // Path to the shared credentials file. | ||
34 | // | ||
35 | // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the | ||
36 | // env value is empty will default to current user's home directory. | ||
37 | // Linux/OSX: "$HOME/.mc/config.json" | ||
38 | // Windows: "%USERPROFILE%\mc\config.json" | ||
39 | Filename string | ||
40 | |||
41 | // MinIO Alias to extract credentials from the shared credentials file. If empty | ||
42 | // will default to environment variable "MINIO_ALIAS" or "default" if | ||
43 | // environment variable is also not set. | ||
44 | Alias string | ||
45 | |||
46 | // retrieved states if the credentials have been successfully retrieved. | ||
47 | retrieved bool | ||
48 | } | ||
49 | |||
50 | // NewFileMinioClient returns a pointer to a new Credentials object | ||
51 | // wrapping the Alias file provider. | ||
52 | func NewFileMinioClient(filename, alias string) *Credentials { | ||
53 | return New(&FileMinioClient{ | ||
54 | Filename: filename, | ||
55 | Alias: alias, | ||
56 | }) | ||
57 | } | ||
58 | |||
59 | // Retrieve reads and extracts the shared credentials from the current | ||
60 | // user's home directory. | ||
61 | func (p *FileMinioClient) Retrieve() (Value, error) { | ||
62 | if p.Filename == "" { | ||
63 | if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { | ||
64 | p.Filename = value | ||
65 | } else { | ||
66 | homeDir, err := os.UserHomeDir() | ||
67 | if err != nil { | ||
68 | return Value{}, err | ||
69 | } | ||
70 | p.Filename = filepath.Join(homeDir, ".mc", "config.json") | ||
71 | if runtime.GOOS == "windows" { | ||
72 | p.Filename = filepath.Join(homeDir, "mc", "config.json") | ||
73 | } | ||
74 | } | ||
75 | } | ||
76 | |||
77 | if p.Alias == "" { | ||
78 | p.Alias = os.Getenv("MINIO_ALIAS") | ||
79 | if p.Alias == "" { | ||
80 | p.Alias = "s3" | ||
81 | } | ||
82 | } | ||
83 | |||
84 | p.retrieved = false | ||
85 | |||
86 | hostCfg, err := loadAlias(p.Filename, p.Alias) | ||
87 | if err != nil { | ||
88 | return Value{}, err | ||
89 | } | ||
90 | |||
91 | p.retrieved = true | ||
92 | return Value{ | ||
93 | AccessKeyID: hostCfg.AccessKey, | ||
94 | SecretAccessKey: hostCfg.SecretKey, | ||
95 | SignerType: parseSignatureType(hostCfg.API), | ||
96 | }, nil | ||
97 | } | ||
98 | |||
99 | // IsExpired returns if the shared credentials have expired. | ||
100 | func (p *FileMinioClient) IsExpired() bool { | ||
101 | return !p.retrieved | ||
102 | } | ||
103 | |||
104 | // hostConfig configuration of a host. | ||
105 | type hostConfig struct { | ||
106 | URL string `json:"url"` | ||
107 | AccessKey string `json:"accessKey"` | ||
108 | SecretKey string `json:"secretKey"` | ||
109 | API string `json:"api"` | ||
110 | } | ||
111 | |||
112 | // config config version. | ||
113 | type config struct { | ||
114 | Version string `json:"version"` | ||
115 | Hosts map[string]hostConfig `json:"hosts"` | ||
116 | Aliases map[string]hostConfig `json:"aliases"` | ||
117 | } | ||
118 | |||
119 | // loadAlias loads the given alias from the shared credentials file. | ||
120 | // The credentials retrieved from the alias are returned, or an error | ||
121 | // if reading the file fails. | ||
122 | func loadAlias(filename, alias string) (hostConfig, error) { | ||
123 | cfg := &config{} | ||
124 | json := jsoniter.ConfigCompatibleWithStandardLibrary | ||
125 | |||
126 | configBytes, err := os.ReadFile(filename) | ||
127 | if err != nil { | ||
128 | return hostConfig{}, err | ||
129 | } | ||
130 | if err = json.Unmarshal(configBytes, cfg); err != nil { | ||
131 | return hostConfig{}, err | ||
132 | } | ||
133 | |||
134 | if cfg.Version == "10" { | ||
135 | return cfg.Aliases[alias], nil | ||
136 | } | ||
137 | |||
138 | return cfg.Hosts[alias], nil | ||
139 | } | ||
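A usage sketch assuming an mc config like the config.json.sample shown earlier, which defines a "play" alias; empty arguments fall back to MINIO_SHARED_CREDENTIALS_FILE / ~/.mc/config.json and MINIO_ALIAS / "s3".

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Empty filename defaults to ~/.mc/config.json (Windows:
	// %USERPROFILE%\mc\config.json); empty alias defaults to "s3".
	creds := credentials.NewFileMinioClient("", "play")
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(v.AccessKeyID)
}
```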
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go new file mode 100644 index 0000000..c5153c4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go | |||
@@ -0,0 +1,433 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "bufio" | ||
22 | "context" | ||
23 | "errors" | ||
24 | "fmt" | ||
25 | "io" | ||
26 | "net" | ||
27 | "net/http" | ||
28 | "net/url" | ||
29 | "os" | ||
30 | "path" | ||
31 | "strings" | ||
32 | "time" | ||
33 | |||
34 | jsoniter "github.com/json-iterator/go" | ||
35 | ) | ||
36 | |||
37 | // DefaultExpiryWindow - Default expiry window. | ||
38 | // ExpiryWindow will allow the credentials to trigger refreshing | ||
39 | // prior to the credentials actually expiring. This is beneficial | ||
40 | // so race conditions with expiring credentials do not cause | ||
41 | // requests to fail unexpectedly due to ExpiredTokenException exceptions. | ||
42 | // DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration. | ||
43 | // When used, a token refresh is triggered once 80% of the time until | ||
44 | // the actual expiration has elapsed. | ||
45 | const DefaultExpiryWindow = -1 | ||
46 | |||
47 | // An IAM retrieves credentials from the EC2 service, and keeps track of | ||
48 | // whether those credentials are expired. | ||
49 | type IAM struct { | ||
50 | Expiry | ||
51 | |||
52 | // Required http Client to use when connecting to IAM metadata service. | ||
53 | Client *http.Client | ||
54 | |||
55 | // Custom endpoint to fetch IAM role credentials. | ||
56 | Endpoint string | ||
57 | |||
58 | // Region configurable custom region for STS | ||
59 | Region string | ||
60 | |||
61 | // Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html | ||
62 | Container struct { | ||
63 | AuthorizationToken string | ||
64 | CredentialsFullURI string | ||
65 | CredentialsRelativeURI string | ||
66 | } | ||
67 | |||
68 | // EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html | ||
69 | EKSIdentity struct { | ||
70 | TokenFile string | ||
71 | RoleARN string | ||
72 | RoleSessionName string | ||
73 | } | ||
74 | } | ||
75 | |||
76 | // IAM Roles for Amazon EC2 | ||
77 | // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | ||
78 | const ( | ||
79 | DefaultIAMRoleEndpoint = "http://169.254.169.254" | ||
80 | DefaultECSRoleEndpoint = "http://169.254.170.2" | ||
81 | DefaultSTSRoleEndpoint = "https://sts.amazonaws.com" | ||
82 | DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/" | ||
83 | TokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" | ||
84 | TokenPath = "/latest/api/token" | ||
85 | TokenTTL = "21600" | ||
86 | TokenRequestHeader = "X-aws-ec2-metadata-token" | ||
87 | ) | ||
88 | |||
89 | // NewIAM returns a pointer to a new Credentials object wrapping the IAM. | ||
90 | func NewIAM(endpoint string) *Credentials { | ||
91 | return New(&IAM{ | ||
92 | Client: &http.Client{ | ||
93 | Transport: http.DefaultTransport, | ||
94 | }, | ||
95 | Endpoint: endpoint, | ||
96 | }) | ||
97 | } | ||
98 | |||
99 | // Retrieve retrieves credentials from the EC2 service. | ||
100 | // Error will be returned if the request fails, or if it is unable to | ||
101 | // extract the desired credentials. | ||
102 | func (m *IAM) Retrieve() (Value, error) { | ||
103 | token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN") | ||
104 | if token == "" { | ||
105 | token = m.Container.AuthorizationToken | ||
106 | } | ||
107 | |||
108 | relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") | ||
109 | if relativeURI == "" { | ||
110 | relativeURI = m.Container.CredentialsRelativeURI | ||
111 | } | ||
112 | |||
113 | fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI") | ||
114 | if fullURI == "" { | ||
115 | fullURI = m.Container.CredentialsFullURI | ||
116 | } | ||
117 | |||
118 | identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE") | ||
119 | if identityFile == "" { | ||
120 | identityFile = m.EKSIdentity.TokenFile | ||
121 | } | ||
122 | |||
123 | roleArn := os.Getenv("AWS_ROLE_ARN") | ||
124 | if roleArn == "" { | ||
125 | roleArn = m.EKSIdentity.RoleARN | ||
126 | } | ||
127 | |||
128 | roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME") | ||
129 | if roleSessionName == "" { | ||
130 | roleSessionName = m.EKSIdentity.RoleSessionName | ||
131 | } | ||
132 | |||
133 | region := os.Getenv("AWS_REGION") | ||
134 | if region == "" { | ||
135 | region = m.Region | ||
136 | } | ||
137 | |||
138 | var roleCreds ec2RoleCredRespBody | ||
139 | var err error | ||
140 | |||
141 | endpoint := m.Endpoint | ||
142 | switch { | ||
143 | case identityFile != "": | ||
144 | if len(endpoint) == 0 { | ||
145 | if region != "" { | ||
146 | if strings.HasPrefix(region, "cn-") { | ||
147 | endpoint = "https://sts." + region + ".amazonaws.com.cn" | ||
148 | } else { | ||
149 | endpoint = "https://sts." + region + ".amazonaws.com" | ||
150 | } | ||
151 | } else { | ||
152 | endpoint = DefaultSTSRoleEndpoint | ||
153 | } | ||
154 | } | ||
155 | |||
156 | creds := &STSWebIdentity{ | ||
157 | Client: m.Client, | ||
158 | STSEndpoint: endpoint, | ||
159 | GetWebIDTokenExpiry: func() (*WebIdentityToken, error) { | ||
160 | token, err := os.ReadFile(identityFile) | ||
161 | if err != nil { | ||
162 | return nil, err | ||
163 | } | ||
164 | |||
165 | return &WebIdentityToken{Token: string(token)}, nil | ||
166 | }, | ||
167 | RoleARN: roleArn, | ||
168 | roleSessionName: roleSessionName, | ||
169 | } | ||
170 | |||
171 | stsWebIdentityCreds, err := creds.Retrieve() | ||
172 | if err == nil { | ||
173 | m.SetExpiration(creds.Expiration(), DefaultExpiryWindow) | ||
174 | } | ||
175 | return stsWebIdentityCreds, err | ||
176 | |||
177 | case relativeURI != "": | ||
178 | if len(endpoint) == 0 { | ||
179 | endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI) | ||
180 | } | ||
181 | |||
182 | roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token) | ||
183 | |||
184 | case fullURI != "": | ||
185 | if len(endpoint) == 0 { | ||
186 | endpoint = fullURI | ||
187 | var ok bool | ||
188 | if ok, err = isLoopback(endpoint); !ok { | ||
189 | if err == nil { | ||
190 | err = fmt.Errorf("uri host is not a loopback address: %s", endpoint) | ||
191 | } | ||
192 | break | ||
193 | } | ||
194 | } | ||
195 | |||
196 | roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token) | ||
197 | |||
198 | default: | ||
199 | roleCreds, err = getCredentials(m.Client, endpoint) | ||
200 | } | ||
201 | |||
202 | if err != nil { | ||
203 | return Value{}, err | ||
204 | } | ||
205 | // Expiry window is set to DefaultExpiryWindow (refresh begins shortly before expiry). | ||
206 | m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) | ||
207 | |||
208 | return Value{ | ||
209 | AccessKeyID: roleCreds.AccessKeyID, | ||
210 | SecretAccessKey: roleCreds.SecretAccessKey, | ||
211 | SessionToken: roleCreds.Token, | ||
212 | SignerType: SignatureV4, | ||
213 | }, nil | ||
214 | } | ||
215 | |||
216 | // An ec2RoleCredRespBody provides the shape for unmarshaling credential | ||
217 | // request responses. | ||
218 | type ec2RoleCredRespBody struct { | ||
219 | // Success State | ||
220 | Expiration time.Time | ||
221 | AccessKeyID string | ||
222 | SecretAccessKey string | ||
223 | Token string | ||
224 | |||
225 | // Error state | ||
226 | Code string | ||
227 | Message string | ||
228 | |||
229 | // Unused params. | ||
230 | LastUpdated time.Time | ||
231 | Type string | ||
232 | } | ||
233 | |||
234 | // Get the final IAM role URL where the request will | ||
235 | // be sent to fetch the rolling access credentials. | ||
236 | // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | ||
237 | func getIAMRoleURL(endpoint string) (*url.URL, error) { | ||
238 | u, err := url.Parse(endpoint) | ||
239 | if err != nil { | ||
240 | return nil, err | ||
241 | } | ||
242 | u.Path = DefaultIAMSecurityCredsPath | ||
243 | return u, nil | ||
244 | } | ||
245 | |||
246 | // listRoleNames lists the credential role names associated | ||
247 | // with the current EC2 service. An error is returned if there | ||
248 | // are no credentials, or if making or receiving the request fails. | ||
249 | // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | ||
250 | func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) { | ||
251 | req, err := http.NewRequest(http.MethodGet, u.String(), nil) | ||
252 | if err != nil { | ||
253 | return nil, err | ||
254 | } | ||
255 | if token != "" { | ||
256 | req.Header.Add(TokenRequestHeader, token) | ||
257 | } | ||
258 | resp, err := client.Do(req) | ||
259 | if err != nil { | ||
260 | return nil, err | ||
261 | } | ||
262 | defer resp.Body.Close() | ||
263 | if resp.StatusCode != http.StatusOK { | ||
264 | return nil, errors.New(resp.Status) | ||
265 | } | ||
266 | |||
267 | credsList := []string{} | ||
268 | s := bufio.NewScanner(resp.Body) | ||
269 | for s.Scan() { | ||
270 | credsList = append(credsList, s.Text()) | ||
271 | } | ||
272 | |||
273 | if err := s.Err(); err != nil { | ||
274 | return nil, err | ||
275 | } | ||
276 | |||
277 | return credsList, nil | ||
278 | } | ||
279 | |||
280 | func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) { | ||
281 | req, err := http.NewRequest(http.MethodGet, endpoint, nil) | ||
282 | if err != nil { | ||
283 | return ec2RoleCredRespBody{}, err | ||
284 | } | ||
285 | |||
286 | if token != "" { | ||
287 | req.Header.Set("Authorization", token) | ||
288 | } | ||
289 | |||
290 | resp, err := client.Do(req) | ||
291 | if err != nil { | ||
292 | return ec2RoleCredRespBody{}, err | ||
293 | } | ||
294 | defer resp.Body.Close() | ||
295 | if resp.StatusCode != http.StatusOK { | ||
296 | return ec2RoleCredRespBody{}, errors.New(resp.Status) | ||
297 | } | ||
298 | |||
299 | respCreds := ec2RoleCredRespBody{} | ||
300 | if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { | ||
301 | return ec2RoleCredRespBody{}, err | ||
302 | } | ||
303 | |||
304 | return respCreds, nil | ||
305 | } | ||
306 | |||
307 | func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { | ||
308 | ctx, cancel := context.WithTimeout(context.Background(), time.Second) | ||
309 | defer cancel() | ||
310 | |||
311 | req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil) | ||
312 | if err != nil { | ||
313 | return "", err | ||
314 | } | ||
315 | req.Header.Add(TokenRequestTTLHeader, TokenTTL) | ||
316 | resp, err := client.Do(req) | ||
317 | if err != nil { | ||
318 | return "", err | ||
319 | } | ||
320 | defer resp.Body.Close() | ||
321 | data, err := io.ReadAll(resp.Body) | ||
322 | if err != nil { | ||
323 | return "", err | ||
324 | } | ||
325 | if resp.StatusCode != http.StatusOK { | ||
326 | return "", errors.New(resp.Status) | ||
327 | } | ||
328 | return string(data), nil | ||
329 | } | ||
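// The IMDSv2 exchange implemented above is equivalent to the following
// illustrative shell commands (the endpoint and TTL mirror the constants
// defined earlier in this file):
//
//	TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" \
//	    -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
//	curl -H "X-aws-ec2-metadata-token: $TOKEN" \
//	    "http://169.254.169.254/latest/meta-data/iam/security-credentials/"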
330 | |||
331 | // getCredentials - obtains the credentials from the IAM role name associated with | ||
332 | // the current EC2 service. | ||
333 | // | ||
334 | // If the credentials cannot be found, or there is an error reading | ||
335 | // the response, an error will be returned. | ||
336 | func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { | ||
337 | if endpoint == "" { | ||
338 | endpoint = DefaultIAMRoleEndpoint | ||
339 | } | ||
340 | |||
341 | // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html | ||
342 | token, err := fetchIMDSToken(client, endpoint) | ||
343 | if err != nil { | ||
344 | // Return errors only for valid situations: if IMDSv2 is not enabled, | ||
345 | // we will not be able to get the token; in such a situation we have | ||
346 | // to rely on IMDSv1 behavior as a fallback. This check ensures that. | ||
347 | // Refer to https://github.com/minio/minio-go/issues/1866 | ||
348 | if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { | ||
349 | return ec2RoleCredRespBody{}, err | ||
350 | } | ||
351 | } | ||
352 | |||
353 | // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | ||
354 | u, err := getIAMRoleURL(endpoint) | ||
355 | if err != nil { | ||
356 | return ec2RoleCredRespBody{}, err | ||
357 | } | ||
358 | |||
359 | // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | ||
360 | roleNames, err := listRoleNames(client, u, token) | ||
361 | if err != nil { | ||
362 | return ec2RoleCredRespBody{}, err | ||
363 | } | ||
364 | |||
365 | if len(roleNames) == 0 { | ||
366 | return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") | ||
367 | } | ||
368 | |||
369 | // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | ||
370 | // - An instance profile can contain only one IAM role. This limit cannot be increased. | ||
371 | roleName := roleNames[0] | ||
372 | |||
373 | // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | ||
374 | // The following command retrieves the security credentials for an | ||
375 | // IAM role named `s3access`. | ||
376 | // | ||
377 | // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access | ||
378 | // | ||
379 | u.Path = path.Join(u.Path, roleName) | ||
380 | req, err := http.NewRequest(http.MethodGet, u.String(), nil) | ||
381 | if err != nil { | ||
382 | return ec2RoleCredRespBody{}, err | ||
383 | } | ||
384 | if token != "" { | ||
385 | req.Header.Add(TokenRequestHeader, token) | ||
386 | } | ||
387 | |||
388 | resp, err := client.Do(req) | ||
389 | if err != nil { | ||
390 | return ec2RoleCredRespBody{}, err | ||
391 | } | ||
392 | defer resp.Body.Close() | ||
393 | if resp.StatusCode != http.StatusOK { | ||
394 | return ec2RoleCredRespBody{}, errors.New(resp.Status) | ||
395 | } | ||
396 | |||
397 | respCreds := ec2RoleCredRespBody{} | ||
398 | if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { | ||
399 | return ec2RoleCredRespBody{}, err | ||
400 | } | ||
401 | |||
402 | if respCreds.Code != "Success" { | ||
403 | // If an error code was returned something failed requesting the role. | ||
404 | return ec2RoleCredRespBody{}, errors.New(respCreds.Message) | ||
405 | } | ||
406 | |||
407 | return respCreds, nil | ||
408 | } | ||
409 | |||
410 | // isLoopback reports whether a URI's host resolves only to loopback addresses | ||
411 | func isLoopback(uri string) (bool, error) { | ||
412 | u, err := url.Parse(uri) | ||
413 | if err != nil { | ||
414 | return false, err | ||
415 | } | ||
416 | |||
417 | host := u.Hostname() | ||
418 | if len(host) == 0 { | ||
419 | return false, fmt.Errorf("can't parse host from uri: %s", uri) | ||
420 | } | ||
421 | |||
422 | ips, err := net.LookupHost(host) | ||
423 | if err != nil { | ||
424 | return false, err | ||
425 | } | ||
426 | for _, ip := range ips { | ||
427 | if !net.ParseIP(ip).IsLoopback() { | ||
428 | return false, nil | ||
429 | } | ||
430 | } | ||
431 | |||
432 | return true, nil | ||
433 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go new file mode 100644 index 0000000..b794333 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import "strings" | ||
21 | |||
22 | // SignatureType is type of Authorization requested for a given HTTP request. | ||
23 | type SignatureType int | ||
24 | |||
25 | // Different types of supported signatures - default is SignatureV4 or SignatureDefault. | ||
26 | const ( | ||
27 | // SignatureDefault is always set to v4. | ||
28 | SignatureDefault SignatureType = iota | ||
29 | SignatureV4 | ||
30 | SignatureV2 | ||
31 | SignatureV4Streaming | ||
32 | SignatureAnonymous // Anonymous signature signifies no signature. | ||
33 | ) | ||
34 | |||
35 | // IsV2 - is signature SignatureV2? | ||
36 | func (s SignatureType) IsV2() bool { | ||
37 | return s == SignatureV2 | ||
38 | } | ||
39 | |||
40 | // IsV4 - is signature SignatureV4? | ||
41 | func (s SignatureType) IsV4() bool { | ||
42 | return s == SignatureV4 || s == SignatureDefault | ||
43 | } | ||
44 | |||
45 | // IsStreamingV4 - is signature SignatureV4Streaming? | ||
46 | func (s SignatureType) IsStreamingV4() bool { | ||
47 | return s == SignatureV4Streaming | ||
48 | } | ||
49 | |||
50 | // IsAnonymous - is signature empty? | ||
51 | func (s SignatureType) IsAnonymous() bool { | ||
52 | return s == SignatureAnonymous | ||
53 | } | ||
54 | |||
55 | // String returns a humanized version of the signature type; | ||
56 | // strings returned here are case insensitive. | ||
57 | func (s SignatureType) String() string { | ||
58 | if s.IsV2() { | ||
59 | return "S3v2" | ||
60 | } else if s.IsV4() { | ||
61 | return "S3v4" | ||
62 | } else if s.IsStreamingV4() { | ||
63 | return "S3v4Streaming" | ||
64 | } | ||
65 | return "Anonymous" | ||
66 | } | ||
67 | |||
68 | func parseSignatureType(str string) SignatureType { | ||
69 | if strings.EqualFold(str, "S3v4") { | ||
70 | return SignatureV4 | ||
71 | } else if strings.EqualFold(str, "S3v2") { | ||
72 | return SignatureV2 | ||
73 | } else if strings.EqualFold(str, "S3v4Streaming") { | ||
74 | return SignatureV4Streaming | ||
75 | } | ||
76 | return SignatureAnonymous | ||
77 | } | ||
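// Editorial sketch: String and parseSignatureType round-trip for the named
// types, and matching is case insensitive. Note SignatureDefault stringifies
// as "S3v4" and therefore parses back as SignatureV4:
//
//	parseSignatureType("s3v4") == SignatureV4               // true
//	parseSignatureType(SignatureV2.String()) == SignatureV2 // true
//	parseSignatureType("unknown") == SignatureAnonymous     // fallback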
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go new file mode 100644 index 0000000..7dde00b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | // A Static is a set of credentials which are set programmatically, | ||
21 | // and will never expire. | ||
22 | type Static struct { | ||
23 | Value | ||
24 | } | ||
25 | |||
26 | // NewStaticV2 returns a pointer to a new Credentials object | ||
27 | // wrapping a static credentials value provider; the signature is | ||
28 | // set to v2. If access and secret are not specified then, | ||
29 | // regardless of the signature type set, Retrieve will return an | ||
30 | // anonymous Value. | ||
31 | func NewStaticV2(id, secret, token string) *Credentials { | ||
32 | return NewStatic(id, secret, token, SignatureV2) | ||
33 | } | ||
34 | |||
35 | // NewStaticV4 is similar to NewStaticV2 with similar considerations. | ||
36 | func NewStaticV4(id, secret, token string) *Credentials { | ||
37 | return NewStatic(id, secret, token, SignatureV4) | ||
38 | } | ||
39 | |||
40 | // NewStatic returns a pointer to a new Credentials object | ||
41 | // wrapping a static credentials value provider. | ||
42 | func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { | ||
43 | return New(&Static{ | ||
44 | Value: Value{ | ||
45 | AccessKeyID: id, | ||
46 | SecretAccessKey: secret, | ||
47 | SessionToken: token, | ||
48 | SignerType: signerType, | ||
49 | }, | ||
50 | }) | ||
51 | } | ||
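// Editorial usage sketch: fixed keys for a deployment. The key values are
// placeholders; an empty session token is typical for long-lived keys.
//
//	creds := NewStaticV4("minioadmin", "minioadmin", "")
//	v, err := creds.Get() // Get is defined on the wrapping Credentials object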
52 | |||
53 | // Retrieve returns the static credentials. | ||
54 | func (s *Static) Retrieve() (Value, error) { | ||
55 | if s.AccessKeyID == "" || s.SecretAccessKey == "" { | ||
56 | // Anonymous is not an error | ||
57 | return Value{SignerType: SignatureAnonymous}, nil | ||
58 | } | ||
59 | return s.Value, nil | ||
60 | } | ||
61 | |||
62 | // IsExpired reports whether the credentials are expired. | ||
63 | // | ||
64 | // For Static, the credentials never expire. | ||
65 | func (s *Static) IsExpired() bool { | ||
66 | return false | ||
67 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go new file mode 100644 index 0000000..9e92c1e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go | |||
@@ -0,0 +1,182 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2019-2022 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "encoding/xml" | ||
23 | "errors" | ||
24 | "fmt" | ||
25 | "io" | ||
26 | "net/http" | ||
27 | "net/url" | ||
28 | "strings" | ||
29 | "time" | ||
30 | ) | ||
31 | |||
32 | // AssumedRoleUser - The identifiers for the temporary security credentials that | ||
33 | // the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser | ||
34 | type AssumedRoleUser struct { | ||
35 | Arn string | ||
36 | AssumedRoleID string `xml:"AssumeRoleId"` | ||
37 | } | ||
38 | |||
39 | // AssumeRoleWithClientGrantsResponse contains the result of successful AssumeRoleWithClientGrants request. | ||
40 | type AssumeRoleWithClientGrantsResponse struct { | ||
41 | XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"` | ||
42 | Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"` | ||
43 | ResponseMetadata struct { | ||
44 | RequestID string `xml:"RequestId,omitempty"` | ||
45 | } `xml:"ResponseMetadata,omitempty"` | ||
46 | } | ||
47 | |||
48 | // ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants | ||
49 | // request, including temporary credentials that can be used to make MinIO API requests. | ||
50 | type ClientGrantsResult struct { | ||
51 | AssumedRoleUser AssumedRoleUser `xml:",omitempty"` | ||
52 | Audience string `xml:",omitempty"` | ||
53 | Credentials struct { | ||
54 | AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` | ||
55 | SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` | ||
56 | Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` | ||
57 | SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` | ||
58 | } `xml:",omitempty"` | ||
59 | PackedPolicySize int `xml:",omitempty"` | ||
60 | Provider string `xml:",omitempty"` | ||
61 | SubjectFromClientGrantsToken string `xml:",omitempty"` | ||
62 | } | ||
63 | |||
64 | // ClientGrantsToken - client grants token with expiry. | ||
65 | type ClientGrantsToken struct { | ||
66 | Token string | ||
67 | Expiry int | ||
68 | } | ||
69 | |||
70 | // An STSClientGrants retrieves credentials from the MinIO service, and keeps | ||
71 | // track of whether those credentials are expired. | ||
72 | type STSClientGrants struct { | ||
73 | Expiry | ||
74 | |||
75 | // Required http Client to use when connecting to MinIO STS service. | ||
76 | Client *http.Client | ||
77 | |||
78 | // MinIO endpoint to fetch STS credentials. | ||
79 | STSEndpoint string | ||
80 | |||
81 | // GetClientGrantsTokenExpiry is a function to retrieve tokens | ||
82 | // from the IDP. This function should return two values: one is | ||
83 | // an access token, which is a self-contained access token (JWT), | ||
84 | // and the second return value is the expiry associated with | ||
85 | // this token. This is a customer-provided function and | ||
86 | // is mandatory. | ||
87 | GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error) | ||
88 | } | ||
89 | |||
90 | // NewSTSClientGrants returns a pointer to a new | ||
91 | // Credentials object wrapping the STSClientGrants. | ||
92 | func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { | ||
93 | if stsEndpoint == "" { | ||
94 | return nil, errors.New("STS endpoint cannot be empty") | ||
95 | } | ||
96 | if getClientGrantsTokenExpiry == nil { | ||
97 | return nil, errors.New("Client grants access token and expiry retrieval function should be defined") | ||
98 | } | ||
99 | return New(&STSClientGrants{ | ||
100 | Client: &http.Client{ | ||
101 | Transport: http.DefaultTransport, | ||
102 | }, | ||
103 | STSEndpoint: stsEndpoint, | ||
104 | GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry, | ||
105 | }), nil | ||
106 | } | ||
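// Editorial usage sketch: the callback typically exchanges a client
// id/secret with the IDP and returns the JWT plus its expiry. The endpoint
// is a placeholder and fetchTokenFromIDP is a hypothetical helper.
//
//	creds, err := NewSTSClientGrants("https://minio.example.com:9000",
//		func() (*ClientGrantsToken, error) {
//			tok, err := fetchTokenFromIDP() // hypothetical
//			if err != nil {
//				return nil, err
//			}
//			return &ClientGrantsToken{Token: tok, Expiry: 3600}, nil
//		})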
107 | |||
108 | func getClientGrantsCredentials(clnt *http.Client, endpoint string, | ||
109 | getClientGrantsTokenExpiry func() (*ClientGrantsToken, error), | ||
110 | ) (AssumeRoleWithClientGrantsResponse, error) { | ||
111 | accessToken, err := getClientGrantsTokenExpiry() | ||
112 | if err != nil { | ||
113 | return AssumeRoleWithClientGrantsResponse{}, err | ||
114 | } | ||
115 | |||
116 | v := url.Values{} | ||
117 | v.Set("Action", "AssumeRoleWithClientGrants") | ||
118 | v.Set("Token", accessToken.Token) | ||
119 | v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry)) | ||
120 | v.Set("Version", STSVersion) | ||
121 | |||
122 | u, err := url.Parse(endpoint) | ||
123 | if err != nil { | ||
124 | return AssumeRoleWithClientGrantsResponse{}, err | ||
125 | } | ||
126 | |||
127 | req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) | ||
128 | if err != nil { | ||
129 | return AssumeRoleWithClientGrantsResponse{}, err | ||
130 | } | ||
131 | |||
132 | req.Header.Set("Content-Type", "application/x-www-form-urlencoded") | ||
133 | |||
134 | resp, err := clnt.Do(req) | ||
135 | if err != nil { | ||
136 | return AssumeRoleWithClientGrantsResponse{}, err | ||
137 | } | ||
138 | defer resp.Body.Close() | ||
139 | if resp.StatusCode != http.StatusOK { | ||
140 | var errResp ErrorResponse | ||
141 | buf, err := io.ReadAll(resp.Body) | ||
142 | if err != nil { | ||
143 | return AssumeRoleWithClientGrantsResponse{}, err | ||
144 | } | ||
145 | _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) | ||
146 | if err != nil { | ||
147 | var s3Err Error | ||
148 | if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { | ||
149 | return AssumeRoleWithClientGrantsResponse{}, err | ||
150 | } | ||
151 | errResp.RequestID = s3Err.RequestID | ||
152 | errResp.STSError.Code = s3Err.Code | ||
153 | errResp.STSError.Message = s3Err.Message | ||
154 | } | ||
155 | return AssumeRoleWithClientGrantsResponse{}, errResp | ||
156 | } | ||
157 | |||
158 | a := AssumeRoleWithClientGrantsResponse{} | ||
159 | if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { | ||
160 | return AssumeRoleWithClientGrantsResponse{}, err | ||
161 | } | ||
162 | return a, nil | ||
163 | } | ||
164 | |||
165 | // Retrieve retrieves credentials from the MinIO service. | ||
166 | // Error will be returned if the request fails. | ||
167 | func (m *STSClientGrants) Retrieve() (Value, error) { | ||
168 | a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry) | ||
169 | if err != nil { | ||
170 | return Value{}, err | ||
171 | } | ||
172 | |||
173 | // Expiry window is set to DefaultExpiryWindow (refresh begins shortly before expiry). | ||
174 | m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) | ||
175 | |||
176 | return Value{ | ||
177 | AccessKeyID: a.Result.Credentials.AccessKey, | ||
178 | SecretAccessKey: a.Result.Credentials.SecretKey, | ||
179 | SessionToken: a.Result.Credentials.SessionToken, | ||
180 | SignerType: SignatureV4, | ||
181 | }, nil | ||
182 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go new file mode 100644 index 0000000..e1f9ce4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go | |||
@@ -0,0 +1,146 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2022 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "encoding/xml" | ||
22 | "errors" | ||
23 | "fmt" | ||
24 | "net/http" | ||
25 | "net/url" | ||
26 | "time" | ||
27 | ) | ||
28 | |||
29 | // CustomTokenResult - Contains temporary creds and user metadata. | ||
30 | type CustomTokenResult struct { | ||
31 | Credentials struct { | ||
32 | AccessKey string `xml:"AccessKeyId"` | ||
33 | SecretKey string `xml:"SecretAccessKey"` | ||
34 | Expiration time.Time `xml:"Expiration"` | ||
35 | SessionToken string `xml:"SessionToken"` | ||
36 | } `xml:",omitempty"` | ||
37 | |||
38 | AssumedUser string `xml:",omitempty"` | ||
39 | } | ||
40 | |||
41 | // AssumeRoleWithCustomTokenResponse contains the result of a successful | ||
42 | // AssumeRoleWithCustomToken request. | ||
43 | type AssumeRoleWithCustomTokenResponse struct { | ||
44 | XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCustomTokenResponse" json:"-"` | ||
45 | Result CustomTokenResult `xml:"AssumeRoleWithCustomTokenResult"` | ||
46 | Metadata struct { | ||
47 | RequestID string `xml:"RequestId,omitempty"` | ||
48 | } `xml:"ResponseMetadata,omitempty"` | ||
49 | } | ||
50 | |||
51 | // CustomTokenIdentity - satisfies the Provider interface, and retrieves | ||
52 | // credentials from MinIO using the AssumeRoleWithCustomToken STS API. | ||
53 | type CustomTokenIdentity struct { | ||
54 | Expiry | ||
55 | |||
56 | Client *http.Client | ||
57 | |||
58 | // MinIO server STS endpoint to fetch STS credentials. | ||
59 | STSEndpoint string | ||
60 | |||
61 | // The custom token to use with the request. | ||
62 | Token string | ||
63 | |||
64 | // RoleArn associated with the identity | ||
65 | RoleArn string | ||
66 | |||
67 | // RequestedExpiry sets the validity of the generated credentials | ||
68 | // (this value is bounded by the server). | ||
69 | RequestedExpiry time.Duration | ||
70 | } | ||
71 | |||
72 | // Retrieve - to satisfy Provider interface; fetches credentials from MinIO. | ||
73 | func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { | ||
74 | u, err := url.Parse(c.STSEndpoint) | ||
75 | if err != nil { | ||
76 | return value, err | ||
77 | } | ||
78 | |||
79 | v := url.Values{} | ||
80 | v.Set("Action", "AssumeRoleWithCustomToken") | ||
81 | v.Set("Version", STSVersion) | ||
82 | v.Set("RoleArn", c.RoleArn) | ||
83 | v.Set("Token", c.Token) | ||
84 | if c.RequestedExpiry != 0 { | ||
85 | v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds()))) | ||
86 | } | ||
87 | |||
88 | u.RawQuery = v.Encode() | ||
89 | |||
90 | req, err := http.NewRequest(http.MethodPost, u.String(), nil) | ||
91 | if err != nil { | ||
92 | return value, err | ||
93 | } | ||
94 | |||
95 | resp, err := c.Client.Do(req) | ||
96 | if err != nil { | ||
97 | return value, err | ||
98 | } | ||
99 | |||
100 | defer resp.Body.Close() | ||
101 | if resp.StatusCode != http.StatusOK { | ||
102 | return value, errors.New(resp.Status) | ||
103 | } | ||
104 | |||
105 | r := AssumeRoleWithCustomTokenResponse{} | ||
106 | if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { | ||
107 | return | ||
108 | } | ||
109 | |||
110 | cr := r.Result.Credentials | ||
111 | c.SetExpiration(cr.Expiration, DefaultExpiryWindow) | ||
112 | return Value{ | ||
113 | AccessKeyID: cr.AccessKey, | ||
114 | SecretAccessKey: cr.SecretKey, | ||
115 | SessionToken: cr.SessionToken, | ||
116 | SignerType: SignatureV4, | ||
117 | }, nil | ||
118 | } | ||
119 | |||
120 | // NewCustomTokenCredentials - returns credentials using the | ||
121 | // AssumeRoleWithCustomToken STS API. | ||
122 | func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) { | ||
123 | c := CustomTokenIdentity{ | ||
124 | Client: &http.Client{Transport: http.DefaultTransport}, | ||
125 | STSEndpoint: stsEndpoint, | ||
126 | Token: token, | ||
127 | RoleArn: roleArn, | ||
128 | } | ||
129 | for _, optFunc := range optFuncs { | ||
130 | optFunc(&c) | ||
131 | } | ||
132 | return New(&c), nil | ||
133 | } | ||
134 | |||
135 | // CustomTokenOpt is a function type to configure the custom-token based | ||
136 | // credentials using NewCustomTokenCredentials. | ||
137 | type CustomTokenOpt func(*CustomTokenIdentity) | ||
138 | |||
139 | // CustomTokenValidityOpt sets the validity duration of the requested | ||
140 | // credentials. This value is ignored if the server enforces a lower validity | ||
141 | // period. | ||
142 | func CustomTokenValidityOpt(d time.Duration) CustomTokenOpt { | ||
143 | return func(c *CustomTokenIdentity) { | ||
144 | c.RequestedExpiry = d | ||
145 | } | ||
146 | } | ||
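// Editorial usage sketch: the endpoint, token, and role ARN are
// placeholders; the validity option requests one hour, which the server may
// reduce.
//
//	creds, err := NewCustomTokenCredentials(
//		"https://minio.example.com:9000",
//		"my-opaque-token",
//		"arn:minio:iam:::role/external-app",
//		CustomTokenValidityOpt(time.Hour),
//	)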
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go new file mode 100644 index 0000000..ec5f3f0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go | |||
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2019-2022 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "encoding/xml" | ||
23 | "fmt" | ||
24 | "io" | ||
25 | "net/http" | ||
26 | "net/url" | ||
27 | "strings" | ||
28 | "time" | ||
29 | ) | ||
30 | |||
31 | // AssumeRoleWithLDAPResponse contains the result of successful | ||
32 | // AssumeRoleWithLDAPIdentity request | ||
33 | type AssumeRoleWithLDAPResponse struct { | ||
34 | XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"` | ||
35 | Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"` | ||
36 | ResponseMetadata struct { | ||
37 | RequestID string `xml:"RequestId,omitempty"` | ||
38 | } `xml:"ResponseMetadata,omitempty"` | ||
39 | } | ||
40 | |||
41 | // LDAPIdentityResult - contains credentials for a successful | ||
42 | // AssumeRoleWithLDAPIdentity request. | ||
43 | type LDAPIdentityResult struct { | ||
44 | Credentials struct { | ||
45 | AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` | ||
46 | SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` | ||
47 | Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` | ||
48 | SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` | ||
49 | } `xml:",omitempty"` | ||
50 | |||
51 | SubjectFromToken string `xml:",omitempty"` | ||
52 | } | ||
53 | |||
54 | // LDAPIdentity retrieves credentials from MinIO. | ||
55 | type LDAPIdentity struct { | ||
56 | Expiry | ||
57 | |||
58 | // Required http Client to use when connecting to MinIO STS service. | ||
59 | Client *http.Client | ||
60 | |||
61 | // Exported STS endpoint to fetch STS credentials. | ||
62 | STSEndpoint string | ||
63 | |||
64 | // LDAP username/password used to fetch LDAP STS credentials. | ||
65 | LDAPUsername, LDAPPassword string | ||
66 | |||
67 | // Session policy to apply to the generated credentials. Leave empty to | ||
68 | // use the full access policy available to the user. | ||
69 | Policy string | ||
70 | |||
71 | // RequestedExpiry is the configured expiry duration for credentials | ||
72 | // requested from LDAP. | ||
73 | RequestedExpiry time.Duration | ||
74 | } | ||
75 | |||
76 | // NewLDAPIdentity returns a new credentials object that uses LDAP | ||
77 | // Identity. | ||
78 | func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) { | ||
79 | l := LDAPIdentity{ | ||
80 | Client: &http.Client{Transport: http.DefaultTransport}, | ||
81 | STSEndpoint: stsEndpoint, | ||
82 | LDAPUsername: ldapUsername, | ||
83 | LDAPPassword: ldapPassword, | ||
84 | } | ||
85 | for _, optFunc := range optFuncs { | ||
86 | optFunc(&l) | ||
87 | } | ||
88 | return New(&l), nil | ||
89 | } | ||
90 | |||
91 | // LDAPIdentityOpt is a function type used to configure the LDAPIdentity | ||
92 | // instance. | ||
93 | type LDAPIdentityOpt func(*LDAPIdentity) | ||
94 | |||
95 | // LDAPIdentityPolicyOpt sets the session policy for requested credentials. | ||
96 | func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt { | ||
97 | return func(k *LDAPIdentity) { | ||
98 | k.Policy = policy | ||
99 | } | ||
100 | } | ||
101 | |||
102 | // LDAPIdentityExpiryOpt sets the expiry duration for requested credentials. | ||
103 | func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt { | ||
104 | return func(k *LDAPIdentity) { | ||
105 | k.RequestedExpiry = d | ||
106 | } | ||
107 | } | ||
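// Editorial usage sketch: the endpoint and LDAP credentials are
// placeholders; the options attach a session policy (elided here) and
// request a 30-minute expiry.
//
//	creds, err := NewLDAPIdentity(
//		"https://minio.example.com:9000",
//		"ldap-user", "ldap-password",
//		LDAPIdentityPolicyOpt(policyJSON), // policyJSON: a policy document string
//		LDAPIdentityExpiryOpt(30*time.Minute),
//	)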
108 | |||
109 | // NewLDAPIdentityWithSessionPolicy returns a new credentials object that uses | ||
110 | // LDAP Identity with a specified session policy. The `policy` parameter must be | ||
111 | // a JSON string specifying the policy document. | ||
112 | // | ||
113 | // Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead. | ||
114 | func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) { | ||
115 | return New(&LDAPIdentity{ | ||
116 | Client: &http.Client{Transport: http.DefaultTransport}, | ||
117 | STSEndpoint: stsEndpoint, | ||
118 | LDAPUsername: ldapUsername, | ||
119 | LDAPPassword: ldapPassword, | ||
120 | Policy: policy, | ||
121 | }), nil | ||
122 | } | ||
123 | |||
124 | // Retrieve gets the credential by calling the MinIO STS API for | ||
125 | // LDAP on the configured stsEndpoint. | ||
126 | func (k *LDAPIdentity) Retrieve() (value Value, err error) { | ||
127 | u, err := url.Parse(k.STSEndpoint) | ||
128 | if err != nil { | ||
129 | return value, err | ||
130 | } | ||
131 | |||
132 | v := url.Values{} | ||
133 | v.Set("Action", "AssumeRoleWithLDAPIdentity") | ||
134 | v.Set("Version", STSVersion) | ||
135 | v.Set("LDAPUsername", k.LDAPUsername) | ||
136 | v.Set("LDAPPassword", k.LDAPPassword) | ||
137 | if k.Policy != "" { | ||
138 | v.Set("Policy", k.Policy) | ||
139 | } | ||
140 | if k.RequestedExpiry != 0 { | ||
141 | v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds()))) | ||
142 | } | ||
143 | |||
144 | req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) | ||
145 | if err != nil { | ||
146 | return value, err | ||
147 | } | ||
148 | |||
149 | req.Header.Set("Content-Type", "application/x-www-form-urlencoded") | ||
150 | |||
151 | resp, err := k.Client.Do(req) | ||
152 | if err != nil { | ||
153 | return value, err | ||
154 | } | ||
155 | |||
156 | defer resp.Body.Close() | ||
157 | if resp.StatusCode != http.StatusOK { | ||
158 | var errResp ErrorResponse | ||
159 | buf, err := io.ReadAll(resp.Body) | ||
160 | if err != nil { | ||
161 | return value, err | ||
162 | } | ||
163 | _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) | ||
164 | if err != nil { | ||
165 | var s3Err Error | ||
166 | if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { | ||
167 | return value, err | ||
168 | } | ||
169 | errResp.RequestID = s3Err.RequestID | ||
170 | errResp.STSError.Code = s3Err.Code | ||
171 | errResp.STSError.Message = s3Err.Message | ||
172 | } | ||
173 | return value, errResp | ||
174 | } | ||
175 | |||
176 | r := AssumeRoleWithLDAPResponse{} | ||
177 | if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { | ||
178 | return | ||
179 | } | ||
180 | |||
181 | cr := r.Result.Credentials | ||
182 | k.SetExpiration(cr.Expiration, DefaultExpiryWindow) | ||
183 | return Value{ | ||
184 | AccessKeyID: cr.AccessKey, | ||
185 | SecretAccessKey: cr.SecretKey, | ||
186 | SessionToken: cr.SessionToken, | ||
187 | SignerType: SignatureV4, | ||
188 | }, nil | ||
189 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go new file mode 100644 index 0000000..dee0a8c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go | |||
@@ -0,0 +1,211 @@ | |||
1 | // MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
2 | // Copyright 2021 MinIO, Inc. | ||
3 | // | ||
4 | // Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | // you may not use this file except in compliance with the License. | ||
6 | // You may obtain a copy of the License at | ||
7 | // | ||
8 | // http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | // | ||
10 | // Unless required by applicable law or agreed to in writing, software | ||
11 | // distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | // See the License for the specific language governing permissions and | ||
14 | // limitations under the License. | ||
15 | |||
16 | package credentials | ||
17 | |||
18 | import ( | ||
19 | "bytes" | ||
20 | "crypto/tls" | ||
21 | "encoding/xml" | ||
22 | "errors" | ||
23 | "io" | ||
24 | "net" | ||
25 | "net/http" | ||
26 | "net/url" | ||
27 | "strconv" | ||
28 | "time" | ||
29 | ) | ||
30 | |||
31 | // CertificateIdentityOption is an optional AssumeRoleWithCertificate | ||
32 | // parameter - e.g. a custom HTTP transport configuration or S3 credential | ||
33 | // lifetime. | ||
34 | type CertificateIdentityOption func(*STSCertificateIdentity) | ||
35 | |||
36 | // CertificateIdentityWithTransport returns a CertificateIdentityOption that | ||
37 | // customizes the STSCertificateIdentity with the given http.RoundTripper. | ||
38 | func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption { | ||
39 | return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t }) | ||
40 | } | ||
41 | |||
42 | // CertificateIdentityWithExpiry returns a CertificateIdentityOption that | ||
43 | // customizes the STSCertificateIdentity with the given lifetime. | ||
44 | // | ||
45 | // Fetched S3 credentials will have the given lifetime if the STS server | ||
46 | // allows such credentials. | ||
47 | func CertificateIdentityWithExpiry(lifetime time.Duration) CertificateIdentityOption { | ||
48 | return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = lifetime }) | ||
49 | } | ||
50 | |||
51 | // An STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and | ||
52 | // rotates those credentials once they expire. | ||
53 | type STSCertificateIdentity struct { | ||
54 | Expiry | ||
55 | |||
56 | // STSEndpoint is the base URL endpoint of the STS API. | ||
57 | // For example, https://minio.local:9000 | ||
58 | STSEndpoint string | ||
59 | |||
60 | // S3CredentialLivetime is the duration temp. S3 access | ||
61 | // credentials should be valid. | ||
62 | // | ||
63 | // It represents the access credential lifetime requested | ||
64 | // by the client. The STS server may choose to issue | ||
65 | // temp. S3 credentials that have a different - usually | ||
66 | // shorter - lifetime. | ||
67 | // | ||
68 | // The default lifetime is one hour. | ||
69 | S3CredentialLivetime time.Duration | ||
70 | |||
71 | // Client is the HTTP client used to authenticate and fetch | ||
72 | // S3 credentials. | ||
73 | // | ||
74 | // A custom TLS client configuration can be specified by | ||
75 | // using a custom http.Transport: | ||
76 | // Client: http.Client { | ||
77 | // Transport: &http.Transport{ | ||
78 | // TLSClientConfig: &tls.Config{}, | ||
79 | // }, | ||
80 | // } | ||
81 | Client http.Client | ||
82 | } | ||
83 | |||
84 | var _ Provider = (*STSCertificateIdentity)(nil) // compiler check | ||
85 | |||
86 | // NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates | ||
87 | // to the given STS endpoint with the given TLS certificate and retrieves and | ||
88 | // rotates S3 credentials. | ||
89 | func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) { | ||
90 | if endpoint == "" { | ||
91 | return nil, errors.New("STS endpoint cannot be empty") | ||
92 | } | ||
93 | if _, err := url.Parse(endpoint); err != nil { | ||
94 | return nil, err | ||
95 | } | ||
96 | identity := &STSCertificateIdentity{ | ||
97 | STSEndpoint: endpoint, | ||
98 | Client: http.Client{ | ||
99 | Transport: &http.Transport{ | ||
100 | Proxy: http.ProxyFromEnvironment, | ||
101 | DialContext: (&net.Dialer{ | ||
102 | Timeout: 30 * time.Second, | ||
103 | KeepAlive: 30 * time.Second, | ||
104 | }).DialContext, | ||
105 | ForceAttemptHTTP2: true, | ||
106 | MaxIdleConns: 100, | ||
107 | IdleConnTimeout: 90 * time.Second, | ||
108 | TLSHandshakeTimeout: 10 * time.Second, | ||
109 | ExpectContinueTimeout: 5 * time.Second, | ||
110 | TLSClientConfig: &tls.Config{ | ||
111 | Certificates: []tls.Certificate{certificate}, | ||
112 | }, | ||
113 | }, | ||
114 | }, | ||
115 | } | ||
116 | for _, option := range options { | ||
117 | option(identity) | ||
118 | } | ||
119 | return New(identity), nil | ||
120 | } | ||
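// Editorial usage sketch: the certificate pair is loaded from placeholder
// paths; the requested 30-minute lifetime may be capped by the STS server.
//
//	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
//	if err != nil {
//		// handle error
//	}
//	creds, err := NewSTSCertificateIdentity(
//		"https://minio.example.com:9000", cert,
//		CertificateIdentityWithExpiry(30*time.Minute),
//	)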
121 | |||
122 | // Retrieve fetches a new set of S3 credentials from the configured | ||
123 | // STS API endpoint. | ||
124 | func (i *STSCertificateIdentity) Retrieve() (Value, error) { | ||
125 | endpointURL, err := url.Parse(i.STSEndpoint) | ||
126 | if err != nil { | ||
127 | return Value{}, err | ||
128 | } | ||
129 | lifetime := i.S3CredentialLivetime | ||
130 | if lifetime == 0 { | ||
131 | lifetime = 1 * time.Hour | ||
132 | } | ||
133 | |||
134 | queryValues := url.Values{} | ||
135 | queryValues.Set("Action", "AssumeRoleWithCertificate") | ||
136 | queryValues.Set("Version", STSVersion) | ||
137 | endpointURL.RawQuery = queryValues.Encode() | ||
138 | |||
139 | req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil) | ||
140 | if err != nil { | ||
141 | return Value{}, err | ||
142 | } | ||
143 | if req.Form == nil { | ||
144 | req.Form = url.Values{} | ||
145 | } | ||
146 | req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(lifetime.Seconds()), 10)) | ||
147 | |||
148 | resp, err := i.Client.Do(req) | ||
149 | if err != nil { | ||
150 | return Value{}, err | ||
151 | } | ||
152 | if resp.Body != nil { | ||
153 | defer resp.Body.Close() | ||
154 | } | ||
155 | if resp.StatusCode != http.StatusOK { | ||
156 | var errResp ErrorResponse | ||
157 | buf, err := io.ReadAll(resp.Body) | ||
158 | if err != nil { | ||
159 | return Value{}, err | ||
160 | } | ||
161 | _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) | ||
162 | if err != nil { | ||
163 | var s3Err Error | ||
164 | if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { | ||
165 | return Value{}, err | ||
166 | } | ||
167 | errResp.RequestID = s3Err.RequestID | ||
168 | errResp.STSError.Code = s3Err.Code | ||
169 | errResp.STSError.Message = s3Err.Message | ||
170 | } | ||
171 | return Value{}, errResp | ||
172 | } | ||
173 | |||
174 | const MaxSize = 10 * 1 << 20 | ||
175 | var body io.Reader = resp.Body | ||
176 | if resp.ContentLength > 0 && resp.ContentLength < MaxSize { | ||
177 | body = io.LimitReader(body, resp.ContentLength) | ||
178 | } else { | ||
179 | body = io.LimitReader(body, MaxSize) | ||
180 | } | ||
181 | |||
182 | var response assumeRoleWithCertificateResponse | ||
183 | if err = xml.NewDecoder(body).Decode(&response); err != nil { | ||
184 | return Value{}, err | ||
185 | } | ||
186 | i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow) | ||
187 | return Value{ | ||
188 | AccessKeyID: response.Result.Credentials.AccessKey, | ||
189 | SecretAccessKey: response.Result.Credentials.SecretKey, | ||
190 | SessionToken: response.Result.Credentials.SessionToken, | ||
191 | SignerType: SignatureDefault, | ||
192 | }, nil | ||
193 | } | ||
194 | |||
195 | // Expiration returns the expiration time of the current S3 credentials. | ||
196 | func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration } | ||
197 | |||
198 | type assumeRoleWithCertificateResponse struct { | ||
199 | XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"` | ||
200 | Result struct { | ||
201 | Credentials struct { | ||
202 | AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` | ||
203 | SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` | ||
204 | Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` | ||
205 | SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` | ||
206 | } `xml:"Credentials" json:"credentials,omitempty"` | ||
207 | } `xml:"AssumeRoleWithCertificateResult"` | ||
208 | ResponseMetadata struct { | ||
209 | RequestID string `xml:"RequestId,omitempty"` | ||
210 | } `xml:"ResponseMetadata,omitempty"` | ||
211 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go new file mode 100644 index 0000000..2e2af50 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2019-2022 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package credentials | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "encoding/xml" | ||
23 | "errors" | ||
24 | "fmt" | ||
25 | "io" | ||
26 | "net/http" | ||
27 | "net/url" | ||
28 | "strconv" | ||
29 | "strings" | ||
30 | "time" | ||
31 | ) | ||
32 | |||
33 | // AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request. | ||
34 | type AssumeRoleWithWebIdentityResponse struct { | ||
35 | XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` | ||
36 | Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` | ||
37 | ResponseMetadata struct { | ||
38 | RequestID string `xml:"RequestId,omitempty"` | ||
39 | } `xml:"ResponseMetadata,omitempty"` | ||
40 | } | ||
41 | |||
42 | // WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity | ||
43 | // request, including temporary credentials that can be used to make MinIO API requests. | ||
44 | type WebIdentityResult struct { | ||
45 | AssumedRoleUser AssumedRoleUser `xml:",omitempty"` | ||
46 | Audience string `xml:",omitempty"` | ||
47 | Credentials struct { | ||
48 | AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` | ||
49 | SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` | ||
50 | Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` | ||
51 | SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` | ||
52 | } `xml:",omitempty"` | ||
53 | PackedPolicySize int `xml:",omitempty"` | ||
54 | Provider string `xml:",omitempty"` | ||
55 | SubjectFromWebIdentityToken string `xml:",omitempty"` | ||
56 | } | ||
57 | |||
58 | // WebIdentityToken - web identity token with expiry. | ||
59 | type WebIdentityToken struct { | ||
60 | Token string | ||
61 | AccessToken string | ||
62 | Expiry int | ||
63 | } | ||
64 | |||
65 | // An STSWebIdentity retrieves credentials from the MinIO service, and keeps | ||
66 | // track of whether those credentials are expired. | ||
67 | type STSWebIdentity struct { | ||
68 | Expiry | ||
69 | |||
70 | // Required http Client to use when connecting to MinIO STS service. | ||
71 | Client *http.Client | ||
72 | |||
73 | // Exported STS endpoint to fetch STS credentials. | ||
74 | STSEndpoint string | ||
75 | |||
76 | // Exported GetWebIDTokenExpiry function which returns ID | ||
77 | // tokens from the IDP. This function should return two values: | ||
78 | // one is the ID token, which is a self-contained ID token (JWT), | ||
79 | // and the second return value is the expiry associated with | ||
80 | // this token. | ||
81 | // This is a customer-provided function and is mandatory. | ||
82 | GetWebIDTokenExpiry func() (*WebIdentityToken, error) | ||
83 | |||
84 | // RoleARN is the Amazon Resource Name (ARN) of the role that the caller is | ||
85 | // assuming. | ||
86 | RoleARN string | ||
87 | |||
88 | // roleSessionName is the identifier for the assumed role session. | ||
89 | roleSessionName string | ||
90 | } | ||
91 | |||
92 | // NewSTSWebIdentity returns a pointer to a new | ||
93 | // Credentials object wrapping the STSWebIdentity. | ||
94 | func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { | ||
95 | if stsEndpoint == "" { | ||
96 | return nil, errors.New("STS endpoint cannot be empty") | ||
97 | } | ||
98 | if getWebIDTokenExpiry == nil { | ||
99 | return nil, errors.New("Web ID token and expiry retrieval function should be defined") | ||
100 | } | ||
101 | return New(&STSWebIdentity{ | ||
102 | Client: &http.Client{ | ||
103 | Transport: http.DefaultTransport, | ||
104 | }, | ||
105 | STSEndpoint: stsEndpoint, | ||
106 | GetWebIDTokenExpiry: getWebIDTokenExpiry, | ||
107 | }), nil | ||
108 | } | ||
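// Editorial usage sketch: the callback reads a service-account JWT from a
// placeholder path (as a Kubernetes projected token volume would provide);
// assumes the "os" import.
//
//	creds, err := NewSTSWebIdentity("https://minio.example.com:9000",
//		func() (*WebIdentityToken, error) {
//			token, err := os.ReadFile("/var/run/secrets/token")
//			if err != nil {
//				return nil, err
//			}
//			return &WebIdentityToken{Token: string(token)}, nil
//		})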
109 | |||
110 | func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, | ||
111 | getWebIDTokenExpiry func() (*WebIdentityToken, error), | ||
112 | ) (AssumeRoleWithWebIdentityResponse, error) { | ||
113 | idToken, err := getWebIDTokenExpiry() | ||
114 | if err != nil { | ||
115 | return AssumeRoleWithWebIdentityResponse{}, err | ||
116 | } | ||
117 | |||
118 | v := url.Values{} | ||
119 | v.Set("Action", "AssumeRoleWithWebIdentity") | ||
120 | if len(roleARN) > 0 { | ||
121 | v.Set("RoleArn", roleARN) | ||
122 | |||
123 | if len(roleSessionName) == 0 { | ||
124 | roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10) | ||
125 | } | ||
126 | v.Set("RoleSessionName", roleSessionName) | ||
127 | } | ||
128 | v.Set("WebIdentityToken", idToken.Token) | ||
129 | if idToken.AccessToken != "" { | ||
130 | // Usually set when server is using extended userInfo endpoint. | ||
131 | v.Set("WebIdentityAccessToken", idToken.AccessToken) | ||
132 | } | ||
133 | if idToken.Expiry > 0 { | ||
134 | v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) | ||
135 | } | ||
136 | v.Set("Version", STSVersion) | ||
137 | |||
138 | u, err := url.Parse(endpoint) | ||
139 | if err != nil { | ||
140 | return AssumeRoleWithWebIdentityResponse{}, err | ||
141 | } | ||
142 | |||
143 | req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) | ||
144 | if err != nil { | ||
145 | return AssumeRoleWithWebIdentityResponse{}, err | ||
146 | } | ||
147 | |||
148 | req.Header.Set("Content-Type", "application/x-www-form-urlencoded") | ||
149 | |||
150 | resp, err := clnt.Do(req) | ||
151 | if err != nil { | ||
152 | return AssumeRoleWithWebIdentityResponse{}, err | ||
153 | } | ||
154 | |||
155 | defer resp.Body.Close() | ||
156 | if resp.StatusCode != http.StatusOK { | ||
157 | var errResp ErrorResponse | ||
158 | buf, err := io.ReadAll(resp.Body) | ||
159 | if err != nil { | ||
160 | return AssumeRoleWithWebIdentityResponse{}, err | ||
161 | } | ||
162 | _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) | ||
163 | if err != nil { | ||
164 | var s3Err Error | ||
165 | if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { | ||
166 | return AssumeRoleWithWebIdentityResponse{}, err | ||
167 | } | ||
168 | errResp.RequestID = s3Err.RequestID | ||
169 | errResp.STSError.Code = s3Err.Code | ||
170 | errResp.STSError.Message = s3Err.Message | ||
171 | } | ||
172 | return AssumeRoleWithWebIdentityResponse{}, errResp | ||
173 | } | ||
174 | |||
175 | a := AssumeRoleWithWebIdentityResponse{} | ||
176 | if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { | ||
177 | return AssumeRoleWithWebIdentityResponse{}, err | ||
178 | } | ||
179 | |||
180 | return a, nil | ||
181 | } | ||
182 | |||
183 | // Retrieve retrieves credentials from the MinIO service. | ||
184 | // Error will be returned if the request fails. | ||
185 | func (m *STSWebIdentity) Retrieve() (Value, error) { | ||
186 | a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry) | ||
187 | if err != nil { | ||
188 | return Value{}, err | ||
189 | } | ||
190 | |||
191 | // Expiry window is set to DefaultExpiryWindow (refresh begins shortly before expiry). | ||
192 | m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) | ||
193 | |||
194 | return Value{ | ||
195 | AccessKeyID: a.Result.Credentials.AccessKey, | ||
196 | SecretAccessKey: a.Result.Credentials.SecretKey, | ||
197 | SessionToken: a.Result.Credentials.SessionToken, | ||
198 | SignerType: SignatureV4, | ||
199 | }, nil | ||
200 | } | ||
201 | |||
202 | // Expiration returns the expiration time of the credentials. | ||
203 | func (m *STSWebIdentity) Expiration() time.Time { | ||
204 | return m.expiration | ||
205 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go new file mode 100644 index 0000000..6db26c0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go | |||
@@ -0,0 +1,24 @@ | |||
1 | //go:build !fips | ||
2 | // +build !fips | ||
3 | |||
4 | /* | ||
5 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
6 | * Copyright 2022 MinIO, Inc. | ||
7 | * | ||
8 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
9 | * you may not use this file except in compliance with the License. | ||
10 | * You may obtain a copy of the License at | ||
11 | * | ||
12 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
13 | * | ||
14 | * Unless required by applicable law or agreed to in writing, software | ||
15 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
17 | * See the License for the specific language governing permissions and | ||
18 | * limitations under the License. | ||
19 | */ | ||
20 | |||
21 | package encrypt | ||
22 | |||
23 | // FIPS is true if 'fips' build tag was specified. | ||
24 | const FIPS = false | ||
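// The constant flips to true when the package is built with the fips tag
// (see fips_enabled.go), e.g.:
//
//	go build -tags fips ./...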
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go new file mode 100644 index 0000000..6402582 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go | |||
@@ -0,0 +1,24 @@ | |||
1 | //go:build fips | ||
2 | // +build fips | ||
3 | |||
4 | /* | ||
5 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
6 | * Copyright 2022 MinIO, Inc. | ||
7 | * | ||
8 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
9 | * you may not use this file except in compliance with the License. | ||
10 | * You may obtain a copy of the License at | ||
11 | * | ||
12 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
13 | * | ||
14 | * Unless required by applicable law or agreed to in writing, software | ||
15 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
17 | * See the License for the specific language governing permissions and | ||
18 | * limitations under the License. | ||
19 | */ | ||
20 | |||
21 | package encrypt | ||
22 | |||
23 | // FIPS is true if 'fips' build tag was specified. | ||
24 | const FIPS = true | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go new file mode 100644 index 0000000..a7081c5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2018 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package encrypt | ||
19 | |||
20 | import ( | ||
21 | "crypto/md5" | ||
22 | "encoding/base64" | ||
23 | "errors" | ||
24 | "net/http" | ||
25 | |||
26 | jsoniter "github.com/json-iterator/go" | ||
27 | "golang.org/x/crypto/argon2" | ||
28 | ) | ||
29 | |||
30 | const ( | ||
31 | // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. | ||
32 | SseGenericHeader = "X-Amz-Server-Side-Encryption" | ||
33 | |||
34 | // SseKmsKeyID is the AWS SSE-KMS key id. | ||
35 | SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id" | ||
36 | // SseEncryptionContext is the AWS SSE-KMS Encryption Context data. | ||
37 | SseEncryptionContext = SseGenericHeader + "-Context" | ||
38 | |||
39 | // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. | ||
40 | SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm" | ||
41 | // SseCustomerKey is the AWS SSE-C encryption key HTTP header key. | ||
42 | SseCustomerKey = SseGenericHeader + "-Customer-Key" | ||
43 | // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. | ||
44 | SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5" | ||
45 | |||
46 | // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. | ||
47 | SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" | ||
48 | // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. | ||
49 | SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" | ||
50 | // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. | ||
51 | SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" | ||
52 | ) | ||
53 | |||
54 | // PBKDF creates an SSE-C key from the provided password and salt. | ||
55 | // PBKDF is a password-based key derivation function | ||
56 | // which can be used to derive a high-entropy cryptographic | ||
57 | // key from a low-entropy password and a salt. | ||
58 | type PBKDF func(password, salt []byte) ServerSide | ||
59 | |||
60 | // DefaultPBKDF is the default PBKDF. It uses Argon2id with the | ||
61 | // recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). | ||
62 | var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { | ||
63 | sse := ssec{} | ||
64 | copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) | ||
65 | return sse | ||
66 | } | ||
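// A minimal caller-side sketch (names are placeholders): derive the key from
// a password plus a per-object salt such as bucket+object, so the identical
// key can be re-derived when reading the object back:
//
//	salt := []byte("mybucket" + "myobject")
//	sse := encrypt.DefaultPBKDF([]byte("my-secret-password"), salt)
//	header := http.Header{}
//	sse.Marshal(header) // sets the SSE-C algorithm, key and key-MD5 headers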
67 | |||
68 | // Type is the server-side-encryption method. It represents one of | ||
69 | // the following encryption methods: | ||
70 | // - SSE-C: server-side-encryption with customer provided keys | ||
71 | // - KMS: server-side-encryption with managed keys | ||
72 | // - S3: server-side-encryption using S3 storage encryption | ||
73 | type Type string | ||
74 | |||
75 | const ( | ||
76 | // SSEC represents server-side-encryption with customer provided keys | ||
77 | SSEC Type = "SSE-C" | ||
78 | // KMS represents server-side-encryption with managed keys | ||
79 | KMS Type = "KMS" | ||
80 | // S3 represents server-side-encryption using S3 storage encryption | ||
81 | S3 Type = "S3" | ||
82 | ) | ||
83 | |||
84 | // ServerSide is a form of S3 server-side-encryption. | ||
85 | type ServerSide interface { | ||
86 | // Type returns the server-side-encryption method. | ||
87 | Type() Type | ||
88 | |||
89 | // Marshal adds encryption headers to the provided HTTP headers. | ||
90 | // It marks an HTTP request as a server-side-encryption request | ||
91 | // and inserts the required data into the headers. | ||
92 | Marshal(h http.Header) | ||
93 | } | ||
94 | |||
95 | // NewSSE returns a server-side-encryption using S3 storage encryption. | ||
96 | // Using SSE-S3 the server will encrypt the object with server-managed keys. | ||
97 | func NewSSE() ServerSide { return s3{} } | ||
98 | |||
99 | // NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. | ||
100 | func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { | ||
101 | if context == nil { | ||
102 | return kms{key: keyID, hasContext: false}, nil | ||
103 | } | ||
104 | json := jsoniter.ConfigCompatibleWithStandardLibrary | ||
105 | serializedContext, err := json.Marshal(context) | ||
106 | if err != nil { | ||
107 | return nil, err | ||
108 | } | ||
109 | return kms{key: keyID, context: serializedContext, hasContext: true}, nil | ||
110 | } | ||
111 | |||
112 | // NewSSEC returns a new server-side-encryption using SSE-C and the provided key. | ||
113 | // The key must be 32 bytes long. | ||
114 | func NewSSEC(key []byte) (ServerSide, error) { | ||
115 | if len(key) != 32 { | ||
116 | return nil, errors.New("encrypt: SSE-C key must be 256 bits long") | ||
117 | } | ||
118 | sse := ssec{} | ||
119 | copy(sse[:], key) | ||
120 | return sse, nil | ||
121 | } | ||
122 | |||
123 | // SSE transforms a SSE-C copy encryption into a SSE-C encryption. | ||
124 | // It is the inverse of SSECopy(...). | ||
125 | // | ||
126 | // If the provided sse is not an SSE-C copy encryption, SSE returns | ||
127 | // sse unmodified. | ||
128 | func SSE(sse ServerSide) ServerSide { | ||
129 | if sse == nil || sse.Type() != SSEC { | ||
130 | return sse | ||
131 | } | ||
132 | if sse, ok := sse.(ssecCopy); ok { | ||
133 | return ssec(sse) | ||
134 | } | ||
135 | return sse | ||
136 | } | ||
137 | |||
138 | // SSECopy transforms a SSE-C encryption into a SSE-C copy | ||
139 | // encryption. This is required for SSE-C key rotation or a SSE-C | ||
140 | // copy where the source and the destination should be encrypted. | ||
141 | // | ||
142 | // If the provided sse is not an SSE-C encryption, SSECopy returns | ||
143 | // sse unmodified. | ||
144 | func SSECopy(sse ServerSide) ServerSide { | ||
145 | if sse == nil || sse.Type() != SSEC { | ||
146 | return sse | ||
147 | } | ||
148 | if sse, ok := sse.(ssec); ok { | ||
149 | return ssecCopy(sse) | ||
150 | } | ||
151 | return sse | ||
152 | } | ||
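// A hedged key-rotation sketch showing how SSE and SSECopy pair up on a
// copy request (the zeroed keys are placeholders for real 32-byte secrets):
//
//	oldKey, newKey := make([]byte, 32), make([]byte, 32)
//	src, _ := NewSSEC(oldKey)
//	dst, _ := NewSSEC(newKey)
//	h := http.Header{}
//	SSECopy(src).Marshal(h) // X-Amz-Copy-Source-* headers to decrypt the source
//	dst.Marshal(h)          // X-Amz-Server-Side-Encryption-Customer-* headers for the destination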
153 | |||
154 | type ssec [32]byte | ||
155 | |||
156 | func (s ssec) Type() Type { return SSEC } | ||
157 | |||
158 | func (s ssec) Marshal(h http.Header) { | ||
159 | keyMD5 := md5.Sum(s[:]) | ||
160 | h.Set(SseCustomerAlgorithm, "AES256") | ||
161 | h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) | ||
162 | h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) | ||
163 | } | ||
164 | |||
165 | type ssecCopy [32]byte | ||
166 | |||
167 | func (s ssecCopy) Type() Type { return SSEC } | ||
168 | |||
169 | func (s ssecCopy) Marshal(h http.Header) { | ||
170 | keyMD5 := md5.Sum(s[:]) | ||
171 | h.Set(SseCopyCustomerAlgorithm, "AES256") | ||
172 | h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) | ||
173 | h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) | ||
174 | } | ||
175 | |||
176 | type s3 struct{} | ||
177 | |||
178 | func (s s3) Type() Type { return S3 } | ||
179 | |||
180 | func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") } | ||
181 | |||
182 | type kms struct { | ||
183 | key string | ||
184 | context []byte | ||
185 | hasContext bool | ||
186 | } | ||
187 | |||
188 | func (s kms) Type() Type { return KMS } | ||
189 | |||
190 | func (s kms) Marshal(h http.Header) { | ||
191 | h.Set(SseGenericHeader, "aws:kms") | ||
192 | if s.key != "" { | ||
193 | h.Set(SseKmsKeyID, s.key) | ||
194 | } | ||
195 | if s.hasContext { | ||
196 | h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) | ||
197 | } | ||
198 | } | ||
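A small caller-side sketch of the SSE-KMS path; the key ID and context below are placeholders:

package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func main() {
	sse, err := encrypt.NewSSEKMS("my-key-id", map[string]string{"tenant": "a"})
	if err != nil {
		panic(err)
	}
	h := http.Header{}
	sse.Marshal(h)
	fmt.Println(h.Get(encrypt.SseGenericHeader)) // aws:kms
	fmt.Println(h.Get(encrypt.SseKmsKeyID))      // my-key-id
	// SseEncryptionContext carries the base64 of the JSON-serialized context.
	fmt.Println(h.Get(encrypt.SseEncryptionContext))
}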
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go new file mode 100644 index 0000000..c52f78c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go | |||
@@ -0,0 +1,491 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | // Package lifecycle contains all the lifecycle related data types and marshallers. | ||
19 | package lifecycle | ||
20 | |||
21 | import ( | ||
22 | "encoding/json" | ||
23 | "encoding/xml" | ||
24 | "errors" | ||
25 | "time" | ||
26 | ) | ||
27 | |||
28 | var errMissingStorageClass = errors.New("storage-class cannot be empty") | ||
29 | |||
30 | // AbortIncompleteMultipartUpload structure, not supported yet on MinIO | ||
31 | type AbortIncompleteMultipartUpload struct { | ||
32 | XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"` | ||
33 | DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"` | ||
34 | } | ||
35 | |||
36 | // IsDaysNull returns true if days field is null | ||
37 | func (n AbortIncompleteMultipartUpload) IsDaysNull() bool { | ||
38 | return n.DaysAfterInitiation == ExpirationDays(0) | ||
39 | } | ||
40 | |||
41 | // MarshalXML encodes the element only if DaysAfterInitiation is non-zero | ||
42 | func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
43 | if n.IsDaysNull() { | ||
44 | return nil | ||
45 | } | ||
46 | type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload | ||
47 | return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start) | ||
48 | } | ||
49 | |||
50 | // NoncurrentVersionExpiration - Specifies when noncurrent object versions expire. | ||
51 | // Upon expiration, server permanently deletes the noncurrent object versions. | ||
52 | // Set this lifecycle configuration action on a bucket that has versioning enabled | ||
53 | // (or suspended) to request that the server delete noncurrent object versions at a | ||
54 | // specific period in the object's lifetime. | ||
55 | type NoncurrentVersionExpiration struct { | ||
56 | XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"` | ||
57 | NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"` | ||
58 | NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"` | ||
59 | } | ||
60 | |||
61 | // MarshalXML encodes n only if it is non-empty, i.e. has a non-zero NoncurrentDays or NewerNoncurrentVersions. | ||
62 | func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
63 | if n.isNull() { | ||
64 | return nil | ||
65 | } | ||
66 | type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration | ||
67 | return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start) | ||
68 | } | ||
69 | |||
70 | // IsDaysNull returns true if days field is null | ||
71 | func (n NoncurrentVersionExpiration) IsDaysNull() bool { | ||
72 | return n.NoncurrentDays == ExpirationDays(0) | ||
73 | } | ||
74 | |||
75 | func (n NoncurrentVersionExpiration) isNull() bool { | ||
76 | return n.IsDaysNull() && n.NewerNoncurrentVersions == 0 | ||
77 | } | ||
78 | |||
79 | // NoncurrentVersionTransition structure, set this action to request server to | ||
80 | // transition noncurrent object versions to a different storage class | ||
81 | // at a specific period in the object's lifetime. | ||
82 | type NoncurrentVersionTransition struct { | ||
83 | XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"` | ||
84 | StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` | ||
85 | NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"` | ||
86 | NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"` | ||
87 | } | ||
88 | |||
89 | // IsDaysNull returns true if days field is null | ||
90 | func (n NoncurrentVersionTransition) IsDaysNull() bool { | ||
91 | return n.NoncurrentDays == ExpirationDays(0) | ||
92 | } | ||
93 | |||
94 | // IsStorageClassEmpty returns true if storage class field is empty | ||
95 | func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool { | ||
96 | return n.StorageClass == "" | ||
97 | } | ||
98 | |||
99 | func (n NoncurrentVersionTransition) isNull() bool { | ||
100 | return n.StorageClass == "" | ||
101 | } | ||
102 | |||
103 | // UnmarshalJSON decodes a NoncurrentVersionTransition and rejects an empty storage-class | ||
104 | func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error { | ||
105 | type noncurrentVersionTransition NoncurrentVersionTransition | ||
106 | var nt noncurrentVersionTransition | ||
107 | err := json.Unmarshal(b, &nt) | ||
108 | if err != nil { | ||
109 | return err | ||
110 | } | ||
111 | |||
112 | if nt.StorageClass == "" { | ||
113 | return errMissingStorageClass | ||
114 | } | ||
115 | *n = NoncurrentVersionTransition(nt) | ||
116 | return nil | ||
117 | } | ||
118 | |||
119 | // MarshalXML is extended to leave out | ||
120 | // <NoncurrentVersionTransition></NoncurrentVersionTransition> tags | ||
121 | func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
122 | if n.isNull() { | ||
123 | return nil | ||
124 | } | ||
125 | type noncurrentVersionTransitionWrapper NoncurrentVersionTransition | ||
126 | return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start) | ||
127 | } | ||
128 | |||
129 | // Tag structure key/value pair representing an object tag to apply lifecycle configuration | ||
130 | type Tag struct { | ||
131 | XMLName xml.Name `xml:"Tag,omitempty" json:"-"` | ||
132 | Key string `xml:"Key,omitempty" json:"Key,omitempty"` | ||
133 | Value string `xml:"Value,omitempty" json:"Value,omitempty"` | ||
134 | } | ||
135 | |||
136 | // IsEmpty returns whether this tag is empty or not. | ||
137 | func (tag Tag) IsEmpty() bool { | ||
138 | return tag.Key == "" | ||
139 | } | ||
140 | |||
141 | // Transition structure - transition details of lifecycle configuration | ||
142 | type Transition struct { | ||
143 | XMLName xml.Name `xml:"Transition" json:"-"` | ||
144 | Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` | ||
145 | StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` | ||
146 | Days ExpirationDays `xml:"Days" json:"Days"` | ||
147 | } | ||
148 | |||
149 | // UnmarshalJSON returns an error if storage-class is empty. | ||
150 | func (t *Transition) UnmarshalJSON(b []byte) error { | ||
151 | type transition Transition | ||
152 | var tr transition | ||
153 | err := json.Unmarshal(b, &tr) | ||
154 | if err != nil { | ||
155 | return err | ||
156 | } | ||
157 | |||
158 | if tr.StorageClass == "" { | ||
159 | return errMissingStorageClass | ||
160 | } | ||
161 | *t = Transition(tr) | ||
162 | return nil | ||
163 | } | ||
164 | |||
165 | // MarshalJSON customizes json encoding by omitting empty values | ||
166 | func (t Transition) MarshalJSON() ([]byte, error) { | ||
167 | if t.IsNull() { | ||
168 | return nil, nil | ||
169 | } | ||
170 | type transition struct { | ||
171 | Date *ExpirationDate `json:"Date,omitempty"` | ||
172 | StorageClass string `json:"StorageClass,omitempty"` | ||
173 | Days *ExpirationDays `json:"Days"` | ||
174 | } | ||
175 | |||
176 | newt := transition{ | ||
177 | StorageClass: t.StorageClass, | ||
178 | } | ||
179 | |||
180 | if !t.IsDateNull() { | ||
181 | newt.Date = &t.Date | ||
182 | } else { | ||
183 | newt.Days = &t.Days | ||
184 | } | ||
185 | return json.Marshal(newt) | ||
186 | } | ||
187 | |||
188 | // IsDaysNull returns true if days field is null | ||
189 | func (t Transition) IsDaysNull() bool { | ||
190 | return t.Days == ExpirationDays(0) | ||
191 | } | ||
192 | |||
193 | // IsDateNull returns true if date field is null | ||
194 | func (t Transition) IsDateNull() bool { | ||
195 | return t.Date.Time.IsZero() | ||
196 | } | ||
197 | |||
198 | // IsNull returns true if no storage-class is set. | ||
199 | func (t Transition) IsNull() bool { | ||
200 | return t.StorageClass == "" | ||
201 | } | ||
202 | |||
203 | // MarshalXML encodes the transition only if it is non-null | ||
204 | func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { | ||
205 | if t.IsNull() { | ||
206 | return nil | ||
207 | } | ||
208 | type transitionWrapper Transition | ||
209 | return en.EncodeElement(transitionWrapper(t), startElement) | ||
210 | } | ||
211 | |||
212 | // And - the And rule combining prefix, tags and object-size bounds, to be used in a lifecycle rule Filter | ||
213 | type And struct { | ||
214 | XMLName xml.Name `xml:"And" json:"-"` | ||
215 | Prefix string `xml:"Prefix" json:"Prefix,omitempty"` | ||
216 | Tags []Tag `xml:"Tag" json:"Tags,omitempty"` | ||
217 | ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"` | ||
218 | ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"` | ||
219 | } | ||
220 | |||
221 | // IsEmpty returns true if all And fields are empty | ||
222 | func (a And) IsEmpty() bool { | ||
223 | return len(a.Tags) == 0 && a.Prefix == "" && | ||
224 | a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0 | ||
225 | } | ||
226 | |||
227 | // Filter will be used in selecting rule(s) for lifecycle configuration | ||
228 | type Filter struct { | ||
229 | XMLName xml.Name `xml:"Filter" json:"-"` | ||
230 | And And `xml:"And,omitempty" json:"And,omitempty"` | ||
231 | Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` | ||
232 | Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` | ||
233 | ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"` | ||
234 | ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"` | ||
235 | } | ||
236 | |||
237 | // IsNull returns true if all Filter fields are empty. | ||
238 | func (f Filter) IsNull() bool { | ||
239 | return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" && | ||
240 | f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0 | ||
241 | } | ||
242 | |||
243 | // MarshalJSON customizes json encoding by removing empty values. | ||
244 | func (f Filter) MarshalJSON() ([]byte, error) { | ||
245 | type filter struct { | ||
246 | And *And `json:"And,omitempty"` | ||
247 | Prefix string `json:"Prefix,omitempty"` | ||
248 | Tag *Tag `json:"Tag,omitempty"` | ||
249 | ObjectSizeLessThan int64 `json:"ObjectSizeLessThan,omitempty"` | ||
250 | ObjectSizeGreaterThan int64 `json:"ObjectSizeGreaterThan,omitempty"` | ||
251 | } | ||
252 | |||
253 | newf := filter{ | ||
254 | Prefix: f.Prefix, | ||
255 | } | ||
256 | if !f.Tag.IsEmpty() { | ||
257 | newf.Tag = &f.Tag | ||
258 | } | ||
259 | if !f.And.IsEmpty() { | ||
260 | newf.And = &f.And | ||
261 | } | ||
262 | newf.ObjectSizeLessThan = f.ObjectSizeLessThan | ||
263 | newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan | ||
264 | return json.Marshal(newf) | ||
265 | } | ||
266 | |||
267 | // MarshalXML produces the XML representation of the Filter struct; | ||
268 | // only one of Prefix, And and Tag is present in the output. | ||
269 | func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
270 | if err := e.EncodeToken(start); err != nil { | ||
271 | return err | ||
272 | } | ||
273 | |||
274 | switch { | ||
275 | case !f.And.IsEmpty(): | ||
276 | if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil { | ||
277 | return err | ||
278 | } | ||
279 | case !f.Tag.IsEmpty(): | ||
280 | if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil { | ||
281 | return err | ||
282 | } | ||
283 | default: | ||
284 | if f.ObjectSizeLessThan > 0 { | ||
285 | if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil { | ||
286 | return err | ||
287 | } | ||
288 | break | ||
289 | } | ||
290 | if f.ObjectSizeGreaterThan > 0 { | ||
291 | if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil { | ||
292 | return err | ||
293 | } | ||
294 | break | ||
295 | } | ||
296 | // Print empty Prefix field only when everything else is empty | ||
297 | if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil { | ||
298 | return err | ||
299 | } | ||
300 | } | ||
301 | |||
302 | return e.EncodeToken(xml.EndElement{Name: start.Name}) | ||
303 | } | ||
304 | |||
305 | // ExpirationDays is an integer type used to marshal/unmarshal Days in Expiration | ||
306 | type ExpirationDays int | ||
307 | |||
308 | // MarshalXML encodes the number of days to expire if it is non-zero and | ||
309 | // omits the element otherwise | ||
310 | func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { | ||
311 | if eDays == 0 { | ||
312 | return nil | ||
313 | } | ||
314 | return e.EncodeElement(int(eDays), startElement) | ||
315 | } | ||
316 | |||
317 | // ExpirationDate is an embedded type containing time.Time to unmarshal | ||
318 | // Date in Expiration | ||
319 | type ExpirationDate struct { | ||
320 | time.Time | ||
321 | } | ||
322 | |||
323 | // MarshalXML encodes the expiration date if it is non-zero and omits | ||
324 | // the element otherwise | ||
325 | func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { | ||
326 | if eDate.Time.IsZero() { | ||
327 | return nil | ||
328 | } | ||
329 | return e.EncodeElement(eDate.Format(time.RFC3339), startElement) | ||
330 | } | ||
331 | |||
332 | // ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element. | ||
333 | type ExpireDeleteMarker ExpirationBoolean | ||
334 | |||
335 | // IsEnabled returns true if the auto delete-marker expiration is enabled | ||
336 | func (e ExpireDeleteMarker) IsEnabled() bool { | ||
337 | return bool(e) | ||
338 | } | ||
339 | |||
340 | // ExpirationBoolean represents an XML version of 'bool' type | ||
341 | type ExpirationBoolean bool | ||
342 | |||
343 | // MarshalXML encodes delete marker boolean into an XML form. | ||
344 | func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { | ||
345 | if !b { | ||
346 | return nil | ||
347 | } | ||
348 | type booleanWrapper ExpirationBoolean | ||
349 | return e.EncodeElement(booleanWrapper(b), startElement) | ||
350 | } | ||
351 | |||
352 | // IsEnabled returns true if the expiration boolean is enabled | ||
353 | func (b ExpirationBoolean) IsEnabled() bool { | ||
354 | return bool(b) | ||
355 | } | ||
356 | |||
357 | // Expiration structure - expiration details of lifecycle configuration | ||
358 | type Expiration struct { | ||
359 | XMLName xml.Name `xml:"Expiration,omitempty" json:"-"` | ||
360 | Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` | ||
361 | Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` | ||
362 | DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"` | ||
363 | DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"` | ||
364 | } | ||
365 | |||
366 | // MarshalJSON customizes json encoding by removing empty day/date specification. | ||
367 | func (e Expiration) MarshalJSON() ([]byte, error) { | ||
368 | type expiration struct { | ||
369 | Date *ExpirationDate `json:"Date,omitempty"` | ||
370 | Days *ExpirationDays `json:"Days,omitempty"` | ||
371 | DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"` | ||
372 | DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"` | ||
373 | } | ||
374 | |||
375 | newexp := expiration{ | ||
376 | DeleteMarker: e.DeleteMarker, | ||
377 | DeleteAll: e.DeleteAll, | ||
378 | } | ||
379 | if !e.IsDaysNull() { | ||
380 | newexp.Days = &e.Days | ||
381 | } | ||
382 | if !e.IsDateNull() { | ||
383 | newexp.Date = &e.Date | ||
384 | } | ||
385 | return json.Marshal(newexp) | ||
386 | } | ||
387 | |||
388 | // IsDaysNull returns true if days field is null | ||
389 | func (e Expiration) IsDaysNull() bool { | ||
390 | return e.Days == ExpirationDays(0) | ||
391 | } | ||
392 | |||
393 | // IsDateNull returns true if date field is null | ||
394 | func (e Expiration) IsDateNull() bool { | ||
395 | return e.Date.Time.IsZero() | ||
396 | } | ||
397 | |||
398 | // IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled | ||
399 | func (e Expiration) IsDeleteMarkerExpirationEnabled() bool { | ||
400 | return e.DeleteMarker.IsEnabled() | ||
401 | } | ||
402 | |||
403 | // IsNull returns true if the date and days fields are null and delete-marker expiration is not enabled | ||
404 | func (e Expiration) IsNull() bool { | ||
405 | return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() | ||
406 | } | ||
407 | |||
408 | // MarshalXML encodes the expiration only if it is non-null | ||
409 | func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { | ||
410 | if e.IsNull() { | ||
411 | return nil | ||
412 | } | ||
413 | type expirationWrapper Expiration | ||
414 | return en.EncodeElement(expirationWrapper(e), startElement) | ||
415 | } | ||
416 | |||
417 | // MarshalJSON customizes json encoding by omitting empty values | ||
418 | func (r Rule) MarshalJSON() ([]byte, error) { | ||
419 | type rule struct { | ||
420 | AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` | ||
421 | Expiration *Expiration `json:"Expiration,omitempty"` | ||
422 | ID string `json:"ID"` | ||
423 | RuleFilter *Filter `json:"Filter,omitempty"` | ||
424 | NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` | ||
425 | NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"` | ||
426 | Prefix string `json:"Prefix,omitempty"` | ||
427 | Status string `json:"Status"` | ||
428 | Transition *Transition `json:"Transition,omitempty"` | ||
429 | } | ||
430 | newr := rule{ | ||
431 | Prefix: r.Prefix, | ||
432 | Status: r.Status, | ||
433 | ID: r.ID, | ||
434 | } | ||
435 | |||
436 | if !r.RuleFilter.IsNull() { | ||
437 | newr.RuleFilter = &r.RuleFilter | ||
438 | } | ||
439 | if !r.AbortIncompleteMultipartUpload.IsDaysNull() { | ||
440 | newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload | ||
441 | } | ||
442 | if !r.Expiration.IsNull() { | ||
443 | newr.Expiration = &r.Expiration | ||
444 | } | ||
445 | if !r.Transition.IsNull() { | ||
446 | newr.Transition = &r.Transition | ||
447 | } | ||
448 | if !r.NoncurrentVersionExpiration.isNull() { | ||
449 | newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration | ||
450 | } | ||
451 | if !r.NoncurrentVersionTransition.isNull() { | ||
452 | newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition | ||
453 | } | ||
454 | |||
455 | return json.Marshal(newr) | ||
456 | } | ||
457 | |||
458 | // Rule represents a single rule in lifecycle configuration | ||
459 | type Rule struct { | ||
460 | XMLName xml.Name `xml:"Rule,omitempty" json:"-"` | ||
461 | AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` | ||
462 | Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` | ||
463 | ID string `xml:"ID" json:"ID"` | ||
464 | RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` | ||
465 | NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` | ||
466 | NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"` | ||
467 | Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` | ||
468 | Status string `xml:"Status" json:"Status"` | ||
469 | Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"` | ||
470 | } | ||
471 | |||
472 | // Configuration is a collection of Rule objects. | ||
473 | type Configuration struct { | ||
474 | XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"` | ||
475 | Rules []Rule `xml:"Rule"` | ||
476 | } | ||
477 | |||
478 | // Empty checks if the lifecycle configuration is empty | ||
479 | func (c *Configuration) Empty() bool { | ||
480 | if c == nil { | ||
481 | return true | ||
482 | } | ||
483 | return len(c.Rules) == 0 | ||
484 | } | ||
485 | |||
486 | // NewConfiguration initializes a fresh lifecycle configuration | ||
487 | // for manipulation, such as setting and removing lifecycle rules | ||
488 | // and filters. | ||
489 | func NewConfiguration() *Configuration { | ||
490 | return &Configuration{} | ||
491 | } | ||
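To tie the marshalers together, a minimal sketch that builds a one-rule configuration and renders it as XML (the rule ID and prefix are placeholders):

package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

func main() {
	cfg := lifecycle.NewConfiguration()
	cfg.Rules = []lifecycle.Rule{{
		ID:         "expire-logs",
		Status:     "Enabled",
		RuleFilter: lifecycle.Filter{Prefix: "logs/"},
		Expiration: lifecycle.Expiration{Days: lifecycle.ExpirationDays(30)},
	}}

	out, err := xml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// The custom MarshalXML methods above omit the empty Transition,
	// NoncurrentVersion* and AbortIncompleteMultipartUpload elements.
	fmt.Println(string(out))
}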
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go new file mode 100644 index 0000000..126661a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package notification | ||
19 | |||
20 | // identity represents the user ID; this is a compliance field. | ||
21 | type identity struct { | ||
22 | PrincipalID string `json:"principalId"` | ||
23 | } | ||
24 | |||
25 | // event bucket metadata. | ||
26 | type bucketMeta struct { | ||
27 | Name string `json:"name"` | ||
28 | OwnerIdentity identity `json:"ownerIdentity"` | ||
29 | ARN string `json:"arn"` | ||
30 | } | ||
31 | |||
32 | // event object metadata. | ||
33 | type objectMeta struct { | ||
34 | Key string `json:"key"` | ||
35 | Size int64 `json:"size,omitempty"` | ||
36 | ETag string `json:"eTag,omitempty"` | ||
37 | ContentType string `json:"contentType,omitempty"` | ||
38 | UserMetadata map[string]string `json:"userMetadata,omitempty"` | ||
39 | VersionID string `json:"versionId,omitempty"` | ||
40 | Sequencer string `json:"sequencer"` | ||
41 | } | ||
42 | |||
43 | // event server specific metadata. | ||
44 | type eventMeta struct { | ||
45 | SchemaVersion string `json:"s3SchemaVersion"` | ||
46 | ConfigurationID string `json:"configurationId"` | ||
47 | Bucket bucketMeta `json:"bucket"` | ||
48 | Object objectMeta `json:"object"` | ||
49 | } | ||
50 | |||
51 | // sourceInfo represents information on the client that | ||
52 | // triggered the event notification. | ||
53 | type sourceInfo struct { | ||
54 | Host string `json:"host"` | ||
55 | Port string `json:"port"` | ||
56 | UserAgent string `json:"userAgent"` | ||
57 | } | ||
58 | |||
59 | // Event represents an Amazon S3 bucket notification event. | ||
60 | type Event struct { | ||
61 | EventVersion string `json:"eventVersion"` | ||
62 | EventSource string `json:"eventSource"` | ||
63 | AwsRegion string `json:"awsRegion"` | ||
64 | EventTime string `json:"eventTime"` | ||
65 | EventName string `json:"eventName"` | ||
66 | UserIdentity identity `json:"userIdentity"` | ||
67 | RequestParameters map[string]string `json:"requestParameters"` | ||
68 | ResponseElements map[string]string `json:"responseElements"` | ||
69 | S3 eventMeta `json:"s3"` | ||
70 | Source sourceInfo `json:"source"` | ||
71 | } | ||
72 | |||
73 | // Info - represents the collection of notification events; it additionally | ||
74 | // reports any error encountered while listening on bucket notifications. | ||
75 | type Info struct { | ||
76 | Records []Event | ||
77 | Err error | ||
78 | } | ||
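A hedged decoding sketch; the payload is trimmed down, and real records carry many more of the fields listed above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/notification"
)

func main() {
	payload := []byte(`{"Records":[{"eventName":"s3:ObjectCreated:Put",
		"s3":{"bucket":{"name":"mybucket"},"object":{"key":"photo.jpg","size":1024}}}]}`)

	var info notification.Info
	if err := json.Unmarshal(payload, &info); err != nil {
		panic(err)
	}
	for _, ev := range info.Records {
		fmt.Printf("%s on %s/%s\n", ev.EventName, ev.S3.Bucket.Name, ev.S3.Object.Key)
	}
}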
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go new file mode 100644 index 0000000..a44799d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go | |||
@@ -0,0 +1,440 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package notification | ||
19 | |||
20 | import ( | ||
21 | "encoding/xml" | ||
22 | "errors" | ||
23 | "fmt" | ||
24 | "strings" | ||
25 | |||
26 | "github.com/minio/minio-go/v7/pkg/set" | ||
27 | ) | ||
28 | |||
29 | // EventType is an S3 notification event associated with the bucket notification configuration | ||
30 | type EventType string | ||
31 | |||
32 | // The role of all event types are described in : | ||
33 | // | ||
34 | // http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations | ||
35 | const ( | ||
36 | ObjectCreatedAll EventType = "s3:ObjectCreated:*" | ||
37 | ObjectCreatedPut EventType = "s3:ObjectCreated:Put" | ||
38 | ObjectCreatedPost EventType = "s3:ObjectCreated:Post" | ||
39 | ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy" | ||
40 | ObjectCreatedDeleteTagging EventType = "s3:ObjectCreated:DeleteTagging" | ||
41 | ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload" | ||
42 | ObjectCreatedPutLegalHold EventType = "s3:ObjectCreated:PutLegalHold" | ||
43 | ObjectCreatedPutRetention EventType = "s3:ObjectCreated:PutRetention" | ||
44 | ObjectCreatedPutTagging EventType = "s3:ObjectCreated:PutTagging" | ||
45 | ObjectAccessedGet EventType = "s3:ObjectAccessed:Get" | ||
46 | ObjectAccessedHead EventType = "s3:ObjectAccessed:Head" | ||
47 | ObjectAccessedGetRetention EventType = "s3:ObjectAccessed:GetRetention" | ||
48 | ObjectAccessedGetLegalHold EventType = "s3:ObjectAccessed:GetLegalHold" | ||
49 | ObjectAccessedAll EventType = "s3:ObjectAccessed:*" | ||
50 | ObjectRemovedAll EventType = "s3:ObjectRemoved:*" | ||
51 | ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete" | ||
52 | ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated" | ||
53 | ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject" | ||
54 | ObjectTransitionAll EventType = "s3:ObjectTransition:*" | ||
55 | ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed" | ||
56 | ObjectTransitionComplete EventType = "s3:ObjectTransition:Complete" | ||
57 | ObjectTransitionPost EventType = "s3:ObjectRestore:Post" | ||
58 | ObjectTransitionCompleted EventType = "s3:ObjectRestore:Completed" | ||
59 | ObjectReplicationAll EventType = "s3:Replication:*" | ||
60 | ObjectReplicationOperationCompletedReplication EventType = "s3:Replication:OperationCompletedReplication" | ||
61 | ObjectReplicationOperationFailedReplication EventType = "s3:Replication:OperationFailedReplication" | ||
62 | ObjectReplicationOperationMissedThreshold EventType = "s3:Replication:OperationMissedThreshold" | ||
63 | ObjectReplicationOperationNotTracked EventType = "s3:Replication:OperationNotTracked" | ||
64 | ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold" | ||
65 | ObjectScannerManyVersions EventType = "s3:Scanner:ManyVersions" | ||
66 | ObjectScannerBigPrefix EventType = "s3:Scanner:BigPrefix" | ||
67 | ObjectScannerAll EventType = "s3:Scanner:*" | ||
68 | BucketCreatedAll EventType = "s3:BucketCreated:*" | ||
69 | BucketRemovedAll EventType = "s3:BucketRemoved:*" | ||
70 | ) | ||
71 | |||
72 | // FilterRule - child of S3Key, a tag in the notification xml which | ||
73 | // carries suffix/prefix filters | ||
74 | type FilterRule struct { | ||
75 | Name string `xml:"Name"` | ||
76 | Value string `xml:"Value"` | ||
77 | } | ||
78 | |||
79 | // S3Key - child of Filter, a tag in the notification xml which | ||
80 | // carries suffix/prefix filters | ||
81 | type S3Key struct { | ||
82 | FilterRules []FilterRule `xml:"FilterRule,omitempty"` | ||
83 | } | ||
84 | |||
85 | // Filter - a tag in the notification xml structure which carries | ||
86 | // suffix/prefix filters | ||
87 | type Filter struct { | ||
88 | S3Key S3Key `xml:"S3Key,omitempty"` | ||
89 | } | ||
90 | |||
91 | // Arn - holds ARN information that will be sent to the web service, | ||
92 | // ARN description can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html | ||
93 | type Arn struct { | ||
94 | Partition string | ||
95 | Service string | ||
96 | Region string | ||
97 | AccountID string | ||
98 | Resource string | ||
99 | } | ||
100 | |||
101 | // NewArn creates a new ARN based on the given partition, service, region, account ID and resource | ||
102 | func NewArn(partition, service, region, accountID, resource string) Arn { | ||
103 | return Arn{ | ||
104 | Partition: partition, | ||
105 | Service: service, | ||
106 | Region: region, | ||
107 | AccountID: accountID, | ||
108 | Resource: resource, | ||
109 | } | ||
110 | } | ||
111 | |||
112 | var ( | ||
113 | // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn' | ||
114 | ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'") | ||
115 | // ErrInvalidArnFormat is returned when ARN string format is not valid | ||
116 | ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:<partition>:<service>:<region>:<accountID>:<resource>'") | ||
117 | ) | ||
118 | |||
119 | // NewArnFromString parses a string representation of an ARN into an Arn object. | ||
120 | // Returns an error if the string format is incorrect. | ||
121 | func NewArnFromString(arn string) (Arn, error) { | ||
122 | parts := strings.Split(arn, ":") | ||
123 | if len(parts) != 6 { | ||
124 | return Arn{}, ErrInvalidArnFormat | ||
125 | } | ||
126 | if parts[0] != "arn" { | ||
127 | return Arn{}, ErrInvalidArnPrefix | ||
128 | } | ||
129 | |||
130 | return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil | ||
131 | } | ||
132 | |||
133 | // String returns the string format of the ARN | ||
134 | func (arn Arn) String() string { | ||
135 | return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource | ||
136 | } | ||
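// A quick round-trip sketch of the two helpers above:
//
//	arn, err := NewArnFromString("arn:minio:sqs:us-east-1:1:webhook")
//	// err == nil; arn.Service == "sqs", arn.Region == "us-east-1", arn.Resource == "webhook"
//	s := arn.String() // "arn:minio:sqs:us-east-1:1:webhook"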
137 | |||
138 | // Config - represents one single notification configuration | ||
139 | // such as topic, queue or lambda configuration. | ||
140 | type Config struct { | ||
141 | ID string `xml:"Id,omitempty"` | ||
142 | Arn Arn `xml:"-"` | ||
143 | Events []EventType `xml:"Event"` | ||
144 | Filter *Filter `xml:"Filter,omitempty"` | ||
145 | } | ||
146 | |||
147 | // NewConfig creates one notification config and sets the given ARN | ||
148 | func NewConfig(arn Arn) Config { | ||
149 | return Config{Arn: arn, Filter: &Filter{}} | ||
150 | } | ||
151 | |||
152 | // AddEvents adds one or more events to the current notification config | ||
153 | func (t *Config) AddEvents(events ...EventType) { | ||
154 | t.Events = append(t.Events, events...) | ||
155 | } | ||
156 | |||
157 | // AddFilterSuffix sets the suffix configuration to the current notification config | ||
158 | func (t *Config) AddFilterSuffix(suffix string) { | ||
159 | if t.Filter == nil { | ||
160 | t.Filter = &Filter{} | ||
161 | } | ||
162 | newFilterRule := FilterRule{Name: "suffix", Value: suffix} | ||
163 | // Replace the existing suffix rule if present; otherwise append a new one | ||
164 | for index := range t.Filter.S3Key.FilterRules { | ||
165 | if t.Filter.S3Key.FilterRules[index].Name == "suffix" { | ||
166 | t.Filter.S3Key.FilterRules[index] = newFilterRule | ||
167 | return | ||
168 | } | ||
169 | } | ||
170 | t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) | ||
171 | } | ||
172 | |||
173 | // AddFilterPrefix sets the prefix configuration to the current notification config | ||
174 | func (t *Config) AddFilterPrefix(prefix string) { | ||
175 | if t.Filter == nil { | ||
176 | t.Filter = &Filter{} | ||
177 | } | ||
178 | newFilterRule := FilterRule{Name: "prefix", Value: prefix} | ||
179 | // Replace the existing prefix rule if present; otherwise append a new one | ||
180 | for index := range t.Filter.S3Key.FilterRules { | ||
181 | if t.Filter.S3Key.FilterRules[index].Name == "prefix" { | ||
182 | t.Filter.S3Key.FilterRules[index] = newFilterRule | ||
183 | return | ||
184 | } | ||
185 | } | ||
186 | t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) | ||
187 | } | ||
188 | |||
189 | // EqualEventTypeList tells whether a and b contain the same events | ||
190 | func EqualEventTypeList(a, b []EventType) bool { | ||
191 | if len(a) != len(b) { | ||
192 | return false | ||
193 | } | ||
194 | setA := set.NewStringSet() | ||
195 | for _, i := range a { | ||
196 | setA.Add(string(i)) | ||
197 | } | ||
198 | |||
199 | setB := set.NewStringSet() | ||
200 | for _, i := range b { | ||
201 | setB.Add(string(i)) | ||
202 | } | ||
203 | |||
204 | return setA.Difference(setB).IsEmpty() | ||
205 | } | ||
206 | |||
207 | // EqualFilterRuleList tells whether a and b contain the same filters | ||
208 | func EqualFilterRuleList(a, b []FilterRule) bool { | ||
209 | if len(a) != len(b) { | ||
210 | return false | ||
211 | } | ||
212 | |||
213 | setA := set.NewStringSet() | ||
214 | for _, i := range a { | ||
215 | setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) | ||
216 | } | ||
217 | |||
218 | setB := set.NewStringSet() | ||
219 | for _, i := range b { | ||
220 | setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) | ||
221 | } | ||
222 | |||
223 | return setA.Difference(setB).IsEmpty() | ||
224 | } | ||
225 | |||
226 | // Equal returns whether this `Config` is equal to another defined by the passed parameters | ||
227 | func (t *Config) Equal(events []EventType, prefix, suffix string) bool { | ||
228 | if t == nil { | ||
229 | return false | ||
230 | } | ||
231 | |||
232 | // Compare events | ||
233 | passEvents := EqualEventTypeList(t.Events, events) | ||
234 | |||
235 | // Compare filters | ||
236 | var newFilterRules []FilterRule | ||
237 | if prefix != "" { | ||
238 | newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix}) | ||
239 | } | ||
240 | if suffix != "" { | ||
241 | newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix}) | ||
242 | } | ||
243 | |||
244 | var currentFilterRules []FilterRule | ||
245 | if t.Filter != nil { | ||
246 | currentFilterRules = t.Filter.S3Key.FilterRules | ||
247 | } | ||
248 | |||
249 | passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules) | ||
250 | return passEvents && passFilters | ||
251 | } | ||
252 | |||
253 | // TopicConfig carries one single topic notification configuration | ||
254 | type TopicConfig struct { | ||
255 | Config | ||
256 | Topic string `xml:"Topic"` | ||
257 | } | ||
258 | |||
259 | // QueueConfig carries one single queue notification configuration | ||
260 | type QueueConfig struct { | ||
261 | Config | ||
262 | Queue string `xml:"Queue"` | ||
263 | } | ||
264 | |||
265 | // LambdaConfig carries one single cloudfunction notification configuration | ||
266 | type LambdaConfig struct { | ||
267 | Config | ||
268 | Lambda string `xml:"CloudFunction"` | ||
269 | } | ||
270 | |||
271 | // Configuration - the struct that represents the whole XML to be sent to the web service | ||
272 | type Configuration struct { | ||
273 | XMLName xml.Name `xml:"NotificationConfiguration"` | ||
274 | LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` | ||
275 | TopicConfigs []TopicConfig `xml:"TopicConfiguration"` | ||
276 | QueueConfigs []QueueConfig `xml:"QueueConfiguration"` | ||
277 | } | ||
278 | |||
279 | // AddTopic adds a given topic config to the general bucket notification config | ||
280 | func (b *Configuration) AddTopic(topicConfig Config) bool { | ||
281 | newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()} | ||
282 | for _, n := range b.TopicConfigs { | ||
283 | // If new config matches existing one | ||
284 | if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { | ||
285 | |||
286 | existingConfig := set.NewStringSet() | ||
287 | for _, v := range n.Events { | ||
288 | existingConfig.Add(string(v)) | ||
289 | } | ||
290 | |||
291 | newConfig := set.NewStringSet() | ||
292 | for _, v := range topicConfig.Events { | ||
293 | newConfig.Add(string(v)) | ||
294 | } | ||
295 | |||
296 | if !newConfig.Intersection(existingConfig).IsEmpty() { | ||
297 | return false | ||
298 | } | ||
299 | } | ||
300 | } | ||
301 | b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) | ||
302 | return true | ||
303 | } | ||
304 | |||
305 | // AddQueue adds a given queue config to the general bucket notification config | ||
306 | func (b *Configuration) AddQueue(queueConfig Config) bool { | ||
307 | newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()} | ||
308 | for _, n := range b.QueueConfigs { | ||
309 | if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { | ||
310 | |||
311 | existingConfig := set.NewStringSet() | ||
312 | for _, v := range n.Events { | ||
313 | existingConfig.Add(string(v)) | ||
314 | } | ||
315 | |||
316 | newConfig := set.NewStringSet() | ||
317 | for _, v := range queueConfig.Events { | ||
318 | newConfig.Add(string(v)) | ||
319 | } | ||
320 | |||
321 | if !newConfig.Intersection(existingConfig).IsEmpty() { | ||
322 | return false | ||
323 | } | ||
324 | } | ||
325 | } | ||
326 | b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) | ||
327 | return true | ||
328 | } | ||
329 | |||
330 | // AddLambda adds a given lambda config to the general bucket notification config | ||
331 | func (b *Configuration) AddLambda(lambdaConfig Config) bool { | ||
332 | newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()} | ||
333 | for _, n := range b.LambdaConfigs { | ||
334 | if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { | ||
335 | |||
336 | existingConfig := set.NewStringSet() | ||
337 | for _, v := range n.Events { | ||
338 | existingConfig.Add(string(v)) | ||
339 | } | ||
340 | |||
341 | newConfig := set.NewStringSet() | ||
342 | for _, v := range lambdaConfig.Events { | ||
343 | newConfig.Add(string(v)) | ||
344 | } | ||
345 | |||
346 | if !newConfig.Intersection(existingConfig).IsEmpty() { | ||
347 | return false | ||
348 | } | ||
349 | } | ||
350 | } | ||
351 | b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) | ||
352 | return true | ||
353 | } | ||
354 | |||
355 | // RemoveTopicByArn removes all topic configurations that match the exact specified ARN | ||
356 | func (b *Configuration) RemoveTopicByArn(arn Arn) { | ||
357 | var topics []TopicConfig | ||
358 | for _, topic := range b.TopicConfigs { | ||
359 | if topic.Topic != arn.String() { | ||
360 | topics = append(topics, topic) | ||
361 | } | ||
362 | } | ||
363 | b.TopicConfigs = topics | ||
364 | } | ||
365 | |||
366 | // ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete | ||
367 | var ErrNoConfigMatch = errors.New("no notification configuration matched") | ||
368 | |||
369 | // RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that matches the exact specified ARN, events, prefix and suffix | ||
370 | func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { | ||
371 | removeIndex := -1 | ||
372 | for i, v := range b.TopicConfigs { | ||
373 | // if it matches events and filters, mark the index for deletion | ||
374 | if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) { | ||
375 | removeIndex = i | ||
376 | break // since we have at most one matching config | ||
377 | } | ||
378 | } | ||
379 | if removeIndex >= 0 { | ||
380 | b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...) | ||
381 | return nil | ||
382 | } | ||
383 | return ErrNoConfigMatch | ||
384 | } | ||
385 | |||
386 | // RemoveQueueByArn removes all queue configurations that match the exact specified ARN | ||
387 | func (b *Configuration) RemoveQueueByArn(arn Arn) { | ||
388 | var queues []QueueConfig | ||
389 | for _, queue := range b.QueueConfigs { | ||
390 | if queue.Queue != arn.String() { | ||
391 | queues = append(queues, queue) | ||
392 | } | ||
393 | } | ||
394 | b.QueueConfigs = queues | ||
395 | } | ||
396 | |||
397 | // RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that matches the exact specified ARN, events, prefix and suffix | ||
398 | func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { | ||
399 | removeIndex := -1 | ||
400 | for i, v := range b.QueueConfigs { | ||
401 | // if it matches events and filters, mark the index for deletion | ||
402 | if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) { | ||
403 | removeIndex = i | ||
404 | break // since we have at most one matching config | ||
405 | } | ||
406 | } | ||
407 | if removeIndex >= 0 { | ||
408 | b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...) | ||
409 | return nil | ||
410 | } | ||
411 | return ErrNoConfigMatch | ||
412 | } | ||
413 | |||
414 | // RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN | ||
415 | func (b *Configuration) RemoveLambdaByArn(arn Arn) { | ||
416 | var lambdas []LambdaConfig | ||
417 | for _, lambda := range b.LambdaConfigs { | ||
418 | if lambda.Lambda != arn.String() { | ||
419 | lambdas = append(lambdas, lambda) | ||
420 | } | ||
421 | } | ||
422 | b.LambdaConfigs = lambdas | ||
423 | } | ||
424 | |||
425 | // RemoveLambdaByArnEventsPrefixSuffix removes a lambda configuration that matches the exact specified ARN, events, prefix and suffix | ||
426 | func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { | ||
427 | removeIndex := -1 | ||
428 | for i, v := range b.LambdaConfigs { | ||
429 | // if it matches events and filters, mark the index for deletion | ||
430 | if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) { | ||
431 | removeIndex = i | ||
432 | break // since we have at most one matching config | ||
433 | } | ||
434 | } | ||
435 | if removeIndex >= 0 { | ||
436 | b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...) | ||
437 | return nil | ||
438 | } | ||
439 | return ErrNoConfigMatch | ||
440 | } | ||
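Putting the pieces together, a minimal sketch that assembles a queue notification and renders the XML to be sent to the service (ARN parts and filters are placeholders):

package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/notification"
)

func main() {
	arn := notification.NewArn("minio", "sqs", "us-east-1", "1", "webhook")

	qc := notification.NewConfig(arn)
	qc.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll)
	qc.AddFilterPrefix("photos/")
	qc.AddFilterSuffix(".jpg")

	var cfg notification.Configuration
	cfg.AddQueue(qc) // returns false if an overlapping config already exists

	out, err := xml.Marshal(&cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}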
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go new file mode 100644 index 0000000..0abbf6e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go | |||
@@ -0,0 +1,971 @@ | |||
1 | /* | ||
2 | * MinIO Client (C) 2020 MinIO, Inc. | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | package replication | ||
18 | |||
19 | import ( | ||
20 | "bytes" | ||
21 | "encoding/xml" | ||
22 | "fmt" | ||
23 | "math" | ||
24 | "strconv" | ||
25 | "strings" | ||
26 | "time" | ||
27 | "unicode/utf8" | ||
28 | |||
29 | "github.com/rs/xid" | ||
30 | ) | ||
31 | |||
32 | var errInvalidFilter = fmt.Errorf("invalid filter") | ||
33 | |||
34 | // OptionType specifies operation to be performed on config | ||
35 | type OptionType string | ||
36 | |||
37 | const ( | ||
38 | // AddOption specifies addition of rule to config | ||
39 | AddOption OptionType = "Add" | ||
40 | // SetOption specifies modification of existing rule to config | ||
41 | SetOption OptionType = "Set" | ||
42 | |||
43 | // RemoveOption specifies rule options are for removing a rule | ||
44 | RemoveOption OptionType = "Remove" | ||
45 | // ImportOption is for getting current config | ||
46 | ImportOption OptionType = "Import" | ||
47 | ) | ||
48 | |||
49 | // Options represents options to set a replication configuration rule | ||
50 | type Options struct { | ||
51 | Op OptionType | ||
52 | RoleArn string | ||
53 | ID string | ||
54 | Prefix string | ||
55 | RuleStatus string | ||
56 | Priority string | ||
57 | TagString string | ||
58 | StorageClass string | ||
59 | DestBucket string | ||
60 | IsTagSet bool | ||
61 | IsSCSet bool | ||
62 | ReplicateDeletes string // replicate versioned deletes | ||
63 | ReplicateDeleteMarkers string // replicate soft deletes | ||
64 | ReplicaSync string // replicate replica metadata modifications | ||
65 | ExistingObjectReplicate string | ||
66 | } | ||
67 | |||
68 | // Tags returns a slice of tags for a rule | ||
69 | func (opts Options) Tags() ([]Tag, error) { | ||
70 | var tagList []Tag | ||
71 | tagTokens := strings.Split(opts.TagString, "&") | ||
72 | for _, tok := range tagTokens { | ||
73 | if tok == "" { | ||
74 | break | ||
75 | } | ||
76 | kv := strings.SplitN(tok, "=", 2) | ||
77 | if len(kv) != 2 { | ||
78 | return []Tag{}, fmt.Errorf("tags should be entered as ampersand (&) separated k=v pairs") | ||
79 | } | ||
80 | tagList = append(tagList, Tag{ | ||
81 | Key: kv[0], | ||
82 | Value: kv[1], | ||
83 | }) | ||
84 | } | ||
85 | return tagList, nil | ||
86 | } | ||
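Since the `&`-separated `TagString` format is easy to get wrong, here is a minimal usage sketch against the exported API above (the keys and values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	// TagString is split on "&"; each token must be a k=v pair.
	opts := replication.Options{TagString: "env=prod&team=storage"}
	tags, err := opts.Tags()
	if err != nil {
		panic(err)
	}
	for _, t := range tags {
		fmt.Printf("%s=%s\n", t.Key, t.Value) // env=prod, team=storage
	}
}
```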
87 | |||
88 | // Config - replication configuration specified in | ||
89 | // https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html | ||
90 | type Config struct { | ||
91 | XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"` | ||
92 | Rules []Rule `xml:"Rule" json:"Rules"` | ||
93 | Role string `xml:"Role" json:"Role"` | ||
94 | } | ||
95 | |||
96 | // Empty returns true if config is not set | ||
97 | func (c *Config) Empty() bool { | ||
98 | return len(c.Rules) == 0 | ||
99 | } | ||
100 | |||
101 | // AddRule adds a new rule to existing replication config. If a rule exists with the | ||
102 | // same ID, then the rule is replaced. | ||
103 | func (c *Config) AddRule(opts Options) error { | ||
104 | priority, err := strconv.Atoi(opts.Priority) | ||
105 | if err != nil { | ||
106 | return err | ||
107 | } | ||
108 | var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite | ||
109 | if opts.RoleArn != "" { | ||
110 | tokens := strings.Split(opts.RoleArn, ":") | ||
111 | if len(tokens) != 6 { | ||
112 | return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn) | ||
113 | } | ||
114 | switch { | ||
115 | case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0: | ||
116 | c.Role = opts.RoleArn | ||
117 | compatSw = true | ||
118 | case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"): | ||
119 | c.Role = opts.RoleArn | ||
120 | default: | ||
121 | return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn) | ||
122 | } | ||
123 | } | ||
124 | |||
125 | var status Status | ||
126 | // set rule status from the enable|disable option | ||
127 | switch opts.RuleStatus { | ||
128 | case "enable": | ||
129 | status = Enabled | ||
130 | case "disable": | ||
131 | status = Disabled | ||
132 | default: | ||
133 | return fmt.Errorf("rule state should be either [enable|disable]") | ||
134 | } | ||
135 | |||
136 | tags, err := opts.Tags() | ||
137 | if err != nil { | ||
138 | return err | ||
139 | } | ||
140 | andVal := And{ | ||
141 | Tags: tags, | ||
142 | } | ||
143 | filter := Filter{Prefix: opts.Prefix} | ||
144 | // no prefix and exactly one tag - use the single Tag element. | ||
145 | if opts.Prefix == "" && len(tags) == 1 { | ||
146 | filter.Tag = tags[0] | ||
147 | } | ||
148 | // more than one tag, or a prefix is present - wrap them in an And element. | ||
149 | if len(andVal.Tags) > 1 || opts.Prefix != "" { | ||
150 | filter.And = andVal | ||
151 | filter.And.Prefix = opts.Prefix | ||
152 | filter.Prefix = "" | ||
153 | filter.Tag = Tag{} | ||
154 | } | ||
155 | if opts.ID == "" { | ||
156 | opts.ID = xid.New().String() | ||
157 | } | ||
158 | |||
159 | destBucket := opts.DestBucket | ||
160 | // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html | ||
161 | if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 { | ||
162 | if len(btokens) == 1 && compatSw { | ||
163 | destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) | ||
164 | } else { | ||
165 | return fmt.Errorf("destination bucket needs to be in Arn format") | ||
166 | } | ||
167 | } | ||
168 | dmStatus := Disabled | ||
169 | if opts.ReplicateDeleteMarkers != "" { | ||
170 | switch opts.ReplicateDeleteMarkers { | ||
171 | case "enable": | ||
172 | dmStatus = Enabled | ||
173 | case "disable": | ||
174 | dmStatus = Disabled | ||
175 | default: | ||
176 | return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable") | ||
177 | } | ||
178 | } | ||
179 | |||
180 | vDeleteStatus := Disabled | ||
181 | if opts.ReplicateDeletes != "" { | ||
182 | switch opts.ReplicateDeletes { | ||
183 | case "enable": | ||
184 | vDeleteStatus = Enabled | ||
185 | case "disable": | ||
186 | vDeleteStatus = Disabled | ||
187 | default: | ||
188 | return fmt.Errorf("ReplicateDeletes should be either enable|disable") | ||
189 | } | ||
190 | } | ||
191 | var replicaSync Status | ||
192 | // replica sync is by default Enabled, unless specified. | ||
193 | switch opts.ReplicaSync { | ||
194 | case "enable", "": | ||
195 | replicaSync = Enabled | ||
196 | case "disable": | ||
197 | replicaSync = Disabled | ||
198 | default: | ||
199 | return fmt.Errorf("replica metadata sync should be either [enable|disable]") | ||
200 | } | ||
201 | |||
202 | var existingStatus Status | ||
203 | if opts.ExistingObjectReplicate != "" { | ||
204 | switch opts.ExistingObjectReplicate { | ||
205 | case "enable": | ||
206 | existingStatus = Enabled | ||
207 | case "disable", "": | ||
208 | existingStatus = Disabled | ||
209 | default: | ||
210 | return fmt.Errorf("existingObjectReplicate should be either enable|disable") | ||
211 | } | ||
212 | } | ||
213 | newRule := Rule{ | ||
214 | ID: opts.ID, | ||
215 | Priority: priority, | ||
216 | Status: status, | ||
217 | Filter: filter, | ||
218 | Destination: Destination{ | ||
219 | Bucket: destBucket, | ||
220 | StorageClass: opts.StorageClass, | ||
221 | }, | ||
222 | DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus}, | ||
223 | DeleteReplication: DeleteReplication{Status: vDeleteStatus}, | ||
224 | // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow | ||
225 | // automatic failover as the expectation in this case is that replica and source should be identical. | ||
226 | // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html | ||
227 | SourceSelectionCriteria: SourceSelectionCriteria{ | ||
228 | ReplicaModifications: ReplicaModifications{ | ||
229 | Status: replicaSync, | ||
230 | }, | ||
231 | }, | ||
232 | // By default disable existing object replication unless selected | ||
233 | ExistingObjectReplication: ExistingObjectReplication{ | ||
234 | Status: existingStatus, | ||
235 | }, | ||
236 | } | ||
237 | |||
238 | // validate the newly constructed rule before adding it to the config. | ||
239 | if err := newRule.Validate(); err != nil { | ||
240 | return err | ||
241 | } | ||
242 | // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration | ||
243 | if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw { | ||
244 | for i := range c.Rules { | ||
245 | c.Rules[i].Destination.Bucket = c.Role | ||
246 | } | ||
247 | c.Role = "" | ||
248 | } | ||
249 | |||
250 | for _, rule := range c.Rules { | ||
251 | if rule.Priority == newRule.Priority { | ||
252 | return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") | ||
253 | } | ||
254 | if rule.ID == newRule.ID { | ||
255 | return fmt.Errorf("a rule exists with this ID") | ||
256 | } | ||
257 | } | ||
258 | |||
259 | c.Rules = append(c.Rules, newRule) | ||
260 | return nil | ||
261 | } | ||
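A minimal sketch of driving `AddRule` and serializing the result; the rule ID and bucket name are placeholders, and the destination uses the 6-token ARN form the parser above requires:

```go
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	var cfg replication.Config
	err := cfg.AddRule(replication.Options{
		ID:         "rule-1",                  // optional; an xid is generated when empty
		Priority:   "1",                       // parsed with strconv.Atoi, must be unique
		RuleStatus: "enable",                  // "enable" or "disable"
		Prefix:     "docs/",
		DestBucket: "arn:aws:s3:::destbucket", // 6 colon-separated tokens
	})
	if err != nil {
		panic(err)
	}
	out, err := xml.MarshalIndent(&cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // <ReplicationConfiguration>...</ReplicationConfiguration>
}
```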
262 | |||
263 | // EditRule modifies an existing rule in replication config | ||
264 | func (c *Config) EditRule(opts Options) error { | ||
265 | if opts.ID == "" { | ||
266 | return fmt.Errorf("rule ID missing") | ||
267 | } | ||
268 | // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS. | ||
269 | if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 { | ||
270 | for i := range c.Rules { | ||
271 | c.Rules[i].Destination.Bucket = c.Role | ||
272 | } | ||
273 | c.Role = "" | ||
274 | } | ||
275 | |||
276 | rIdx := -1 | ||
277 | var newRule Rule | ||
278 | for i, rule := range c.Rules { | ||
279 | if rule.ID == opts.ID { | ||
280 | rIdx = i | ||
281 | newRule = rule | ||
282 | break | ||
283 | } | ||
284 | } | ||
285 | if rIdx < 0 { | ||
286 | return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID) | ||
287 | } | ||
288 | prefixChg := opts.Prefix != newRule.Prefix() | ||
289 | if opts.IsTagSet || prefixChg { | ||
290 | prefix := newRule.Prefix() | ||
291 | if prefix != opts.Prefix { | ||
292 | prefix = opts.Prefix | ||
293 | } | ||
294 | tags := []Tag{newRule.Filter.Tag} | ||
295 | if len(newRule.Filter.And.Tags) != 0 { | ||
296 | tags = newRule.Filter.And.Tags | ||
297 | } | ||
298 | var err error | ||
299 | if opts.IsTagSet { | ||
300 | tags, err = opts.Tags() | ||
301 | if err != nil { | ||
302 | return err | ||
303 | } | ||
304 | } | ||
305 | andVal := And{ | ||
306 | Tags: tags, | ||
307 | } | ||
308 | |||
309 | filter := Filter{Prefix: prefix} | ||
310 | // no prefix and exactly one tag - use the single Tag element. | ||
311 | if prefix == "" && len(tags) == 1 { | ||
312 | filter.Tag = tags[0] | ||
313 | } | ||
314 | // more than one tag, or a prefix is present - wrap them in an And element. | ||
315 | if len(andVal.Tags) > 1 || prefix != "" { | ||
316 | filter.And = andVal | ||
317 | filter.And.Prefix = prefix | ||
318 | filter.Prefix = "" | ||
319 | filter.Tag = Tag{} | ||
320 | } | ||
321 | newRule.Filter = filter | ||
322 | } | ||
323 | |||
324 | // toggle rule status for edit option | ||
325 | if opts.RuleStatus != "" { | ||
326 | switch opts.RuleStatus { | ||
327 | case "enable": | ||
328 | newRule.Status = Enabled | ||
329 | case "disable": | ||
330 | newRule.Status = Disabled | ||
331 | default: | ||
332 | return fmt.Errorf("rule state should be either [enable|disable]") | ||
333 | } | ||
334 | } | ||
335 | // set DeleteMarkerReplication rule status for edit option | ||
336 | if opts.ReplicateDeleteMarkers != "" { | ||
337 | switch opts.ReplicateDeleteMarkers { | ||
338 | case "enable": | ||
339 | newRule.DeleteMarkerReplication.Status = Enabled | ||
340 | case "disable": | ||
341 | newRule.DeleteMarkerReplication.Status = Disabled | ||
342 | default: | ||
343 | return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]") | ||
344 | } | ||
345 | } | ||
346 | |||
347 | // set DeleteReplication rule status for edit option. This is a MinIO specific | ||
348 | // option to replicate versioned deletes | ||
349 | if opts.ReplicateDeletes != "" { | ||
350 | switch opts.ReplicateDeletes { | ||
351 | case "enable": | ||
352 | newRule.DeleteReplication.Status = Enabled | ||
353 | case "disable": | ||
354 | newRule.DeleteReplication.Status = Disabled | ||
355 | default: | ||
356 | return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]") | ||
357 | } | ||
358 | } | ||
359 | |||
360 | if opts.ReplicaSync != "" { | ||
361 | switch opts.ReplicaSync { | ||
362 | case "enable", "": | ||
363 | newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled | ||
364 | case "disable": | ||
365 | newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled | ||
366 | default: | ||
367 | return fmt.Errorf("replica metadata sync should be either [enable|disable]") | ||
368 | } | ||
369 | } | ||
370 | |||
371 | if opts.ExistingObjectReplicate != "" { | ||
372 | switch opts.ExistingObjectReplicate { | ||
373 | case "enable": | ||
374 | newRule.ExistingObjectReplication.Status = Enabled | ||
375 | case "disable": | ||
376 | newRule.ExistingObjectReplication.Status = Disabled | ||
377 | default: | ||
378 | return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]") | ||
379 | } | ||
380 | } | ||
381 | if opts.IsSCSet { | ||
382 | newRule.Destination.StorageClass = opts.StorageClass | ||
383 | } | ||
384 | if opts.Priority != "" { | ||
385 | priority, err := strconv.Atoi(opts.Priority) | ||
386 | if err != nil { | ||
387 | return err | ||
388 | } | ||
389 | newRule.Priority = priority | ||
390 | } | ||
391 | if opts.DestBucket != "" { | ||
392 | destBucket := opts.DestBucket | ||
393 | // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html | ||
394 | if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 { | ||
395 | return fmt.Errorf("destination bucket needs to be in Arn format") | ||
396 | } | ||
397 | newRule.Destination.Bucket = destBucket | ||
398 | } | ||
399 | // validate rule | ||
400 | if err := newRule.Validate(); err != nil { | ||
401 | return err | ||
402 | } | ||
403 | // ensure priority and destination bucket restrictions are not violated | ||
404 | for idx, rule := range c.Rules { | ||
405 | if rule.Priority == newRule.Priority && rIdx != idx { | ||
406 | return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") | ||
407 | } | ||
408 | if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID { | ||
409 | return fmt.Errorf("invalid destination bucket for this rule") | ||
410 | } | ||
411 | } | ||
412 | |||
413 | c.Rules[rIdx] = newRule | ||
414 | return nil | ||
415 | } | ||
416 | |||
417 | // RemoveRule removes a rule from replication config. | ||
418 | func (c *Config) RemoveRule(opts Options) error { | ||
419 | var newRules []Rule | ||
420 | ruleFound := false | ||
421 | for _, rule := range c.Rules { | ||
422 | if rule.ID != opts.ID { | ||
423 | newRules = append(newRules, rule) | ||
424 | continue | ||
425 | } | ||
426 | ruleFound = true | ||
427 | } | ||
428 | if !ruleFound { | ||
429 | return fmt.Errorf("Rule with ID %s not found", opts.ID) | ||
430 | } | ||
431 | if len(newRules) == 0 { | ||
432 | return fmt.Errorf("replication configuration should have at least one rule") | ||
433 | } | ||
434 | c.Rules = newRules | ||
435 | return nil | ||
436 | } | ||
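Building on that, a sketch of editing and removing rules (all IDs, priorities and prefixes are placeholders). Two behaviors from the code above are worth noting: an empty `Prefix` on edit counts as a prefix change and rewrites the filter, so the existing prefix is passed again; and `RemoveRule` refuses to delete the last remaining rule, so a second rule keeps the config valid:

```go
package main

import "github.com/minio/minio-go/v7/pkg/replication"

func main() {
	var cfg replication.Config
	for _, o := range []replication.Options{
		{ID: "rule-1", Priority: "1", RuleStatus: "enable", Prefix: "docs/", DestBucket: "arn:aws:s3:::destbucket"},
		{ID: "rule-2", Priority: "2", RuleStatus: "enable", Prefix: "logs/", DestBucket: "arn:aws:s3:::destbucket"},
	} {
		if err := cfg.AddRule(o); err != nil {
			panic(err)
		}
	}
	// Disable rule-1; its prefix is re-sent because an empty Prefix on
	// edit is treated as a prefix change and rewrites the filter.
	if err := cfg.EditRule(replication.Options{ID: "rule-1", RuleStatus: "disable", Prefix: "docs/"}); err != nil {
		panic(err)
	}
	// rule-2 remains, so the config is never left without rules.
	if err := cfg.RemoveRule(replication.Options{ID: "rule-1"}); err != nil {
		panic(err)
	}
}
```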
437 | |||
438 | // Rule - a rule for replication configuration. | ||
439 | type Rule struct { | ||
440 | XMLName xml.Name `xml:"Rule" json:"-"` | ||
441 | ID string `xml:"ID,omitempty"` | ||
442 | Status Status `xml:"Status"` | ||
443 | Priority int `xml:"Priority"` | ||
444 | DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"` | ||
445 | DeleteReplication DeleteReplication `xml:"DeleteReplication"` | ||
446 | Destination Destination `xml:"Destination"` | ||
447 | Filter Filter `xml:"Filter" json:"Filter"` | ||
448 | SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"` | ||
449 | ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"` | ||
450 | } | ||
451 | |||
452 | // Validate validates the rule for correctness | ||
453 | func (r Rule) Validate() error { | ||
454 | if err := r.validateID(); err != nil { | ||
455 | return err | ||
456 | } | ||
457 | if err := r.validateStatus(); err != nil { | ||
458 | return err | ||
459 | } | ||
460 | if err := r.validateFilter(); err != nil { | ||
461 | return err | ||
462 | } | ||
463 | |||
464 | if r.Priority < 0 && r.Status == Enabled { | ||
465 | return fmt.Errorf("priority must be set for the rule") | ||
466 | } | ||
467 | |||
471 | return r.ExistingObjectReplication.Validate() | ||
472 | } | ||
473 | |||
474 | // validateID - checks if ID is valid or not. | ||
475 | func (r Rule) validateID() error { | ||
476 | // cannot be longer than 255 characters | ||
477 | if len(r.ID) > 255 { | ||
478 | return fmt.Errorf("ID must be less than 255 characters") | ||
479 | } | ||
480 | return nil | ||
481 | } | ||
482 | |||
483 | // validateStatus - checks if status is valid or not. | ||
484 | func (r Rule) validateStatus() error { | ||
485 | // Status can't be empty | ||
486 | if len(r.Status) == 0 { | ||
487 | return fmt.Errorf("status cannot be empty") | ||
488 | } | ||
489 | |||
490 | // Status must be one of Enabled or Disabled | ||
491 | if r.Status != Enabled && r.Status != Disabled { | ||
492 | return fmt.Errorf("status must be set to either Enabled or Disabled") | ||
493 | } | ||
494 | return nil | ||
495 | } | ||
496 | |||
497 | func (r Rule) validateFilter() error { | ||
498 | return r.Filter.Validate() | ||
499 | } | ||
500 | |||
501 | // Prefix - a rule can either have prefix under <filter></filter> or under | ||
502 | // <filter><and></and></filter>. This method returns the prefix from the | ||
503 | // location where it is available | ||
504 | func (r Rule) Prefix() string { | ||
505 | if r.Filter.Prefix != "" { | ||
506 | return r.Filter.Prefix | ||
507 | } | ||
508 | return r.Filter.And.Prefix | ||
509 | } | ||
510 | |||
511 | // Tags - a rule can either have tag under <filter></filter> or under | ||
512 | // <filter><and></and></filter>. This method returns all the tags from the | ||
513 | // rule in the format tag1=value1&tag2=value2 | ||
514 | func (r Rule) Tags() string { | ||
515 | ts := []Tag{r.Filter.Tag} | ||
516 | if len(r.Filter.And.Tags) != 0 { | ||
517 | ts = r.Filter.And.Tags | ||
518 | } | ||
519 | |||
520 | var buf bytes.Buffer | ||
521 | for _, t := range ts { | ||
522 | if buf.Len() > 0 { | ||
523 | buf.WriteString("&") | ||
524 | } | ||
525 | buf.WriteString(t.String()) | ||
526 | } | ||
527 | return buf.String() | ||
528 | } | ||
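The two accessors above hide whether the filter stored its data directly or inside `<And>`; a small sketch with placeholder tags:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	// A rule whose filter uses the <And> element still reports its
	// prefix and tags through the accessors.
	r := replication.Rule{
		Filter: replication.Filter{
			And: replication.And{
				Prefix: "docs/",
				Tags: []replication.Tag{
					{Key: "env", Value: "prod"},
					{Key: "team", Value: "storage"},
				},
			},
		},
	}
	fmt.Println(r.Prefix()) // docs/
	fmt.Println(r.Tags())   // env=prod&team=storage
}
```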
529 | |||
530 | // Filter - a filter for a replication configuration Rule. | ||
531 | type Filter struct { | ||
532 | XMLName xml.Name `xml:"Filter" json:"-"` | ||
533 | Prefix string `json:"Prefix,omitempty"` | ||
534 | And And `xml:"And,omitempty" json:"And,omitempty"` | ||
535 | Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` | ||
536 | } | ||
537 | |||
538 | // Validate - validates the filter element | ||
539 | func (f Filter) Validate() error { | ||
540 | // A Filter must have exactly one of Prefix, Tag, or And specified. | ||
541 | if !f.And.isEmpty() { | ||
542 | if f.Prefix != "" { | ||
543 | return errInvalidFilter | ||
544 | } | ||
545 | if !f.Tag.IsEmpty() { | ||
546 | return errInvalidFilter | ||
547 | } | ||
548 | } | ||
549 | if f.Prefix != "" { | ||
550 | if !f.Tag.IsEmpty() { | ||
551 | return errInvalidFilter | ||
552 | } | ||
553 | } | ||
554 | if !f.Tag.IsEmpty() { | ||
555 | if err := f.Tag.Validate(); err != nil { | ||
556 | return err | ||
557 | } | ||
558 | } | ||
559 | return nil | ||
560 | } | ||
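To make the exclusivity rule concrete, a short sketch; the prefix and tag are placeholders, and the second filter fails with `errInvalidFilter`:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	// A lone prefix is a valid filter.
	ok := replication.Filter{Prefix: "docs/"}
	fmt.Println(ok.Validate()) // <nil>

	// Prefix and Tag side by side must be wrapped in And instead.
	bad := replication.Filter{
		Prefix: "docs/",
		Tag:    replication.Tag{Key: "env", Value: "prod"},
	}
	fmt.Println(bad.Validate()) // invalid filter
}
```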
561 | |||
562 | // Tag - a tag for a replication configuration Rule filter. | ||
563 | type Tag struct { | ||
564 | XMLName xml.Name `json:"-"` | ||
565 | Key string `xml:"Key,omitempty" json:"Key,omitempty"` | ||
566 | Value string `xml:"Value,omitempty" json:"Value,omitempty"` | ||
567 | } | ||
568 | |||
569 | func (tag Tag) String() string { | ||
570 | if tag.IsEmpty() { | ||
571 | return "" | ||
572 | } | ||
573 | return tag.Key + "=" + tag.Value | ||
574 | } | ||
575 | |||
576 | // IsEmpty returns whether this tag is empty or not. | ||
577 | func (tag Tag) IsEmpty() bool { | ||
578 | return tag.Key == "" | ||
579 | } | ||
580 | |||
581 | // Validate checks this tag. | ||
582 | func (tag Tag) Validate() error { | ||
583 | if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 { | ||
584 | return fmt.Errorf("invalid Tag Key") | ||
585 | } | ||
586 | |||
587 | if utf8.RuneCountInString(tag.Value) > 256 { | ||
588 | return fmt.Errorf("invalid Tag Value") | ||
589 | } | ||
590 | return nil | ||
591 | } | ||
592 | |||
593 | // Destination - destination in ReplicationConfiguration. | ||
594 | type Destination struct { | ||
595 | XMLName xml.Name `xml:"Destination" json:"-"` | ||
596 | Bucket string `xml:"Bucket" json:"Bucket"` | ||
597 | StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` | ||
598 | } | ||
599 | |||
600 | // And - a tag to combine a prefix and multiple tags for replication configuration rule. | ||
601 | type And struct { | ||
602 | XMLName xml.Name `xml:"And,omitempty" json:"-"` | ||
603 | Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` | ||
604 | Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` | ||
605 | } | ||
606 | |||
607 | // isEmpty returns true if neither a Prefix nor Tags are set | ||
608 | func (a And) isEmpty() bool { | ||
609 | return len(a.Tags) == 0 && a.Prefix == "" | ||
610 | } | ||
611 | |||
612 | // Status represents Enabled/Disabled status | ||
613 | type Status string | ||
614 | |||
615 | // Supported status types | ||
616 | const ( | ||
617 | Enabled Status = "Enabled" | ||
618 | Disabled Status = "Disabled" | ||
619 | ) | ||
620 | |||
621 | // DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html | ||
622 | type DeleteMarkerReplication struct { | ||
623 | Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default | ||
624 | } | ||
625 | |||
626 | // IsEmpty returns true if DeleteMarkerReplication is not set | ||
627 | func (d DeleteMarkerReplication) IsEmpty() bool { | ||
628 | return len(d.Status) == 0 | ||
629 | } | ||
630 | |||
631 | // DeleteReplication - whether versioned deletes are replicated - this | ||
632 | // is a MinIO specific extension | ||
633 | type DeleteReplication struct { | ||
634 | Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default | ||
635 | } | ||
636 | |||
637 | // IsEmpty returns true if DeleteReplication is not set | ||
638 | func (d DeleteReplication) IsEmpty() bool { | ||
639 | return len(d.Status) == 0 | ||
640 | } | ||
641 | |||
642 | // ReplicaModifications specifies if replica modification sync is enabled | ||
643 | type ReplicaModifications struct { | ||
644 | Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default | ||
645 | } | ||
646 | |||
647 | // SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration. | ||
648 | type SourceSelectionCriteria struct { | ||
649 | ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"` | ||
650 | } | ||
651 | |||
652 | // IsValid - checks whether SourceSelectionCriteria is valid or not. | ||
653 | func (s SourceSelectionCriteria) IsValid() bool { | ||
654 | return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled | ||
655 | } | ||
656 | |||
657 | // Validate source selection criteria | ||
658 | func (s SourceSelectionCriteria) Validate() error { | ||
659 | if (s == SourceSelectionCriteria{}) { | ||
660 | return nil | ||
661 | } | ||
662 | if !s.IsValid() { | ||
663 | return fmt.Errorf("invalid ReplicaModification status") | ||
664 | } | ||
665 | return nil | ||
666 | } | ||
667 | |||
668 | // ExistingObjectReplication - whether existing object replication is enabled | ||
669 | type ExistingObjectReplication struct { | ||
670 | Status Status `xml:"Status"` // should be set to "Disabled" by default | ||
671 | } | ||
672 | |||
673 | // IsEmpty returns true if ExistingObjectReplication is not set | ||
674 | func (e ExistingObjectReplication) IsEmpty() bool { | ||
675 | return len(e.Status) == 0 | ||
676 | } | ||
677 | |||
678 | // Validate checks that the status is either Enabled or Disabled. | ||
679 | func (e ExistingObjectReplication) Validate() error { | ||
680 | if e.IsEmpty() { | ||
681 | return nil | ||
682 | } | ||
683 | if e.Status != Disabled && e.Status != Enabled { | ||
684 | return fmt.Errorf("invalid ExistingObjectReplication status") | ||
685 | } | ||
686 | return nil | ||
687 | } | ||
688 | |||
689 | // TargetMetrics represents inline replication metrics | ||
690 | // such as pending, failed and completed bytes in total for a bucket remote target | ||
691 | type TargetMetrics struct { | ||
692 | // Completed count | ||
693 | ReplicatedCount uint64 `json:"replicationCount,omitempty"` | ||
694 | // Completed size in bytes | ||
695 | ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` | ||
696 | // Bandwidth limit in bytes/sec for this target | ||
697 | BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"` | ||
698 | // Current bandwidth used in bytes/sec for this target | ||
699 | CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"` | ||
700 | // errors seen in replication in last minute, hour and total | ||
701 | Failed TimedErrStats `json:"failed,omitempty"` | ||
702 | // Deprecated fields | ||
703 | // Pending size in bytes | ||
704 | PendingSize uint64 `json:"pendingReplicationSize,omitempty"` | ||
705 | // Total Replica size in bytes | ||
706 | ReplicaSize uint64 `json:"replicaSize,omitempty"` | ||
707 | // Failed size in bytes | ||
708 | FailedSize uint64 `json:"failedReplicationSize,omitempty"` | ||
709 | // Total number of pending operations including metadata updates | ||
710 | PendingCount uint64 `json:"pendingReplicationCount,omitempty"` | ||
711 | // Total number of failed operations including metadata updates | ||
712 | FailedCount uint64 `json:"failedReplicationCount,omitempty"` | ||
713 | } | ||
714 | |||
715 | // Metrics represents inline replication metrics for a bucket. | ||
716 | type Metrics struct { | ||
717 | Stats map[string]TargetMetrics | ||
718 | // Completed size in bytes across targets | ||
719 | ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` | ||
720 | // Total Replica size in bytes across targets | ||
721 | ReplicaSize uint64 `json:"replicaSize,omitempty"` | ||
722 | // Total Replica counts | ||
723 | ReplicaCount int64 `json:"replicaCount,omitempty"` | ||
724 | // Total Replicated count | ||
725 | ReplicatedCount int64 `json:"replicationCount,omitempty"` | ||
726 | // errors seen in replication in last minute, hour and total | ||
727 | Errors TimedErrStats `json:"failed,omitempty"` | ||
728 | // Total number of entries that are queued for replication | ||
729 | QStats InQueueMetric `json:"queued"` | ||
730 | // Deprecated fields | ||
731 | // Total Pending size in bytes across targets | ||
732 | PendingSize uint64 `json:"pendingReplicationSize,omitempty"` | ||
733 | // Failed size in bytes across targets | ||
734 | FailedSize uint64 `json:"failedReplicationSize,omitempty"` | ||
735 | // Total number of pending operations including metadata updates across targets | ||
736 | PendingCount uint64 `json:"pendingReplicationCount,omitempty"` | ||
737 | // Total number of failed operations including metadata updates across targets | ||
738 | FailedCount uint64 `json:"failedReplicationCount,omitempty"` | ||
739 | } | ||
740 | |||
741 | // RStat - has count and bytes for replication metrics | ||
742 | type RStat struct { | ||
743 | Count float64 `json:"count"` | ||
744 | Bytes int64 `json:"bytes"` | ||
745 | } | ||
746 | |||
747 | // Add two RStat | ||
748 | func (r RStat) Add(r1 RStat) RStat { | ||
749 | return RStat{ | ||
750 | Count: r.Count + r1.Count, | ||
751 | Bytes: r.Bytes + r1.Bytes, | ||
752 | } | ||
753 | } | ||
754 | |||
755 | // TimedErrStats holds error stats for a time period | ||
756 | type TimedErrStats struct { | ||
757 | LastMinute RStat `json:"lastMinute"` | ||
758 | LastHour RStat `json:"lastHour"` | ||
759 | Totals RStat `json:"totals"` | ||
760 | } | ||
761 | |||
762 | // Add two TimedErrStats | ||
763 | func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats { | ||
764 | return TimedErrStats{ | ||
765 | LastMinute: te.LastMinute.Add(o.LastMinute), | ||
766 | LastHour: te.LastHour.Add(o.LastHour), | ||
767 | Totals: te.Totals.Add(o.Totals), | ||
768 | } | ||
769 | } | ||
770 | |||
771 | // ResyncTargetsInfo provides replication target information to resync replicated data. | ||
772 | type ResyncTargetsInfo struct { | ||
773 | Targets []ResyncTarget `json:"target,omitempty"` | ||
774 | } | ||
775 | |||
776 | // ResyncTarget provides the replica resources and resetID to initiate resync replication. | ||
777 | type ResyncTarget struct { | ||
778 | Arn string `json:"arn"` | ||
779 | ResetID string `json:"resetid"` | ||
780 | StartTime time.Time `json:"startTime,omitempty"` | ||
781 | EndTime time.Time `json:"endTime,omitempty"` | ||
782 | // Status of resync operation | ||
783 | ResyncStatus string `json:"resyncStatus,omitempty"` | ||
784 | // Completed size in bytes | ||
785 | ReplicatedSize int64 `json:"completedReplicationSize,omitempty"` | ||
786 | // Failed size in bytes | ||
787 | FailedSize int64 `json:"failedReplicationSize,omitempty"` | ||
788 | // Total number of failed operations | ||
789 | FailedCount int64 `json:"failedReplicationCount,omitempty"` | ||
790 | // Total number of completed operations | ||
791 | ReplicatedCount int64 `json:"replicationCount,omitempty"` | ||
792 | // Last bucket/object replicated. | ||
793 | Bucket string `json:"bucket,omitempty"` | ||
794 | Object string `json:"object,omitempty"` | ||
795 | } | ||
796 | |||
797 | // XferStats holds transfer rate info for uploads/sec | ||
798 | type XferStats struct { | ||
799 | AvgRate float64 `json:"avgRate"` | ||
800 | PeakRate float64 `json:"peakRate"` | ||
801 | CurrRate float64 `json:"currRate"` | ||
802 | } | ||
803 | |||
804 | // Merge two XferStats | ||
805 | func (x *XferStats) Merge(x1 XferStats) { | ||
806 | x.AvgRate += x1.AvgRate | ||
807 | x.PeakRate += x1.PeakRate | ||
808 | x.CurrRate += x1.CurrRate | ||
809 | } | ||
810 | |||
811 | // QStat holds count and bytes for objects in replication queue | ||
812 | type QStat struct { | ||
813 | Count float64 `json:"count"` | ||
814 | Bytes float64 `json:"bytes"` | ||
815 | } | ||
816 | |||
817 | // Add 2 QStat entries | ||
818 | func (q *QStat) Add(q1 QStat) { | ||
819 | q.Count += q1.Count | ||
820 | q.Bytes += q1.Bytes | ||
821 | } | ||
822 | |||
823 | // InQueueMetric holds stats for objects in replication queue | ||
824 | type InQueueMetric struct { | ||
825 | Curr QStat `json:"curr" msg:"cq"` | ||
826 | Avg QStat `json:"avg" msg:"aq"` | ||
827 | Max QStat `json:"peak" msg:"pq"` | ||
828 | } | ||
829 | |||
830 | // MetricName name of replication metric | ||
831 | type MetricName string | ||
832 | |||
833 | const ( | ||
834 | // Large is a metric name for large objects >=128MiB | ||
835 | Large MetricName = "Large" | ||
836 | // Small is a metric name for objects <128MiB size | ||
837 | Small MetricName = "Small" | ||
838 | // Total is a metric name for total objects | ||
839 | Total MetricName = "Total" | ||
840 | ) | ||
841 | |||
842 | // WorkerStat has stats on number of replication workers | ||
843 | type WorkerStat struct { | ||
844 | Curr int32 `json:"curr"` | ||
845 | Avg float32 `json:"avg"` | ||
846 | Max int32 `json:"max"` | ||
847 | } | ||
848 | |||
849 | // ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes | ||
850 | // and number of entries that failed replication after 3 retries | ||
851 | type ReplMRFStats struct { | ||
852 | LastFailedCount uint64 `json:"failedCount_last5min"` | ||
853 | // Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start. | ||
854 | TotalDroppedCount uint64 `json:"droppedCount_since_uptime"` | ||
855 | // Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start. | ||
856 | TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"` | ||
857 | } | ||
858 | |||
859 | // ReplQNodeStats holds stats for a node in replication queue | ||
860 | type ReplQNodeStats struct { | ||
861 | NodeName string `json:"nodeName"` | ||
862 | Uptime int64 `json:"uptime"` | ||
863 | Workers WorkerStat `json:"activeWorkers"` | ||
864 | |||
865 | XferStats map[MetricName]XferStats `json:"transferSummary"` | ||
866 | TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"` | ||
867 | |||
868 | QStats InQueueMetric `json:"queueStats"` | ||
869 | MRFStats ReplMRFStats `json:"mrfStats"` | ||
870 | } | ||
871 | |||
872 | // ReplQueueStats holds stats for replication queue across nodes | ||
873 | type ReplQueueStats struct { | ||
874 | Nodes []ReplQNodeStats `json:"nodes"` | ||
875 | } | ||
876 | |||
877 | // Workers returns aggregate worker stats across all nodes: Curr and Avg are averaged over nodes, Max is the peak | ||
878 | func (q ReplQueueStats) Workers() (tot WorkerStat) { | ||
879 | for _, node := range q.Nodes { | ||
880 | tot.Avg += node.Workers.Avg | ||
881 | tot.Curr += node.Workers.Curr | ||
882 | if tot.Max < node.Workers.Max { | ||
883 | tot.Max = node.Workers.Max | ||
884 | } | ||
885 | } | ||
886 | if len(q.Nodes) > 0 { | ||
887 | tot.Avg /= float32(len(q.Nodes)) | ||
888 | tot.Curr /= int32(len(q.Nodes)) | ||
889 | } | ||
890 | return tot | ||
891 | } | ||
892 | |||
893 | // qStatSummary returns cluster level stats for objects in replication queue | ||
894 | func (q ReplQueueStats) qStatSummary() InQueueMetric { | ||
895 | m := InQueueMetric{} | ||
896 | for _, v := range q.Nodes { | ||
897 | m.Avg.Add(v.QStats.Avg) | ||
898 | m.Curr.Add(v.QStats.Curr) | ||
899 | if m.Max.Count < v.QStats.Max.Count { | ||
900 | m.Max.Add(v.QStats.Max) | ||
901 | } | ||
902 | } | ||
903 | return m | ||
904 | } | ||
905 | |||
906 | // ReplQStats holds stats for objects in replication queue | ||
907 | type ReplQStats struct { | ||
908 | Uptime int64 `json:"uptime"` | ||
909 | Workers WorkerStat `json:"workers"` | ||
910 | |||
911 | XferStats map[MetricName]XferStats `json:"xferStats"` | ||
912 | TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"` | ||
913 | |||
914 | QStats InQueueMetric `json:"qStats"` | ||
915 | MRFStats ReplMRFStats `json:"mrfStats"` | ||
916 | } | ||
917 | |||
918 | // QStats aggregates per-node queue, transfer and MRF stats into cluster level stats for the replication queue | ||
919 | func (q ReplQueueStats) QStats() (r ReplQStats) { | ||
920 | r.QStats = q.qStatSummary() | ||
921 | r.XferStats = make(map[MetricName]XferStats) | ||
922 | r.TgtXferStats = make(map[string]map[MetricName]XferStats) | ||
923 | r.Workers = q.Workers() | ||
924 | |||
925 | for _, node := range q.Nodes { | ||
926 | for arn := range node.TgtXferStats { | ||
927 | xmap, ok := node.TgtXferStats[arn] | ||
928 | if !ok { | ||
929 | xmap = make(map[MetricName]XferStats) | ||
930 | } | ||
931 | for m, v := range xmap { | ||
932 | st, ok := r.XferStats[m] | ||
933 | if !ok { | ||
934 | st = XferStats{} | ||
935 | } | ||
936 | st.AvgRate += v.AvgRate | ||
937 | st.CurrRate += v.CurrRate | ||
938 | st.PeakRate = math.Max(st.PeakRate, v.PeakRate) | ||
939 | if _, ok := r.TgtXferStats[arn]; !ok { | ||
940 | r.TgtXferStats[arn] = make(map[MetricName]XferStats) | ||
941 | } | ||
942 | r.TgtXferStats[arn][m] = st | ||
943 | } | ||
944 | } | ||
945 | for k, v := range node.XferStats { | ||
946 | st, ok := r.XferStats[k] | ||
947 | if !ok { | ||
948 | st = XferStats{} | ||
949 | } | ||
950 | st.AvgRate += v.AvgRate | ||
951 | st.CurrRate += v.CurrRate | ||
952 | st.PeakRate = math.Max(st.PeakRate, v.PeakRate) | ||
953 | r.XferStats[k] = st | ||
954 | } | ||
955 | r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount | ||
956 | r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount | ||
957 | r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes | ||
958 | r.Uptime += node.Uptime | ||
959 | } | ||
960 | if len(q.Nodes) > 0 { | ||
961 | r.Uptime /= int64(len(q.Nodes)) // average uptime | ||
962 | } | ||
963 | return | ||
964 | } | ||
965 | |||
966 | // MetricsV2 represents replication metrics for a bucket. | ||
967 | type MetricsV2 struct { | ||
968 | Uptime int64 `json:"uptime"` | ||
969 | CurrentStats Metrics `json:"currStats"` | ||
970 | QueueStats ReplQueueStats `json:"queueStats"` | ||
971 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go new file mode 100644 index 0000000..056e78a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go | |||
@@ -0,0 +1,411 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package s3utils | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "encoding/hex" | ||
23 | "errors" | ||
24 | "net" | ||
25 | "net/url" | ||
26 | "regexp" | ||
27 | "sort" | ||
28 | "strings" | ||
29 | "unicode/utf8" | ||
30 | ) | ||
31 | |||
32 | // sentinelURL is the default url value, which is invalid. | ||
33 | var sentinelURL = url.URL{} | ||
34 | |||
35 | // IsValidDomain validates if input string is a valid domain name. | ||
36 | func IsValidDomain(host string) bool { | ||
37 | // See RFC 1035, RFC 3696. | ||
38 | host = strings.TrimSpace(host) | ||
39 | if len(host) == 0 || len(host) > 255 { | ||
40 | return false | ||
41 | } | ||
42 | // host cannot start or end with "-" | ||
43 | if host[len(host)-1:] == "-" || host[:1] == "-" { | ||
44 | return false | ||
45 | } | ||
46 | // host cannot start or end with "_" | ||
47 | if host[len(host)-1:] == "_" || host[:1] == "_" { | ||
48 | return false | ||
49 | } | ||
50 | // host cannot start with a "." | ||
51 | if host[:1] == "." { | ||
52 | return false | ||
53 | } | ||
54 | // Reject characters that can never appear in a valid host name. | ||
55 | if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") { | ||
56 | return false | ||
57 | } | ||
58 | // No need for a full regexp match, since the check above is non-exhaustive. | ||
59 | // We treat the host as valid here and let it fail later if it is not. | ||
60 | return true | ||
61 | } | ||
62 | |||
63 | // IsValidIP parses input string for ip address validity. | ||
64 | func IsValidIP(ip string) bool { | ||
65 | return net.ParseIP(ip) != nil | ||
66 | } | ||
67 | |||
68 | // IsVirtualHostSupported - verifies if bucketName can be part of | ||
69 | // virtual host. Currently Amazon S3, Google Cloud Storage and | ||
70 | // Aliyun OSS support this. | ||
71 | func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool { | ||
72 | if endpointURL == sentinelURL { | ||
73 | return false | ||
74 | } | ||
75 | // bucketName can be valid but '.' in the hostname will fail SSL | ||
76 | // certificate validation. So do not use host-style for such buckets. | ||
77 | if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") { | ||
78 | return false | ||
79 | } | ||
80 | // Otherwise, supported only for Amazon S3, Google Cloud Storage and Aliyun OSS endpoints. | ||
81 | return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL) | ||
82 | } | ||
83 | |||
84 | // Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region | ||
85 | |||
86 | // amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style. | ||
87 | var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`) | ||
88 | |||
89 | // amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack. | ||
90 | var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`) | ||
91 | |||
92 | // amazonS3HostFIPS - regular expression used to determine if an arg is s3 FIPS host. | ||
93 | var amazonS3HostFIPS = regexp.MustCompile(`^s3-fips.(.*?).amazonaws.com$`) | ||
94 | |||
95 | // amazonS3HostFIPSDualStack - regular expression used to determine if an arg is s3 FIPS host dualstack. | ||
96 | var amazonS3HostFIPSDualStack = regexp.MustCompile(`^s3-fips.dualstack.(.*?).amazonaws.com$`) | ||
97 | |||
98 | // amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style. | ||
99 | var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`) | ||
100 | |||
101 | // amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host. | ||
102 | var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`) | ||
103 | |||
104 | // amazonS3ChinaHostDualStack - regular expression used to determine if the arg is s3 china host dualstack. | ||
105 | var amazonS3ChinaHostDualStack = regexp.MustCompile(`^s3.dualstack.(cn.*?).amazonaws.com.cn$`) | ||
106 | |||
107 | // Regular expression used to determine if the arg is elb host. | ||
108 | var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`) | ||
109 | |||
110 | // Regular expression used to determine if the arg is elb host in china. | ||
111 | var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`) | ||
112 | |||
113 | // amazonS3HostPrivateLink - regular expression used to determine if an arg is s3 host in AWS PrivateLink interface endpoints style | ||
114 | var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`) | ||
115 | |||
116 | // GetRegionFromURL - returns a region from url host. | ||
117 | func GetRegionFromURL(endpointURL url.URL) string { | ||
118 | if endpointURL == sentinelURL { | ||
119 | return "" | ||
120 | } | ||
121 | if endpointURL.Host == "s3-external-1.amazonaws.com" { | ||
122 | return "" | ||
123 | } | ||
124 | |||
125 | // if elb's are used we cannot calculate which region it may be, just return empty. | ||
126 | if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) { | ||
127 | return "" | ||
128 | } | ||
129 | |||
130 | // We check for FIPS dualstack matching first to avoid the non-greedy | ||
131 | // regex for FIPS non-dualstack matching a dualstack URL | ||
132 | parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host) | ||
133 | if len(parts) > 1 { | ||
134 | return parts[1] | ||
135 | } | ||
136 | |||
137 | parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host) | ||
138 | if len(parts) > 1 { | ||
139 | return parts[1] | ||
140 | } | ||
141 | |||
142 | parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host) | ||
143 | if len(parts) > 1 { | ||
144 | return parts[1] | ||
145 | } | ||
146 | |||
147 | parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host) | ||
148 | if len(parts) > 1 { | ||
149 | return parts[1] | ||
150 | } | ||
151 | |||
152 | parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host) | ||
153 | if len(parts) > 1 { | ||
154 | return parts[1] | ||
155 | } | ||
156 | |||
157 | parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host) | ||
158 | if len(parts) > 1 { | ||
159 | return parts[1] | ||
160 | } | ||
161 | |||
162 | parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host) | ||
163 | if len(parts) > 1 { | ||
164 | return parts[1] | ||
165 | } | ||
166 | |||
167 | parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host) | ||
168 | if len(parts) > 1 { | ||
169 | return parts[1] | ||
170 | } | ||
171 | |||
172 | return "" | ||
173 | } | ||
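The match order above matters (FIPS dualstack before FIPS, dualstack before the generic dot style); a sketch of the regions extracted for a few representative hosts (play.min.io stands in for any non-AWS endpoint):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	for _, endpoint := range []string{
		"https://s3.us-east-2.amazonaws.com",           // dot style
		"https://s3-us-west-1.amazonaws.com",           // hyphenated style
		"https://s3.dualstack.eu-west-1.amazonaws.com", // dualstack
		"https://play.min.io",                          // not AWS: empty region
	} {
		u, err := url.Parse(endpoint)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s => %q\n", u.Host, s3utils.GetRegionFromURL(*u))
	}
	// Output regions: "us-east-2", "us-west-1", "eu-west-1", ""
}
```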
174 | |||
175 | // IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint. | ||
176 | func IsAliyunOSSEndpoint(endpointURL url.URL) bool { | ||
177 | return strings.HasSuffix(endpointURL.Host, "aliyuncs.com") | ||
178 | } | ||
179 | |||
180 | // IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint. | ||
181 | func IsAmazonEndpoint(endpointURL url.URL) bool { | ||
182 | if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" { | ||
183 | return true | ||
184 | } | ||
185 | return GetRegionFromURL(endpointURL) != "" | ||
186 | } | ||
187 | |||
188 | // IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint. | ||
189 | func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { | ||
190 | if endpointURL == sentinelURL { | ||
191 | return false | ||
192 | } | ||
193 | return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || | ||
194 | endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" || | ||
195 | IsAmazonFIPSGovCloudEndpoint(endpointURL)) | ||
196 | } | ||
197 | |||
198 | // IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud. | ||
199 | func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { | ||
200 | if endpointURL == sentinelURL { | ||
201 | return false | ||
202 | } | ||
203 | return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-") | ||
204 | } | ||
205 | |||
206 | // IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. | ||
207 | // See https://aws.amazon.com/compliance/fips. | ||
208 | func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { | ||
209 | if endpointURL == sentinelURL { | ||
210 | return false | ||
211 | } | ||
212 | return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com") | ||
213 | } | ||
214 | |||
215 | // IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint | ||
216 | // See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html. | ||
217 | func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool { | ||
218 | if endpointURL == sentinelURL { | ||
219 | return false | ||
220 | } | ||
221 | return amazonS3HostPrivateLink.MatchString(endpointURL.Host) | ||
222 | } | ||
223 | |||
224 | // IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. | ||
225 | func IsGoogleEndpoint(endpointURL url.URL) bool { | ||
226 | if endpointURL == sentinelURL { | ||
227 | return false | ||
228 | } | ||
229 | return endpointURL.Host == "storage.googleapis.com" | ||
230 | } | ||
231 | |||
232 | // Expects ascii encoded strings - from output of EncodePath | ||
233 | func percentEncodeSlash(s string) string { | ||
234 | return strings.ReplaceAll(s, "/", "%2F") | ||
235 | } | ||
236 | |||
237 | // QueryEncode - encodes query values in their URL encoded form. In | ||
238 | // addition to the percent encoding performed by EncodePath() used | ||
239 | // here, it also percent encodes '/' (forward slash) | ||
240 | func QueryEncode(v url.Values) string { | ||
241 | if v == nil { | ||
242 | return "" | ||
243 | } | ||
244 | var buf bytes.Buffer | ||
245 | keys := make([]string, 0, len(v)) | ||
246 | for k := range v { | ||
247 | keys = append(keys, k) | ||
248 | } | ||
249 | sort.Strings(keys) | ||
250 | for _, k := range keys { | ||
251 | vs := v[k] | ||
252 | prefix := percentEncodeSlash(EncodePath(k)) + "=" | ||
253 | for _, v := range vs { | ||
254 | if buf.Len() > 0 { | ||
255 | buf.WriteByte('&') | ||
256 | } | ||
257 | buf.WriteString(prefix) | ||
258 | buf.WriteString(percentEncodeSlash(EncodePath(v))) | ||
259 | } | ||
260 | } | ||
261 | return buf.String() | ||
262 | } | ||
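A short sketch of the resulting encoding; keys come out sorted, and the forward slash in the value is escaped to %2F:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	v := url.Values{}
	v.Set("prefix", "photos/2021")
	v.Set("marker", "a b")
	// Keys are sorted; '/' becomes %2F and ' ' becomes %20.
	fmt.Println(s3utils.QueryEncode(v))
	// Output: marker=a%20b&prefix=photos%2F2021
}
```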
263 | |||
264 | // TagDecode - decodes canonical tag into map of key and value. | ||
265 | func TagDecode(ctag string) map[string]string { | ||
266 | if ctag == "" { | ||
267 | return map[string]string{} | ||
268 | } | ||
269 | tags := strings.Split(ctag, "&") | ||
270 | tagMap := make(map[string]string, len(tags)) | ||
271 | var err error | ||
272 | for _, tag := range tags { | ||
273 | kvs := strings.SplitN(tag, "=", 2) | ||
274 | if len(kvs) == 0 { | ||
275 | return map[string]string{} | ||
276 | } | ||
277 | if len(kvs) == 1 { | ||
278 | return map[string]string{} | ||
279 | } | ||
280 | tagMap[kvs[0]], err = url.PathUnescape(kvs[1]) | ||
281 | if err != nil { | ||
282 | continue | ||
283 | } | ||
284 | } | ||
285 | return tagMap | ||
286 | } | ||
287 | |||
288 | // TagEncode - encodes tag values in their URL encoded form. In | ||
289 | // addition to the percent encoding performed by EncodePath() used | ||
290 | // here, it also percent encodes '/' (forward slash) | ||
291 | func TagEncode(tags map[string]string) string { | ||
292 | if tags == nil { | ||
293 | return "" | ||
294 | } | ||
295 | values := url.Values{} | ||
296 | for k, v := range tags { | ||
297 | values[k] = []string{v} | ||
298 | } | ||
299 | return QueryEncode(values) | ||
300 | } | ||
301 | |||
302 | // if the object name matches the reserved character set, no need to encode it | ||
303 | var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") | ||
304 | |||
305 | // EncodePath encodes a string from its UTF-8 byte representation into percent-encoded hex escape sequences | ||
306 | |||
307 | // This is necessary since the regular url.Parse() and url.QueryEscape() functions do not fully support UTF-8; | ||
308 | // non-English characters cannot be encoded reliably due to the way the standard escaping is written | ||
309 | |||
310 | // This function, on the other hand, is a direct replacement for that escaping technique and supports | ||
311 | // pretty much every UTF-8 character. | ||
312 | func EncodePath(pathName string) string { | ||
313 | if reservedObjectNames.MatchString(pathName) { | ||
314 | return pathName | ||
315 | } | ||
316 | var encodedPathname strings.Builder | ||
317 | for _, s := range pathName { | ||
318 | if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) | ||
319 | encodedPathname.WriteRune(s) | ||
320 | continue | ||
321 | } | ||
322 | switch s { | ||
323 | case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) | ||
324 | encodedPathname.WriteRune(s) | ||
325 | continue | ||
326 | default: | ||
327 | l := utf8.RuneLen(s) | ||
328 | if l < 0 { | ||
329 | // if utf8 cannot encode the rune, return the original string as-is | ||
330 | return pathName | ||
331 | } | ||
332 | u := make([]byte, l) | ||
333 | utf8.EncodeRune(u, s) | ||
334 | for _, r := range u { | ||
335 | hex := hex.EncodeToString([]byte{r}) | ||
336 | encodedPathname.WriteString("%" + strings.ToUpper(hex)) | ||
337 | } | ||
338 | } | ||
339 | } | ||
340 | return encodedPathname.String() | ||
341 | } | ||
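A sketch exercising both paths through `EncodePath`: the reserved-name fast path, and per-rune UTF-8 percent-encoding for everything else:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	// Unreserved characters (and '/') pass through untouched.
	fmt.Println(s3utils.EncodePath("photos/2021/self.jpg"))
	// Output: photos/2021/self.jpg

	// Everything else is percent-encoded byte by byte from its UTF-8 form.
	fmt.Println(s3utils.EncodePath("résumé docs"))
	// Output: r%C3%A9sum%C3%A9%20docs
}
```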
342 | |||
343 | // We support '.' in bucket names, but we fall back to using path | ||
344 | // style requests instead for such buckets. | ||
345 | var ( | ||
346 | validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) | ||
347 | validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) | ||
348 | ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) | ||
349 | ) | ||
350 | |||
351 | // Common checker for both stricter and basic validation. | ||
352 | func checkBucketNameCommon(bucketName string, strict bool) (err error) { | ||
353 | if strings.TrimSpace(bucketName) == "" { | ||
354 | return errors.New("Bucket name cannot be empty") | ||
355 | } | ||
356 | if len(bucketName) < 3 { | ||
357 | return errors.New("Bucket name cannot be shorter than 3 characters") | ||
358 | } | ||
359 | if len(bucketName) > 63 { | ||
360 | return errors.New("Bucket name cannot be longer than 63 characters") | ||
361 | } | ||
362 | if ipAddress.MatchString(bucketName) { | ||
363 | return errors.New("Bucket name cannot be an ip address") | ||
364 | } | ||
365 | if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { | ||
366 | return errors.New("Bucket name contains invalid characters") | ||
367 | } | ||
368 | if strict { | ||
369 | if !validBucketNameStrict.MatchString(bucketName) { | ||
370 | err = errors.New("Bucket name contains invalid characters") | ||
371 | } | ||
372 | return err | ||
373 | } | ||
374 | if !validBucketName.MatchString(bucketName) { | ||
375 | err = errors.New("Bucket name contains invalid characters") | ||
376 | } | ||
377 | return err | ||
378 | } | ||
379 | |||
380 | // CheckValidBucketName - checks if we have a valid input bucket name. | ||
381 | func CheckValidBucketName(bucketName string) (err error) { | ||
382 | return checkBucketNameCommon(bucketName, false) | ||
383 | } | ||
384 | |||
385 | // CheckValidBucketNameStrict - checks if we have a valid input bucket name. | ||
386 | // This is a stricter version. | ||
387 | // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html | ||
388 | func CheckValidBucketNameStrict(bucketName string) (err error) { | ||
389 | return checkBucketNameCommon(bucketName, true) | ||
390 | } | ||
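A sketch contrasting the two validators; the bucket names are placeholders. Mixed case and '_' pass the relaxed check but fail the strict one, and IP addresses fail both:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	// Relaxed check: mixed case, '_' and ':' are tolerated.
	fmt.Println(s3utils.CheckValidBucketName("My_Bucket")) // <nil>

	// Strict check: lowercase letters, digits, '.' and '-' only.
	fmt.Println(s3utils.CheckValidBucketNameStrict("My_Bucket")) // Bucket name contains invalid characters

	// IP addresses are rejected by both validators.
	fmt.Println(s3utils.CheckValidBucketName("192.168.1.1")) // Bucket name cannot be an ip address
}
```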
391 | |||
392 | // CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. | ||
393 | // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html | ||
394 | func CheckValidObjectNamePrefix(objectName string) error { | ||
395 | if len(objectName) > 1024 { | ||
396 | return errors.New("Object name cannot be longer than 1024 characters") | ||
397 | } | ||
398 | if !utf8.ValidString(objectName) { | ||
399 | return errors.New("Object name with non UTF-8 strings are not supported") | ||
400 | } | ||
401 | return nil | ||
402 | } | ||
403 | |||
404 | // CheckValidObjectName - checks if we have a valid input object name. | ||
405 | // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html | ||
406 | func CheckValidObjectName(objectName string) error { | ||
407 | if strings.TrimSpace(objectName) == "" { | ||
408 | return errors.New("Object name cannot be empty") | ||
409 | } | ||
410 | return CheckValidObjectNamePrefix(objectName) | ||
411 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go new file mode 100644 index 0000000..c35e58e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go | |||
@@ -0,0 +1,200 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package set | ||
19 | |||
20 | import ( | ||
21 | "fmt" | ||
22 | "sort" | ||
23 | |||
24 | jsoniter "github.com/json-iterator/go" | ||
25 | ) | ||
26 | |||
27 | // StringSet - uses map as set of strings. | ||
28 | type StringSet map[string]struct{} | ||
29 | |||
30 | var json = jsoniter.ConfigCompatibleWithStandardLibrary | ||
31 | |||
32 | // ToSlice - returns StringSet as string slice. | ||
33 | func (set StringSet) ToSlice() []string { | ||
34 | keys := make([]string, 0, len(set)) | ||
35 | for k := range set { | ||
36 | keys = append(keys, k) | ||
37 | } | ||
38 | sort.Strings(keys) | ||
39 | return keys | ||
40 | } | ||
41 | |||
42 | // IsEmpty - returns whether the set is empty or not. | ||
43 | func (set StringSet) IsEmpty() bool { | ||
44 | return len(set) == 0 | ||
45 | } | ||
46 | |||
47 | // Add - adds string to the set. | ||
48 | func (set StringSet) Add(s string) { | ||
49 | set[s] = struct{}{} | ||
50 | } | ||
51 | |||
52 | // Remove - removes string in the set. It does nothing if string does not exist in the set. | ||
53 | func (set StringSet) Remove(s string) { | ||
54 | delete(set, s) | ||
55 | } | ||
56 | |||
57 | // Contains - checks if string is in the set. | ||
58 | func (set StringSet) Contains(s string) bool { | ||
59 | _, ok := set[s] | ||
60 | return ok | ||
61 | } | ||
62 | |||
63 | // FuncMatch - returns a new set containing each value that passes the match function. | ||
64 | // A 'matchFn' should accept an element of the set as its first argument and | ||
65 | // 'matchString' as its second argument. The function can apply any logic to | ||
66 | // compare the two arguments and should return true to include the element in | ||
67 | // the output set; otherwise the element is ignored. | ||
68 | func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { | ||
69 | nset := NewStringSet() | ||
70 | for k := range set { | ||
71 | if matchFn(k, matchString) { | ||
72 | nset.Add(k) | ||
73 | } | ||
74 | } | ||
75 | return nset | ||
76 | } | ||
77 | |||
78 | // ApplyFunc - returns a new set containing each value processed by 'applyFn'. | ||
79 | // An 'applyFn' should accept an element of the set as its argument and return | ||
80 | // a processed string. The function can apply any logic to derive the processed | ||
81 | // string. | ||
82 | func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { | ||
83 | nset := NewStringSet() | ||
84 | for k := range set { | ||
85 | nset.Add(applyFn(k)) | ||
86 | } | ||
87 | return nset | ||
88 | } | ||
89 | |||
90 | // Equals - checks whether the given set is equal to the current set. | ||
91 | func (set StringSet) Equals(sset StringSet) bool { | ||
92 | // If the lengths of the two sets differ, they cannot be | ||
93 | // equal. | ||
94 | if len(set) != len(sset) { | ||
95 | return false | ||
96 | } | ||
97 | |||
98 | // As both sets are equal in length, check that each element is present in the given set. | ||
99 | for k := range set { | ||
100 | if _, ok := sset[k]; !ok { | ||
101 | return false | ||
102 | } | ||
103 | } | ||
104 | |||
105 | return true | ||
106 | } | ||
107 | |||
108 | // Intersection - returns the intersection with given set as new set. | ||
109 | func (set StringSet) Intersection(sset StringSet) StringSet { | ||
110 | nset := NewStringSet() | ||
111 | for k := range set { | ||
112 | if _, ok := sset[k]; ok { | ||
113 | nset.Add(k) | ||
114 | } | ||
115 | } | ||
116 | |||
117 | return nset | ||
118 | } | ||
119 | |||
120 | // Difference - returns the difference with given set as new set. | ||
121 | func (set StringSet) Difference(sset StringSet) StringSet { | ||
122 | nset := NewStringSet() | ||
123 | for k := range set { | ||
124 | if _, ok := sset[k]; !ok { | ||
125 | nset.Add(k) | ||
126 | } | ||
127 | } | ||
128 | |||
129 | return nset | ||
130 | } | ||
131 | |||
132 | // Union - returns the union with given set as new set. | ||
133 | func (set StringSet) Union(sset StringSet) StringSet { | ||
134 | nset := NewStringSet() | ||
135 | for k := range set { | ||
136 | nset.Add(k) | ||
137 | } | ||
138 | |||
139 | for k := range sset { | ||
140 | nset.Add(k) | ||
141 | } | ||
142 | |||
143 | return nset | ||
144 | } | ||
145 | |||
146 | // MarshalJSON - converts to JSON data. | ||
147 | func (set StringSet) MarshalJSON() ([]byte, error) { | ||
148 | return json.Marshal(set.ToSlice()) | ||
149 | } | ||
150 | |||
151 | // UnmarshalJSON - parses JSON data and creates new set with it. | ||
152 | // If 'data' contains a JSON string array, the set contains each string. | ||
153 | // If 'data' contains a JSON string, the set contains that string as its only element. | ||
154 | // If 'data' contains any other JSON type, a JSON parse error is returned. | ||
155 | func (set *StringSet) UnmarshalJSON(data []byte) error { | ||
156 | sl := []string{} | ||
157 | var err error | ||
158 | if err = json.Unmarshal(data, &sl); err == nil { | ||
159 | *set = make(StringSet) | ||
160 | for _, s := range sl { | ||
161 | set.Add(s) | ||
162 | } | ||
163 | } else { | ||
164 | var s string | ||
165 | if err = json.Unmarshal(data, &s); err == nil { | ||
166 | *set = make(StringSet) | ||
167 | set.Add(s) | ||
168 | } | ||
169 | } | ||
170 | |||
171 | return err | ||
172 | } | ||
173 | |||
174 | // String - returns a printable string of the set. | ||
175 | func (set StringSet) String() string { | ||
176 | return fmt.Sprintf("%s", set.ToSlice()) | ||
177 | } | ||
178 | |||
179 | // NewStringSet - creates new string set. | ||
180 | func NewStringSet() StringSet { | ||
181 | return make(StringSet) | ||
182 | } | ||
183 | |||
184 | // CreateStringSet - creates new string set with given string values. | ||
185 | func CreateStringSet(sl ...string) StringSet { | ||
186 | set := make(StringSet) | ||
187 | for _, k := range sl { | ||
188 | set.Add(k) | ||
189 | } | ||
190 | return set | ||
191 | } | ||
192 | |||
193 | // CopyStringSet - returns a copy of the given set. | ||
194 | func CopyStringSet(set StringSet) StringSet { | ||
195 | nset := NewStringSet() | ||
196 | for k, v := range set { | ||
197 | nset[k] = v | ||
198 | } | ||
199 | return nset | ||
200 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go new file mode 100644 index 0000000..77540e2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go | |||
@@ -0,0 +1,224 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2022 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package signer | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "fmt" | ||
23 | "io" | ||
24 | "net/http" | ||
25 | "strconv" | ||
26 | "strings" | ||
27 | "time" | ||
28 | ) | ||
29 | |||
30 | // getUnsignedChunkLength - calculates the on-wire length of a chunk, framing metadata included | ||
31 | func getUnsignedChunkLength(chunkDataSize int64) int64 { | ||
32 | return int64(len(fmt.Sprintf("%x", chunkDataSize))) + | ||
33 | crlfLen + | ||
34 | chunkDataSize + | ||
35 | crlfLen | ||
36 | } | ||
37 | |||
38 | // getUSStreamLength - calculates the length of the overall stream (data + metadata) | ||
39 | func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { | ||
40 | if dataLen <= 0 { | ||
41 | return 0 | ||
42 | } | ||
43 | |||
44 | chunksCount := int64(dataLen / chunkSize) | ||
45 | remainingBytes := int64(dataLen % chunkSize) | ||
46 | streamLen := int64(0) | ||
47 | streamLen += chunksCount * getUnsignedChunkLength(chunkSize) | ||
48 | if remainingBytes > 0 { | ||
49 | streamLen += getUnsignedChunkLength(remainingBytes) | ||
50 | } | ||
51 | streamLen += getUnsignedChunkLength(0) | ||
52 | if len(trailers) > 0 { | ||
53 | for name, placeholder := range trailers { | ||
54 | if len(placeholder) > 0 { | ||
55 | streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) | ||
56 | } | ||
57 | } | ||
58 | streamLen += crlfLen | ||
59 | } | ||
60 | |||
61 | return streamLen | ||
62 | } | ||
63 | |||
64 | // prepareUSStreamingRequest - prepares a request with the headers | ||
65 | // appropriate for an unsigned aws-chunked upload. | ||
66 | func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { | ||
67 | req.TransferEncoding = []string{"aws-chunked"} | ||
68 | if sessionToken != "" { | ||
69 | req.Header.Set("X-Amz-Security-Token", sessionToken) | ||
70 | } | ||
71 | |||
72 | req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) | ||
73 | // Set content length with streaming signature for each chunk included. | ||
74 | req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) | ||
75 | } | ||
76 | |||
77 | // StreamingUSReader implements an unsigned chunked upload as a reader | ||
78 | // on top of req.Body's io.ReadCloser: chunk header; data; ... repeated. | ||
79 | type StreamingUSReader struct { | ||
80 | contentLen int64 // Content-Length from req header | ||
81 | baseReadCloser io.ReadCloser // underlying io.Reader | ||
82 | bytesRead int64 // bytes read from underlying io.Reader | ||
83 | buf bytes.Buffer // holds signed chunk | ||
84 | chunkBuf []byte // holds raw data read from req Body | ||
85 | chunkBufLen int // no. of bytes read so far into chunkBuf | ||
86 | done bool // done reading the underlying reader to EOF | ||
87 | chunkNum int | ||
88 | totalChunks int | ||
89 | lastChunkSize int | ||
90 | trailer http.Header | ||
91 | } | ||
92 | |||
93 | // writeChunk - writes a chunk of chunkLen size, read from s.baseReader, into s.buf. | ||
94 | func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) { | ||
95 | s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n") | ||
96 | |||
97 | // Write chunk data into streaming buffer | ||
98 | s.buf.Write(s.chunkBuf[:chunkLen]) | ||
99 | |||
100 | // Write the chunk trailer. | ||
101 | if addCrLf { | ||
102 | s.buf.Write([]byte("\r\n")) | ||
103 | } | ||
104 | |||
105 | // Reset chunkBufLen for next chunk read. | ||
106 | s.chunkBufLen = 0 | ||
107 | s.chunkNum++ | ||
108 | } | ||
109 | |||
110 | // addTrailer - writes the provided trailer headers, followed by the | ||
111 | // terminating CRLFs, to the output buffer. | ||
112 | func (s *StreamingUSReader) addTrailer(h http.Header) { | ||
113 | olen := len(s.chunkBuf) | ||
114 | s.chunkBuf = s.chunkBuf[:0] | ||
115 | for k, v := range h { | ||
116 | s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) | ||
117 | } | ||
118 | |||
119 | s.buf.Write(s.chunkBuf) | ||
120 | s.buf.WriteString("\r\n\r\n") | ||
121 | |||
122 | // Reset chunkBufLen for next chunk read. | ||
123 | s.chunkBuf = s.chunkBuf[:olen] | ||
124 | s.chunkBufLen = 0 | ||
125 | s.chunkNum++ | ||
126 | } | ||
127 | |||
128 | // StreamingUnsignedV4 - provides unsigned aws-chunked upload by wrapping req.Body. | ||
129 | func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request { | ||
130 | // Set headers needed for streaming signature. | ||
131 | prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) | ||
132 | |||
133 | if req.Body == nil { | ||
134 | req.Body = io.NopCloser(bytes.NewReader([]byte(""))) | ||
135 | } | ||
136 | |||
137 | stReader := &StreamingUSReader{ | ||
138 | baseReadCloser: req.Body, | ||
139 | chunkBuf: make([]byte, payloadChunkSize), | ||
140 | contentLen: dataLen, | ||
141 | chunkNum: 1, | ||
142 | totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, | ||
143 | lastChunkSize: int(dataLen % payloadChunkSize), | ||
144 | } | ||
145 | if len(req.Trailer) > 0 { | ||
146 | stReader.trailer = req.Trailer | ||
147 | // Remove the trailer from the request; the reader writes it at the end of the stream. | ||
148 | req.Trailer = nil | ||
149 | } | ||
150 | |||
151 | req.Body = stReader | ||
152 | |||
153 | return req | ||
154 | } | ||
155 | |||
156 | // Read - implements the io.Reader interface; it frames data read | ||
157 | // from the underlying reader into unsigned aws-chunked chunks. | ||
158 | func (s *StreamingUSReader) Read(buf []byte) (int, error) { | ||
159 | switch { | ||
160 | // After the last chunk is read from underlying reader, we | ||
161 | // never re-fill s.buf. | ||
162 | case s.done: | ||
163 | |||
164 | // s.buf will be (re-)filled with the next chunk when it holds | ||
165 | // fewer bytes than asked for. | ||
166 | case s.buf.Len() < len(buf): | ||
167 | s.chunkBufLen = 0 | ||
168 | for { | ||
169 | n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) | ||
170 | // Usually we validate `err` first, but in this case | ||
171 | // we are validating n > 0 for the following reasons. | ||
172 | // | ||
173 | // 1. n > 0, err is one of io.EOF, nil (near end of stream) | ||
174 | // A Reader returning a non-zero number of bytes at the end | ||
175 | // of the input stream may return either err == EOF or err == nil | ||
176 | // | ||
177 | // 2. n == 0, err is io.EOF (actual end of stream) | ||
178 | // | ||
179 | // Callers should always process the n > 0 bytes returned | ||
180 | // before considering the error err. | ||
181 | if n1 > 0 { | ||
182 | s.chunkBufLen += n1 | ||
183 | s.bytesRead += int64(n1) | ||
184 | |||
185 | if s.chunkBufLen == payloadChunkSize || | ||
186 | (s.chunkNum == s.totalChunks-1 && | ||
187 | s.chunkBufLen == s.lastChunkSize) { | ||
188 | // Frame the chunk and write it to s.buf. | ||
189 | s.writeChunk(s.chunkBufLen, true) | ||
190 | break | ||
191 | } | ||
192 | } | ||
193 | if err != nil { | ||
194 | if err == io.EOF { | ||
195 | // No more data left in baseReader - last chunk. | ||
196 | // Done reading the last chunk from baseReader. | ||
197 | s.done = true | ||
198 | |||
199 | // bytes read from baseReader different than | ||
200 | // content length provided. | ||
201 | if s.bytesRead != s.contentLen { | ||
202 | return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) | ||
203 | } | ||
204 | |||
205 | // Write the terminating zero-length chunk to s.buf. | ||
206 | s.writeChunk(0, len(s.trailer) == 0) | ||
207 | if len(s.trailer) > 0 { | ||
208 | // Trailer must be set now. | ||
209 | s.addTrailer(s.trailer) | ||
210 | } | ||
211 | break | ||
212 | } | ||
213 | return 0, err | ||
214 | } | ||
215 | |||
216 | } | ||
217 | } | ||
218 | return s.buf.Read(buf) | ||
219 | } | ||
220 | |||
221 | // Close - this method makes underlying io.ReadCloser's Close method available. | ||
222 | func (s *StreamingUSReader) Close() error { | ||
223 | return s.baseReadCloser.Close() | ||
224 | } | ||
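
The reader above emits plain aws-chunked framing: for each chunk, the hex length, CRLF, the raw bytes, CRLF, ending with a zero-length chunk and an optional trailer block. A toy sketch of that framing follows; it is not the reader itself (the real implementation streams incrementally with 64 KiB chunks and appends trailers after the zero chunk), and the payload and chunk size are chosen purely for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// frameUnsignedChunks mirrors the framing written by StreamingUSReader:
// hex(size) CRLF data CRLF per chunk, then a final zero-length chunk.
// Trailer headers and the closing CRLFs are omitted for brevity.
func frameUnsignedChunks(payload []byte, chunkSize int) string {
	var b strings.Builder
	for off := 0; off < len(payload); off += chunkSize {
		end := off + chunkSize
		if end > len(payload) {
			end = len(payload)
		}
		chunk := payload[off:end]
		fmt.Fprintf(&b, "%x\r\n%s\r\n", len(chunk), chunk)
	}
	// Zero-length chunk terminates the stream.
	b.WriteString("0\r\n")
	return b.String()
}

func main() {
	framed := frameUnsignedChunks([]byte("hello world"), 4)
	fmt.Printf("%q\n", framed)
	// "4\r\nhell\r\n4\r\no wo\r\n3\r\nrld\r\n0\r\n"
}
```
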
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go new file mode 100644 index 0000000..1c2f1dc --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go | |||
@@ -0,0 +1,403 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package signer | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "encoding/hex" | ||
23 | "fmt" | ||
24 | "io" | ||
25 | "net/http" | ||
26 | "strconv" | ||
27 | "strings" | ||
28 | "time" | ||
29 | |||
30 | md5simd "github.com/minio/md5-simd" | ||
31 | ) | ||
32 | |||
33 | // Reference for constants used below - | ||
34 | // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming | ||
35 | const ( | ||
36 | streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" | ||
37 | streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" | ||
38 | streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" | ||
39 | streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER" | ||
40 | emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" | ||
41 | payloadChunkSize = 64 * 1024 | ||
42 | chunkSigConstLen = 17 // ";chunk-signature=" | ||
43 | signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" | ||
44 | crlfLen = 2 // CRLF | ||
45 | trailerKVSeparator = ":" | ||
46 | trailerSignature = "x-amz-trailer-signature" | ||
47 | ) | ||
48 | |||
49 | // Request headers to be ignored while calculating seed signature for | ||
50 | // a request. | ||
51 | var ignoredStreamingHeaders = map[string]bool{ | ||
52 | "Authorization": true, | ||
53 | "User-Agent": true, | ||
54 | "Content-Type": true, | ||
55 | } | ||
56 | |||
57 | // getSignedChunkLength - calculates the on-wire length of a signed chunk, framing metadata included | ||
58 | func getSignedChunkLength(chunkDataSize int64) int64 { | ||
59 | return int64(len(fmt.Sprintf("%x", chunkDataSize))) + | ||
60 | chunkSigConstLen + | ||
61 | signatureStrLen + | ||
62 | crlfLen + | ||
63 | chunkDataSize + | ||
64 | crlfLen | ||
65 | } | ||
66 | |||
67 | // getStreamLength - calculates the length of the overall stream (data + metadata) | ||
68 | func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { | ||
69 | if dataLen <= 0 { | ||
70 | return 0 | ||
71 | } | ||
72 | |||
73 | chunksCount := int64(dataLen / chunkSize) | ||
74 | remainingBytes := int64(dataLen % chunkSize) | ||
75 | streamLen := int64(0) | ||
76 | streamLen += chunksCount * getSignedChunkLength(chunkSize) | ||
77 | if remainingBytes > 0 { | ||
78 | streamLen += getSignedChunkLength(remainingBytes) | ||
79 | } | ||
80 | streamLen += getSignedChunkLength(0) | ||
81 | if len(trailers) > 0 { | ||
82 | for name, placeholder := range trailers { | ||
83 | if len(placeholder) > 0 { | ||
84 | streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) | ||
85 | } | ||
86 | } | ||
87 | streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen | ||
88 | } | ||
89 | |||
90 | return streamLen | ||
91 | } | ||
92 | |||
93 | // buildChunkStringToSign - returns the string to sign given chunk data | ||
94 | // and previous signature. | ||
95 | func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { | ||
96 | stringToSignParts := []string{ | ||
97 | streamingPayloadHdr, | ||
98 | t.Format(iso8601DateFormat), | ||
99 | getScope(region, t, ServiceTypeS3), | ||
100 | previousSig, | ||
101 | emptySHA256, | ||
102 | chunkChecksum, | ||
103 | } | ||
104 | |||
105 | return strings.Join(stringToSignParts, "\n") | ||
106 | } | ||
107 | |||
108 | // buildTrailerChunkStringToSign - returns the string to sign for the | ||
109 | // trailer chunk, given its checksum and the previous signature. | ||
110 | func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { | ||
111 | stringToSignParts := []string{ | ||
112 | streamingTrailerHdr, | ||
113 | t.Format(iso8601DateFormat), | ||
114 | getScope(region, t, ServiceTypeS3), | ||
115 | previousSig, | ||
116 | chunkChecksum, | ||
117 | } | ||
118 | |||
119 | return strings.Join(stringToSignParts, "\n") | ||
120 | } | ||
121 | |||
122 | // prepareStreamingRequest - prepares a request with appropriate | ||
123 | // headers before computing the seed signature. | ||
124 | func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { | ||
125 | // Set x-amz-content-sha256 header. | ||
126 | if len(req.Trailer) == 0 { | ||
127 | req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) | ||
128 | } else { | ||
129 | req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) | ||
130 | for k := range req.Trailer { | ||
131 | req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) | ||
132 | } | ||
133 | req.TransferEncoding = []string{"aws-chunked"} | ||
134 | } | ||
135 | |||
136 | if sessionToken != "" { | ||
137 | req.Header.Set("X-Amz-Security-Token", sessionToken) | ||
138 | } | ||
139 | |||
140 | req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) | ||
141 | // Set content length with streaming signature for each chunk included. | ||
142 | req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) | ||
143 | req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) | ||
144 | } | ||
145 | |||
146 | // buildChunkHeader - returns the chunk header. | ||
147 | // e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n | ||
148 | func buildChunkHeader(chunkLen int64, signature string) []byte { | ||
149 | return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") | ||
150 | } | ||
151 | |||
152 | // buildChunkSignature - returns chunk signature for a given chunk and previous signature. | ||
153 | func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region, | ||
154 | previousSignature, secretAccessKey string, | ||
155 | ) string { | ||
156 | chunkStringToSign := buildChunkStringToSign(reqTime, region, | ||
157 | previousSignature, chunkCheckSum) | ||
158 | signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) | ||
159 | return getSignature(signingKey, chunkStringToSign) | ||
160 | } | ||
161 | |||
162 | // buildTrailerChunkSignature - returns the trailer chunk signature for a given checksum and previous signature. | ||
163 | func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region, | ||
164 | previousSignature, secretAccessKey string, | ||
165 | ) string { | ||
166 | chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, | ||
167 | previousSignature, chunkChecksum) | ||
168 | signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) | ||
169 | return getSignature(signingKey, chunkStringToSign) | ||
170 | } | ||
171 | |||
172 | // setSeedSignature - computes and stores the seed signature for a given request. | ||
173 | func (s *StreamingReader) setSeedSignature(req *http.Request) { | ||
174 | // Get canonical request | ||
175 | canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req)) | ||
176 | |||
177 | // Get string to sign from canonical request. | ||
178 | stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3) | ||
179 | |||
180 | signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3) | ||
181 | |||
182 | // Calculate signature. | ||
183 | s.seedSignature = getSignature(signingKey, stringToSign) | ||
184 | } | ||
185 | |||
186 | // StreamingReader implements chunked upload signing as a reader on | ||
187 | // top of req.Body's io.ReadCloser: chunk header; data; ... repeated. | ||
188 | type StreamingReader struct { | ||
189 | accessKeyID string | ||
190 | secretAccessKey string | ||
191 | sessionToken string | ||
192 | region string | ||
193 | prevSignature string | ||
194 | seedSignature string | ||
195 | contentLen int64 // Content-Length from req header | ||
196 | baseReadCloser io.ReadCloser // underlying io.Reader | ||
197 | bytesRead int64 // bytes read from underlying io.Reader | ||
198 | buf bytes.Buffer // holds signed chunk | ||
199 | chunkBuf []byte // holds raw data read from req Body | ||
200 | chunkBufLen int // no. of bytes read so far into chunkBuf | ||
201 | done bool // done reading the underlying reader to EOF | ||
202 | reqTime time.Time | ||
203 | chunkNum int | ||
204 | totalChunks int | ||
205 | lastChunkSize int | ||
206 | trailer http.Header | ||
207 | sh256 md5simd.Hasher | ||
208 | } | ||
209 | |||
210 | // signChunk - signs a chunk read from s.baseReader of chunkLen size. | ||
211 | func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { | ||
212 | // Compute chunk signature for next header | ||
213 | s.sh256.Reset() | ||
214 | s.sh256.Write(s.chunkBuf[:chunkLen]) | ||
215 | chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil)) | ||
216 | |||
217 | signature := buildChunkSignature(chunkChecksum, s.reqTime, | ||
218 | s.region, s.prevSignature, s.secretAccessKey) | ||
219 | |||
220 | // For next chunk signature computation | ||
221 | s.prevSignature = signature | ||
222 | |||
223 | // Write chunk header into streaming buffer | ||
224 | chunkHdr := buildChunkHeader(int64(chunkLen), signature) | ||
225 | s.buf.Write(chunkHdr) | ||
226 | |||
227 | // Write chunk data into streaming buffer | ||
228 | s.buf.Write(s.chunkBuf[:chunkLen]) | ||
229 | |||
230 | // Write the chunk trailer. | ||
231 | if addCrLf { | ||
232 | s.buf.Write([]byte("\r\n")) | ||
233 | } | ||
234 | |||
235 | // Reset chunkBufLen for next chunk read. | ||
236 | s.chunkBufLen = 0 | ||
237 | s.chunkNum++ | ||
238 | } | ||
239 | |||
240 | // addSignedTrailer - adds a trailer with the provided headers, | ||
241 | // then signs it and appends the signed trailer chunk to the output. | ||
242 | func (s *StreamingReader) addSignedTrailer(h http.Header) { | ||
243 | olen := len(s.chunkBuf) | ||
244 | s.chunkBuf = s.chunkBuf[:0] | ||
245 | for k, v := range h { | ||
246 | s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) | ||
247 | } | ||
248 | |||
249 | s.sh256.Reset() | ||
250 | s.sh256.Write(s.chunkBuf) | ||
251 | chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil)) | ||
252 | // Compute chunk signature | ||
253 | signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime, | ||
254 | s.region, s.prevSignature, s.secretAccessKey) | ||
255 | |||
256 | // For next chunk signature computation | ||
257 | s.prevSignature = signature | ||
258 | |||
259 | s.buf.Write(s.chunkBuf) | ||
260 | s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n") | ||
261 | |||
262 | // Reset chunkBufLen for next chunk read. | ||
263 | s.chunkBuf = s.chunkBuf[:olen] | ||
264 | s.chunkBufLen = 0 | ||
265 | s.chunkNum++ | ||
266 | } | ||
267 | |||
268 | // setStreamingAuthHeader - builds and sets authorization header value | ||
269 | // for streaming signature. | ||
270 | func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { | ||
271 | credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3) | ||
272 | authParts := []string{ | ||
273 | signV4Algorithm + " Credential=" + credential, | ||
274 | "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), | ||
275 | "Signature=" + s.seedSignature, | ||
276 | } | ||
277 | |||
278 | // Set authorization header. | ||
279 | auth := strings.Join(authParts, ",") | ||
280 | req.Header.Set("Authorization", auth) | ||
281 | } | ||
282 | |||
283 | // StreamingSignV4 - provides chunked upload Signature V4 support by | ||
284 | // implementing io.Reader. | ||
285 | func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, | ||
286 | region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher, | ||
287 | ) *http.Request { | ||
288 | // Set headers needed for streaming signature. | ||
289 | prepareStreamingRequest(req, sessionToken, dataLen, reqTime) | ||
290 | |||
291 | if req.Body == nil { | ||
292 | req.Body = io.NopCloser(bytes.NewReader([]byte(""))) | ||
293 | } | ||
294 | |||
295 | stReader := &StreamingReader{ | ||
296 | baseReadCloser: req.Body, | ||
297 | accessKeyID: accessKeyID, | ||
298 | secretAccessKey: secretAccessKey, | ||
299 | sessionToken: sessionToken, | ||
300 | region: region, | ||
301 | reqTime: reqTime, | ||
302 | chunkBuf: make([]byte, payloadChunkSize), | ||
303 | contentLen: dataLen, | ||
304 | chunkNum: 1, | ||
305 | totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, | ||
306 | lastChunkSize: int(dataLen % payloadChunkSize), | ||
307 | sh256: sh256, | ||
308 | } | ||
309 | if len(req.Trailer) > 0 { | ||
310 | stReader.trailer = req.Trailer | ||
311 | // Remove the trailer from the request; the reader writes it at the end of the stream. | ||
312 | req.Trailer = nil | ||
313 | } | ||
314 | |||
315 | // Add the request headers required for chunk upload signing. | ||
316 | |||
317 | // Compute the seed signature. | ||
318 | stReader.setSeedSignature(req) | ||
319 | |||
320 | // Set the authorization header with the seed signature. | ||
321 | stReader.setStreamingAuthHeader(req) | ||
322 | |||
323 | // Set seed signature as prevSignature for subsequent | ||
324 | // streaming signing process. | ||
325 | stReader.prevSignature = stReader.seedSignature | ||
326 | req.Body = stReader | ||
327 | |||
328 | return req | ||
329 | } | ||
330 | |||
331 | // Read - implements the io.Reader interface; it signs each chunk | ||
332 | // read from the underlying reader and serves the signed stream. | ||
333 | func (s *StreamingReader) Read(buf []byte) (int, error) { | ||
334 | switch { | ||
335 | // After the last chunk is read from underlying reader, we | ||
336 | // never re-fill s.buf. | ||
337 | case s.done: | ||
338 | |||
339 | // s.buf will be (re-)filled with the next chunk when it holds | ||
340 | // fewer bytes than asked for. | ||
341 | case s.buf.Len() < len(buf): | ||
342 | s.chunkBufLen = 0 | ||
343 | for { | ||
344 | n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) | ||
345 | // Usually we validate `err` first, but in this case | ||
346 | // we are validating n > 0 for the following reasons. | ||
347 | // | ||
348 | // 1. n > 0, err is one of io.EOF, nil (near end of stream) | ||
349 | // A Reader returning a non-zero number of bytes at the end | ||
350 | // of the input stream may return either err == EOF or err == nil | ||
351 | // | ||
352 | // 2. n == 0, err is io.EOF (actual end of stream) | ||
353 | // | ||
354 | // Callers should always process the n > 0 bytes returned | ||
355 | // before considering the error err. | ||
356 | if n1 > 0 { | ||
357 | s.chunkBufLen += n1 | ||
358 | s.bytesRead += int64(n1) | ||
359 | |||
360 | if s.chunkBufLen == payloadChunkSize || | ||
361 | (s.chunkNum == s.totalChunks-1 && | ||
362 | s.chunkBufLen == s.lastChunkSize) { | ||
363 | // Sign the chunk and write it to s.buf. | ||
364 | s.signChunk(s.chunkBufLen, true) | ||
365 | break | ||
366 | } | ||
367 | } | ||
368 | if err != nil { | ||
369 | if err == io.EOF { | ||
370 | // No more data left in baseReader - last chunk. | ||
371 | // Done reading the last chunk from baseReader. | ||
372 | s.done = true | ||
373 | |||
374 | // bytes read from baseReader different than | ||
375 | // content length provided. | ||
376 | if s.bytesRead != s.contentLen { | ||
377 | return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) | ||
378 | } | ||
379 | |||
380 | // Sign the chunk and write it to s.buf. | ||
381 | s.signChunk(0, len(s.trailer) == 0) | ||
382 | if len(s.trailer) > 0 { | ||
383 | // Trailer must be set now. | ||
384 | s.addSignedTrailer(s.trailer) | ||
385 | } | ||
386 | break | ||
387 | } | ||
388 | return 0, err | ||
389 | } | ||
390 | |||
391 | } | ||
392 | } | ||
393 | return s.buf.Read(buf) | ||
394 | } | ||
395 | |||
396 | // Close - this method makes underlying io.ReadCloser's Close method available. | ||
397 | func (s *StreamingReader) Close() error { | ||
398 | if s.sh256 != nil { | ||
399 | s.sh256.Close() | ||
400 | s.sh256 = nil | ||
401 | } | ||
402 | return s.baseReadCloser.Close() | ||
403 | } | ||
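
To make the Content-Length arithmetic above concrete, here is a small standalone sketch reproducing getSignedChunkLength for two sizes. The constants are copied from the file; the main function's inputs are illustrative:

```go
package main

import "fmt"

const (
	chunkSigConstLen = 17 // ";chunk-signature="
	signatureStrLen  = 64 // hex-encoded HMAC-SHA256
	crlfLen          = 2  // "\r\n"
)

// signedChunkLength mirrors getSignedChunkLength: the hex size prefix,
// the signature suffix, two CRLFs, and the chunk data itself.
func signedChunkLength(chunkDataSize int64) int64 {
	return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
		chunkSigConstLen + signatureStrLen + crlfLen +
		chunkDataSize + crlfLen
}

func main() {
	// A full 64 KiB chunk: "10000" (5 hex digits) + 17 + 64 + 2 + 65536 + 2.
	fmt.Println(signedChunkLength(64 * 1024)) // 65626
	// The terminating zero-length chunk: "0" (1 digit) + 17 + 64 + 2 + 0 + 2.
	fmt.Println(signedChunkLength(0)) // 86
}
```

getStreamLength is then just this per-chunk length summed over the full chunks, the remainder chunk, the zero chunk, and the trailer block when one is present.
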
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go new file mode 100644 index 0000000..fa4f8c9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go | |||
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package signer | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "crypto/hmac" | ||
23 | "crypto/sha1" | ||
24 | "encoding/base64" | ||
25 | "fmt" | ||
26 | "net/http" | ||
27 | "net/url" | ||
28 | "sort" | ||
29 | "strconv" | ||
30 | "strings" | ||
31 | "time" | ||
32 | |||
33 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
34 | ) | ||
35 | |||
36 | // Signature and API related constants. | ||
37 | const ( | ||
38 | signV2Algorithm = "AWS" | ||
39 | ) | ||
40 | |||
41 | // Encode input URL path to URL encoded path. | ||
42 | func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { | ||
43 | if virtualHost { | ||
44 | reqHost := getHostAddr(req) | ||
45 | dotPos := strings.Index(reqHost, ".") | ||
46 | if dotPos > -1 { | ||
47 | bucketName := reqHost[:dotPos] | ||
48 | path = "/" + bucketName | ||
49 | path += req.URL.Path | ||
50 | path = s3utils.EncodePath(path) | ||
51 | return | ||
52 | } | ||
53 | } | ||
54 | path = s3utils.EncodePath(req.URL.Path) | ||
55 | return | ||
56 | } | ||
57 | |||
58 | // PreSignV2 - presigns the request in the following style. | ||
59 | // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. | ||
60 | func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { | ||
61 | // Presign is not needed for anonymous credentials. | ||
62 | if accessKeyID == "" || secretAccessKey == "" { | ||
63 | return &req | ||
64 | } | ||
65 | |||
66 | d := time.Now().UTC() | ||
67 | // Find epoch expires when the request will expire. | ||
68 | epochExpires := d.Unix() + expires | ||
69 | |||
70 | // Add expires header if not present. | ||
71 | if expiresStr := req.Header.Get("Expires"); expiresStr == "" { | ||
72 | req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) | ||
73 | } | ||
74 | |||
75 | // Get presigned string to sign. | ||
76 | stringToSign := preStringToSignV2(req, virtualHost) | ||
77 | hm := hmac.New(sha1.New, []byte(secretAccessKey)) | ||
78 | hm.Write([]byte(stringToSign)) | ||
79 | |||
80 | // Calculate signature. | ||
81 | signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) | ||
82 | |||
83 | query := req.URL.Query() | ||
84 | // Handle specially for Google Cloud Storage. | ||
85 | if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") { | ||
86 | query.Set("GoogleAccessId", accessKeyID) | ||
87 | } else { | ||
88 | query.Set("AWSAccessKeyId", accessKeyID) | ||
89 | } | ||
90 | |||
91 | // Fill in Expires for presigned query. | ||
92 | query.Set("Expires", strconv.FormatInt(epochExpires, 10)) | ||
93 | |||
94 | // Encode query and save. | ||
95 | req.URL.RawQuery = s3utils.QueryEncode(query) | ||
96 | |||
97 | // Save signature finally. | ||
98 | req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) | ||
99 | |||
100 | // Return. | ||
101 | return &req | ||
102 | } | ||
103 | |||
104 | // PostPresignSignatureV2 - presigned signature for PostPolicy | ||
105 | // request. | ||
106 | func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { | ||
107 | hm := hmac.New(sha1.New, []byte(secretAccessKey)) | ||
108 | hm.Write([]byte(policyBase64)) | ||
109 | signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) | ||
110 | return signature | ||
111 | } | ||
112 | |||
113 | // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; | ||
114 | // Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); | ||
115 | // | ||
116 | // StringToSign = HTTP-Verb + "\n" + | ||
117 | // Content-Md5 + "\n" + | ||
118 | // Content-Type + "\n" + | ||
119 | // Date + "\n" + | ||
120 | // CanonicalizedProtocolHeaders + | ||
121 | // CanonicalizedResource; | ||
122 | // | ||
123 | // CanonicalizedResource = [ "/" + Bucket ] + | ||
124 | // <HTTP-Request-URI, from the protocol name up to the query string> + | ||
125 | // [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; | ||
126 | // | ||
127 | // CanonicalizedProtocolHeaders = <described below> | ||
128 | |||
129 | // SignV2 signs the request before Do() (AWS Signature Version 2). | ||
130 | func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { | ||
131 | // Signature calculation is not needed for anonymous credentials. | ||
132 | if accessKeyID == "" || secretAccessKey == "" { | ||
133 | return &req | ||
134 | } | ||
135 | |||
136 | // Initial time. | ||
137 | d := time.Now().UTC() | ||
138 | |||
139 | // Add date if not present. | ||
140 | if date := req.Header.Get("Date"); date == "" { | ||
141 | req.Header.Set("Date", d.Format(http.TimeFormat)) | ||
142 | } | ||
143 | |||
144 | // Calculate HMAC for secretAccessKey. | ||
145 | stringToSign := stringToSignV2(req, virtualHost) | ||
146 | hm := hmac.New(sha1.New, []byte(secretAccessKey)) | ||
147 | hm.Write([]byte(stringToSign)) | ||
148 | |||
149 | // Prepare auth header. | ||
150 | authHeader := new(bytes.Buffer) | ||
151 | authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) | ||
152 | encoder := base64.NewEncoder(base64.StdEncoding, authHeader) | ||
153 | encoder.Write(hm.Sum(nil)) | ||
154 | encoder.Close() | ||
155 | |||
156 | // Set Authorization header. | ||
157 | req.Header.Set("Authorization", authHeader.String()) | ||
158 | |||
159 | return &req | ||
160 | } | ||
161 | |||
162 | // From the Amazon docs: | ||
163 | // | ||
164 | // StringToSign = HTTP-Verb + "\n" + | ||
165 | // | ||
166 | // Content-Md5 + "\n" + | ||
167 | // Content-Type + "\n" + | ||
168 | // Expires + "\n" + | ||
169 | // CanonicalizedProtocolHeaders + | ||
170 | // CanonicalizedResource; | ||
171 | func preStringToSignV2(req http.Request, virtualHost bool) string { | ||
172 | buf := new(bytes.Buffer) | ||
173 | // Write standard headers. | ||
174 | writePreSignV2Headers(buf, req) | ||
175 | // Write canonicalized protocol headers if any. | ||
176 | writeCanonicalizedHeaders(buf, req) | ||
177 | // Write canonicalized Query resources if any. | ||
178 | writeCanonicalizedResource(buf, req, virtualHost) | ||
179 | return buf.String() | ||
180 | } | ||
181 | |||
182 | // writePreSignV2Headers - writes the presign v2 required headers. | ||
183 | func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { | ||
184 | buf.WriteString(req.Method + "\n") | ||
185 | buf.WriteString(req.Header.Get("Content-Md5") + "\n") | ||
186 | buf.WriteString(req.Header.Get("Content-Type") + "\n") | ||
187 | buf.WriteString(req.Header.Get("Expires") + "\n") | ||
188 | } | ||
189 | |||
190 | // From the Amazon docs: | ||
191 | // | ||
192 | // StringToSign = HTTP-Verb + "\n" + | ||
193 | // | ||
194 | // Content-Md5 + "\n" + | ||
195 | // Content-Type + "\n" + | ||
196 | // Date + "\n" + | ||
197 | // CanonicalizedProtocolHeaders + | ||
198 | // CanonicalizedResource; | ||
199 | func stringToSignV2(req http.Request, virtualHost bool) string { | ||
200 | buf := new(bytes.Buffer) | ||
201 | // Write standard headers. | ||
202 | writeSignV2Headers(buf, req) | ||
203 | // Write canonicalized protocol headers if any. | ||
204 | writeCanonicalizedHeaders(buf, req) | ||
205 | // Write canonicalized Query resources if any. | ||
206 | writeCanonicalizedResource(buf, req, virtualHost) | ||
207 | return buf.String() | ||
208 | } | ||
209 | |||
210 | // writeSignV2Headers - writes the signV2 required headers. | ||
211 | func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { | ||
212 | buf.WriteString(req.Method + "\n") | ||
213 | buf.WriteString(req.Header.Get("Content-Md5") + "\n") | ||
214 | buf.WriteString(req.Header.Get("Content-Type") + "\n") | ||
215 | buf.WriteString(req.Header.Get("Date") + "\n") | ||
216 | } | ||
217 | |||
218 | // writeCanonicalizedHeaders - writes the canonicalized x-amz-* headers. | ||
219 | func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { | ||
220 | var protoHeaders []string | ||
221 | vals := make(map[string][]string) | ||
222 | for k, vv := range req.Header { | ||
223 | // All the AMZ headers should be lowercase | ||
224 | lk := strings.ToLower(k) | ||
225 | if strings.HasPrefix(lk, "x-amz") { | ||
226 | protoHeaders = append(protoHeaders, lk) | ||
227 | vals[lk] = vv | ||
228 | } | ||
229 | } | ||
230 | sort.Strings(protoHeaders) | ||
231 | for _, k := range protoHeaders { | ||
232 | buf.WriteString(k) | ||
233 | buf.WriteByte(':') | ||
234 | for idx, v := range vals[k] { | ||
235 | if idx > 0 { | ||
236 | buf.WriteByte(',') | ||
237 | } | ||
238 | buf.WriteString(v) | ||
239 | } | ||
240 | buf.WriteByte('\n') | ||
241 | } | ||
242 | } | ||
243 | |||
244 | // The AWS S3 Signature V2 calculation rule is given here: | ||
245 | // http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign | ||
246 | |||
247 | // Whitelisted resources that may appear in the query string for signature-V2 calculation. | ||
248 | // | ||
249 | // This list must be kept alphabetically sorted; do not edit it hastily. | ||
250 | var resourceList = []string{ | ||
251 | "acl", | ||
252 | "cors", | ||
253 | "delete", | ||
254 | "encryption", | ||
255 | "legal-hold", | ||
256 | "lifecycle", | ||
257 | "location", | ||
258 | "logging", | ||
259 | "notification", | ||
260 | "partNumber", | ||
261 | "policy", | ||
262 | "replication", | ||
263 | "requestPayment", | ||
264 | "response-cache-control", | ||
265 | "response-content-disposition", | ||
266 | "response-content-encoding", | ||
267 | "response-content-language", | ||
268 | "response-content-type", | ||
269 | "response-expires", | ||
270 | "retention", | ||
271 | "select", | ||
272 | "select-type", | ||
273 | "tagging", | ||
274 | "torrent", | ||
275 | "uploadId", | ||
276 | "uploads", | ||
277 | "versionId", | ||
278 | "versioning", | ||
279 | "versions", | ||
280 | "website", | ||
281 | } | ||
282 | |||
283 | // From the Amazon docs: | ||
284 | // | ||
285 | // CanonicalizedResource = [ "/" + Bucket ] + | ||
286 | // | ||
287 | // <HTTP-Request-URI, from the protocol name up to the query string> + | ||
288 | // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; | ||
289 | func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { | ||
290 | // Save request URL. | ||
291 | requestURL := req.URL | ||
292 | // Get encoded URL path. | ||
293 | buf.WriteString(encodeURL2Path(&req, virtualHost)) | ||
294 | if requestURL.RawQuery != "" { | ||
295 | var n int | ||
296 | vals, _ := url.ParseQuery(requestURL.RawQuery) | ||
297 | // Check whether any sub-resource queries are present and, if so, | ||
298 | // canonicalize them. | ||
299 | for _, resource := range resourceList { | ||
300 | if vv, ok := vals[resource]; ok && len(vv) > 0 { | ||
301 | n++ | ||
302 | // First element | ||
303 | switch n { | ||
304 | case 1: | ||
305 | buf.WriteByte('?') | ||
306 | // The rest | ||
307 | default: | ||
308 | buf.WriteByte('&') | ||
309 | } | ||
310 | buf.WriteString(resource) | ||
311 | // Request parameters | ||
312 | if len(vv[0]) > 0 { | ||
313 | buf.WriteByte('=') | ||
314 | buf.WriteString(vv[0]) | ||
315 | } | ||
316 | } | ||
317 | } | ||
318 | } | ||
319 | } | ||
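
A compact sketch of the V2 signing primitive the functions above assemble: HMAC-SHA1 over the string-to-sign, base64-encoded into the Authorization header. The credentials and string-to-sign below are placeholders; a real string-to-sign is produced by stringToSignV2 from the verb, Content-Md5, Content-Type, Date, x-amz headers, and canonicalized resource:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// Placeholder credentials and string-to-sign, for illustration only.
	secretAccessKey := "EXAMPLE-SECRET"
	stringToSign := "GET\n\n\nSun, 01 Jan 2023 00:00:00 GMT\n/mybucket/myobject"

	// HMAC-SHA1 keyed by the secret, as in SignV2.
	hm := hmac.New(sha1.New, []byte(secretAccessKey))
	hm.Write([]byte(stringToSign))
	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))

	// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature
	fmt.Println("AWS EXAMPLE-ACCESS-KEY:" + signature)
}
```
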
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go new file mode 100644 index 0000000..ffd2514 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go | |||
@@ -0,0 +1,351 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package signer | ||
19 | |||
20 | import ( | ||
21 | "bytes" | ||
22 | "encoding/hex" | ||
23 | "net/http" | ||
24 | "sort" | ||
25 | "strconv" | ||
26 | "strings" | ||
27 | "time" | ||
28 | |||
29 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
30 | ) | ||
31 | |||
32 | // Signature and API related constants. | ||
33 | const ( | ||
34 | signV4Algorithm = "AWS4-HMAC-SHA256" | ||
35 | iso8601DateFormat = "20060102T150405Z" | ||
36 | yyyymmdd = "20060102" | ||
37 | ) | ||
38 | |||
39 | // Different service types | ||
40 | const ( | ||
41 | ServiceTypeS3 = "s3" | ||
42 | ServiceTypeSTS = "sts" | ||
43 | ) | ||
44 | |||
45 | // Excerpts from @lsegal - | ||
46 | // https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. | ||
47 | // | ||
48 | // * User-Agent | ||
49 | // This is ignored from signing because signing this causes problems with generating pre-signed | ||
50 | // URLs (that are executed by other agents) or when customers pass requests through proxies, which | ||
51 | // may modify the user-agent. | ||
52 | // | ||
53 | // * Authorization | ||
54 | // Is skipped for obvious reasons. | ||
55 | // | ||
56 | // * Accept-Encoding | ||
57 | // Some S3 servers like Hitachi Content Platform do not honor this header for signature | ||
58 | // calculation. | ||
59 | var v4IgnoredHeaders = map[string]bool{ | ||
60 | "Accept-Encoding": true, | ||
61 | "Authorization": true, | ||
62 | "User-Agent": true, | ||
63 | } | ||
64 | |||
65 | // getSigningKey returns the HMAC seed key used to calculate the final signature. | ||
66 | func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte { | ||
67 | date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) | ||
68 | location := sumHMAC(date, []byte(loc)) | ||
69 | service := sumHMAC(location, []byte(serviceType)) | ||
70 | signingKey := sumHMAC(service, []byte("aws4_request")) | ||
71 | return signingKey | ||
72 | } | ||
73 | |||
74 | // getSignature returns the final signature in hexadecimal form. | ||
75 | func getSignature(signingKey []byte, stringToSign string) string { | ||
76 | return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) | ||
77 | } | ||
78 | |||
79 | // getScope generates the scope string from a specific date, an AWS | ||
80 | // region, and a service. | ||
81 | func getScope(location string, t time.Time, serviceType string) string { | ||
82 | scope := strings.Join([]string{ | ||
83 | t.Format(yyyymmdd), | ||
84 | location, | ||
85 | serviceType, | ||
86 | "aws4_request", | ||
87 | }, "/") | ||
88 | return scope | ||
89 | } | ||
90 | |||
91 | // GetCredential generates a credential string. | ||
92 | func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string { | ||
93 | scope := getScope(location, t, serviceType) | ||
94 | return accessKeyID + "/" + scope | ||
95 | } | ||
96 | |||
97 | // getHashedPayload gets the hexadecimal value of the SHA256 hash of | ||
98 | // the request payload. | ||
99 | func getHashedPayload(req http.Request) string { | ||
100 | hashedPayload := req.Header.Get("X-Amz-Content-Sha256") | ||
101 | if hashedPayload == "" { | ||
102 | // Presign does not have a payload, use S3 recommended value. | ||
103 | hashedPayload = unsignedPayload | ||
104 | } | ||
105 | return hashedPayload | ||
106 | } | ||
107 | |||
108 | // getCanonicalHeaders generates a list of request headers for | ||
109 | // signature. | ||
110 | func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string { | ||
111 | var headers []string | ||
112 | vals := make(map[string][]string) | ||
113 | for k, vv := range req.Header { | ||
114 | if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { | ||
115 | continue // ignored header | ||
116 | } | ||
117 | headers = append(headers, strings.ToLower(k)) | ||
118 | vals[strings.ToLower(k)] = vv | ||
119 | } | ||
120 | if !headerExists("host", headers) { | ||
121 | headers = append(headers, "host") | ||
122 | } | ||
123 | sort.Strings(headers) | ||
124 | |||
125 | var buf bytes.Buffer | ||
126 | // Save all the headers in canonical form <header>:<value> newline | ||
127 | // separated for each header. | ||
128 | for _, k := range headers { | ||
129 | buf.WriteString(k) | ||
130 | buf.WriteByte(':') | ||
131 | switch { | ||
132 | case k == "host": | ||
133 | buf.WriteString(getHostAddr(&req)) | ||
134 | buf.WriteByte('\n') | ||
135 | default: | ||
136 | for idx, v := range vals[k] { | ||
137 | if idx > 0 { | ||
138 | buf.WriteByte(',') | ||
139 | } | ||
140 | buf.WriteString(signV4TrimAll(v)) | ||
141 | } | ||
142 | buf.WriteByte('\n') | ||
143 | } | ||
144 | } | ||
145 | return buf.String() | ||
146 | } | ||
147 | |||
148 | func headerExists(key string, headers []string) bool { | ||
149 | for _, k := range headers { | ||
150 | if k == key { | ||
151 | return true | ||
152 | } | ||
153 | } | ||
154 | return false | ||
155 | } | ||
156 | |||
157 | // getSignedHeaders generates all signed request headers, | ||
158 | // i.e. a lexically sorted, semicolon-separated list of lowercase | ||
159 | // request header names. | ||
160 | func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { | ||
161 | var headers []string | ||
162 | for k := range req.Header { | ||
163 | if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { | ||
164 | continue // Ignored header found continue. | ||
165 | } | ||
166 | headers = append(headers, strings.ToLower(k)) | ||
167 | } | ||
168 | if !headerExists("host", headers) { | ||
169 | headers = append(headers, "host") | ||
170 | } | ||
171 | sort.Strings(headers) | ||
172 | return strings.Join(headers, ";") | ||
173 | } | ||
174 | |||
175 | // getCanonicalRequest generates a canonical request of the following style. | ||
176 | // | ||
177 | // canonicalRequest = | ||
178 | // | ||
179 | // <HTTPMethod>\n | ||
180 | // <CanonicalURI>\n | ||
181 | // <CanonicalQueryString>\n | ||
182 | // <CanonicalHeaders>\n | ||
183 | // <SignedHeaders>\n | ||
184 | // <HashedPayload> | ||
185 | func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { | ||
186 | req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") | ||
187 | canonicalRequest := strings.Join([]string{ | ||
188 | req.Method, | ||
189 | s3utils.EncodePath(req.URL.Path), | ||
190 | req.URL.RawQuery, | ||
191 | getCanonicalHeaders(req, ignoredHeaders), | ||
192 | getSignedHeaders(req, ignoredHeaders), | ||
193 | hashedPayload, | ||
194 | }, "\n") | ||
195 | return canonicalRequest | ||
196 | } | ||
197 | |||
198 | // getStringToSignV4 returns the string to sign, built from the scope and the hashed canonical request. | ||
199 | func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string { | ||
200 | stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" | ||
201 | stringToSign = stringToSign + getScope(location, t, serviceType) + "\n" | ||
202 | stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest))) | ||
203 | return stringToSign | ||
204 | } | ||
205 | |||
206 | // PreSignV4 presigns the request, in accordance with | ||
207 | // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. | ||
208 | func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request { | ||
209 | // Presign is not needed for anonymous credentials. | ||
210 | if accessKeyID == "" || secretAccessKey == "" { | ||
211 | return &req | ||
212 | } | ||
213 | |||
214 | // Initial time. | ||
215 | t := time.Now().UTC() | ||
216 | |||
217 | // Get credential string. | ||
218 | credential := GetCredential(accessKeyID, location, t, ServiceTypeS3) | ||
219 | |||
220 | // Get all signed headers. | ||
221 | signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) | ||
222 | |||
223 | // Set URL query. | ||
224 | query := req.URL.Query() | ||
225 | query.Set("X-Amz-Algorithm", signV4Algorithm) | ||
226 | query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) | ||
227 | query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) | ||
228 | query.Set("X-Amz-SignedHeaders", signedHeaders) | ||
229 | query.Set("X-Amz-Credential", credential) | ||
230 | // Set session token if available. | ||
231 | if sessionToken != "" { | ||
232 | query.Set("X-Amz-Security-Token", sessionToken) | ||
233 | } | ||
234 | req.URL.RawQuery = query.Encode() | ||
235 | |||
236 | // Get canonical request. | ||
237 | canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req)) | ||
238 | |||
239 | // Get string to sign from canonical request. | ||
240 | stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3) | ||
241 | |||
242 | // Get hmac signing key. | ||
243 | signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3) | ||
244 | |||
245 | // Calculate signature. | ||
246 | signature := getSignature(signingKey, stringToSign) | ||
247 | |||
248 | // Add signature header to RawQuery. | ||
249 | req.URL.RawQuery += "&X-Amz-Signature=" + signature | ||
250 | |||
251 | return &req | ||
252 | } | ||
253 | |||
254 | // PostPresignSignatureV4 - presigned signature for PostPolicy | ||
255 | // requests. | ||
256 | func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { | ||
257 | // Get signing key. | ||
258 | signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3) | ||
259 | // Calculate signature. | ||
260 | signature := getSignature(signingkey, policyBase64) | ||
261 | return signature | ||
262 | } | ||
263 | |||
264 | // SignV4STS - signature v4 for STS request. | ||
265 | func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { | ||
266 | return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil) | ||
267 | } | ||
268 | |||
269 | // Internal function called for different service types. | ||
270 | func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request { | ||
271 | // Signature calculation is not needed for anonymous credentials. | ||
272 | if accessKeyID == "" || secretAccessKey == "" { | ||
273 | return &req | ||
274 | } | ||
275 | |||
276 | // Initial time. | ||
277 | t := time.Now().UTC() | ||
278 | |||
279 | // Set x-amz-date. | ||
280 | req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) | ||
281 | |||
282 | // Set session token if available. | ||
283 | if sessionToken != "" { | ||
284 | req.Header.Set("X-Amz-Security-Token", sessionToken) | ||
285 | } | ||
286 | |||
287 | if len(trailer) > 0 { | ||
288 | for k := range trailer { | ||
289 | req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) | ||
290 | } | ||
291 | |||
292 | req.Header.Set("Content-Encoding", "aws-chunked") | ||
293 | req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10)) | ||
294 | } | ||
295 | |||
296 | hashedPayload := getHashedPayload(req) | ||
297 | if serviceType == ServiceTypeSTS { | ||
298 | // Content sha256 header is not sent with the request | ||
299 | // but it is expected to have sha256 of payload for signature | ||
300 | // in STS service type request. | ||
301 | req.Header.Del("X-Amz-Content-Sha256") | ||
302 | } | ||
303 | |||
304 | // Get canonical request. | ||
305 | canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload) | ||
306 | |||
307 | // Get string to sign from canonical request. | ||
308 | stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType) | ||
309 | |||
310 | // Get hmac signing key. | ||
311 | signingKey := getSigningKey(secretAccessKey, location, t, serviceType) | ||
312 | |||
313 | // Get credential string. | ||
314 | credential := GetCredential(accessKeyID, location, t, serviceType) | ||
315 | |||
316 | // Get all signed headers. | ||
317 | signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) | ||
318 | |||
319 | // Calculate signature. | ||
320 | signature := getSignature(signingKey, stringToSign) | ||
321 | |||
322 | // If regular request, construct the final authorization header. | ||
323 | parts := []string{ | ||
324 | signV4Algorithm + " Credential=" + credential, | ||
325 | "SignedHeaders=" + signedHeaders, | ||
326 | "Signature=" + signature, | ||
327 | } | ||
328 | |||
329 | // Set authorization header. | ||
330 | auth := strings.Join(parts, ", ") | ||
331 | req.Header.Set("Authorization", auth) | ||
332 | |||
333 | if len(trailer) > 0 { | ||
334 | // Use custom chunked encoding. | ||
335 | req.Trailer = trailer | ||
336 | return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC()) | ||
337 | } | ||
338 | return &req | ||
339 | } | ||
340 | |||
341 | // SignV4 signs the request before Do(), in accordance with | ||
342 | // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. | ||
343 | func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { | ||
344 | return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil) | ||
345 | } | ||
346 | |||
347 | // SignV4Trailer signs the request before Do(), in accordance with | ||
348 | // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html | ||
349 | func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request { | ||
350 | return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer) | ||
351 | } | ||
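
The key derivation in getSigningKey is the standard Signature V4 chain of nested HMAC-SHA256 calls. A standalone sketch reproducing it, with a hypothetical secret, date, region, and service:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 plays the role of sumHMAC in utils.go.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// kSigning = HMAC(HMAC(HMAC(HMAC("AWS4"+secret, date), region), service), "aws4_request")
	secret := "EXAMPLE-SECRET" // placeholder
	date := "20230101"         // yyyymmdd, always UTC
	region := "us-east-1"
	service := "s3"

	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	// The final signature is HMAC(kSigning, stringToSign), hex-encoded,
	// which is what getSignature computes.
	fmt.Println(hex.EncodeToString(kSigning))
}
```

Because the signing key depends only on the secret, date, region, and service, it can be cached per day; this library recomputes it per request for simplicity.
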
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go new file mode 100644 index 0000000..87c9939 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package signer | ||
19 | |||
20 | import ( | ||
21 | "crypto/hmac" | ||
22 | "crypto/sha256" | ||
23 | "net/http" | ||
24 | "strings" | ||
25 | ) | ||
26 | |||
27 | // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when the request payload is not signed. | ||
28 | const unsignedPayload = "UNSIGNED-PAYLOAD" | ||
29 | |||
30 | // sum256 calculates the sha256 sum of an input byte array. | ||
31 | func sum256(data []byte) []byte { | ||
32 | hash := sha256.New() | ||
33 | hash.Write(data) | ||
34 | return hash.Sum(nil) | ||
35 | } | ||
36 | |||
37 | // sumHMAC calculates the HMAC-SHA256 of data using the given key. | ||
38 | func sumHMAC(key, data []byte) []byte { | ||
39 | hash := hmac.New(sha256.New, key) | ||
40 | hash.Write(data) | ||
41 | return hash.Sum(nil) | ||
42 | } | ||
43 | |||
44 | // getHostAddr returns host header if available, otherwise returns host from URL | ||
45 | func getHostAddr(req *http.Request) string { | ||
46 | host := req.Header.Get("host") | ||
47 | if host != "" && req.Host != host { | ||
48 | return host | ||
49 | } | ||
50 | if req.Host != "" { | ||
51 | return req.Host | ||
52 | } | ||
53 | return req.URL.Host | ||
54 | } | ||
55 | |||
56 | // Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() | ||
57 | // in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html | ||
58 | func signV4TrimAll(input string) string { | ||
59 | // Compress adjacent spaces (a space is determined by | ||
60 | // unicode.IsSpace() internally here) to one space and return | ||
61 | return strings.Join(strings.Fields(input), " ") | ||
62 | } | ||
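For illustration, the TrimAll behavior collapses every interior whitespace run to a single space; the function body is equivalent to this expression:

	// strings.Join(strings.Fields("  Hello \t  World "), " ") == "Hello World"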
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go new file mode 100644 index 0000000..b5fb956 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package sse | ||
19 | |||
20 | import "encoding/xml" | ||
21 | |||
22 | // ApplySSEByDefault defines the default encryption configuration, KMS or SSE. To activate | ||
23 | // KMS, SSEAlgorithm needs to be set to "aws:kms". | ||
24 | // MinIO currently does not support KMS. | ||
25 | type ApplySSEByDefault struct { | ||
26 | KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` | ||
27 | SSEAlgorithm string `xml:"SSEAlgorithm"` | ||
28 | } | ||
29 | |||
30 | // Rule layer encapsulates default encryption configuration | ||
31 | type Rule struct { | ||
32 | Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"` | ||
33 | } | ||
34 | |||
35 | // Configuration is the default encryption configuration structure | ||
36 | type Configuration struct { | ||
37 | XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"` | ||
38 | Rules []Rule `xml:"Rule"` | ||
39 | } | ||
40 | |||
41 | // NewConfigurationSSES3 initializes a new SSE-S3 configuration | ||
42 | func NewConfigurationSSES3() *Configuration { | ||
43 | return &Configuration{ | ||
44 | Rules: []Rule{ | ||
45 | { | ||
46 | Apply: ApplySSEByDefault{ | ||
47 | SSEAlgorithm: "AES256", | ||
48 | }, | ||
49 | }, | ||
50 | }, | ||
51 | } | ||
52 | } | ||
53 | |||
54 | // NewConfigurationSSEKMS initializes a new SSE-KMS configuration | ||
55 | func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration { | ||
56 | return &Configuration{ | ||
57 | Rules: []Rule{ | ||
58 | { | ||
59 | Apply: ApplySSEByDefault{ | ||
60 | KmsMasterKeyID: kmsMasterKey, | ||
61 | SSEAlgorithm: "aws:kms", | ||
62 | }, | ||
63 | }, | ||
64 | }, | ||
65 | } | ||
66 | } | ||
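A short sketch of how these configurations serialize; callers typically pass them to the client's SetBucketEncryption API rather than marshaling by hand (the KMS key ARN below is a placeholder):

	package main

	import (
		"encoding/xml"
		"fmt"

		"github.com/minio/minio-go/v7/pkg/sse"
	)

	func main() {
		cfg := sse.NewConfigurationSSEKMS("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE")
		out, _ := xml.Marshal(cfg)
		fmt.Println(string(out))
		// <ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault>...
	}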
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go new file mode 100644 index 0000000..7a84a6f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go | |||
@@ -0,0 +1,413 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2020-2022 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package tags | ||
19 | |||
20 | import ( | ||
21 | "encoding/xml" | ||
22 | "io" | ||
23 | "net/url" | ||
24 | "regexp" | ||
25 | "sort" | ||
26 | "strings" | ||
27 | "unicode/utf8" | ||
28 | ) | ||
29 | |||
30 | // Error contains tag specific error. | ||
31 | type Error interface { | ||
32 | error | ||
33 | Code() string | ||
34 | } | ||
35 | |||
36 | type errTag struct { | ||
37 | code string | ||
38 | message string | ||
39 | } | ||
40 | |||
41 | // Code contains error code. | ||
42 | func (err errTag) Code() string { | ||
43 | return err.code | ||
44 | } | ||
45 | |||
46 | // Error contains error message. | ||
47 | func (err errTag) Error() string { | ||
48 | return err.message | ||
49 | } | ||
50 | |||
51 | var ( | ||
52 | errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"} | ||
53 | errTooManyTags = &errTag{"BadRequest", "Tags cannot be more than 50"} | ||
54 | errInvalidTagKey = &errTag{"InvalidTag", "The TagKey you have provided is invalid"} | ||
55 | errInvalidTagValue = &errTag{"InvalidTag", "The TagValue you have provided is invalid"} | ||
56 | errDuplicateTagKey = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"} | ||
57 | ) | ||
58 | |||
59 | // Tag keys and values come with limitations as per | ||
60 | // https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and | ||
61 | // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions | ||
62 | const ( | ||
63 | maxKeyLength = 128 | ||
64 | maxValueLength = 256 | ||
65 | maxObjectTagCount = 10 | ||
66 | maxTagCount = 50 | ||
67 | ) | ||
68 | |||
69 | // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions | ||
70 | // borrowed from the article above; testing various ASCII characters shows the following regex | ||
71 | // is supported by AWS S3 for both tag keys and values. | ||
72 | var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`) | ||
73 | |||
74 | func checkKey(key string) error { | ||
75 | if len(key) == 0 { | ||
76 | return errInvalidTagKey | ||
77 | } | ||
78 | |||
79 | if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) { | ||
80 | return errInvalidTagKey | ||
81 | } | ||
82 | |||
83 | return nil | ||
84 | } | ||
85 | |||
86 | func checkValue(value string) error { | ||
87 | if value != "" { | ||
88 | if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) { | ||
89 | return errInvalidTagValue | ||
90 | } | ||
91 | } | ||
92 | |||
93 | return nil | ||
94 | } | ||
95 | |||
96 | // Tag denotes key and value. | ||
97 | type Tag struct { | ||
98 | Key string `xml:"Key"` | ||
99 | Value string `xml:"Value"` | ||
100 | } | ||
101 | |||
102 | func (tag Tag) String() string { | ||
103 | return tag.Key + "=" + tag.Value | ||
104 | } | ||
105 | |||
106 | // IsEmpty returns whether this tag is empty or not. | ||
107 | func (tag Tag) IsEmpty() bool { | ||
108 | return tag.Key == "" | ||
109 | } | ||
110 | |||
111 | // Validate checks this tag. | ||
112 | func (tag Tag) Validate() error { | ||
113 | if err := checkKey(tag.Key); err != nil { | ||
114 | return err | ||
115 | } | ||
116 | |||
117 | return checkValue(tag.Value) | ||
118 | } | ||
119 | |||
120 | // MarshalXML encodes to XML data. | ||
121 | func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
122 | if err := tag.Validate(); err != nil { | ||
123 | return err | ||
124 | } | ||
125 | |||
126 | type subTag Tag // to avoid recursively calling MarshalXML() | ||
127 | return e.EncodeElement(subTag(tag), start) | ||
128 | } | ||
129 | |||
130 | // UnmarshalXML decodes XML data to tag. | ||
131 | func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { | ||
132 | type subTag Tag // to avoid recursively calling UnmarshalXML() | ||
133 | var st subTag | ||
134 | if err := d.DecodeElement(&st, &start); err != nil { | ||
135 | return err | ||
136 | } | ||
137 | |||
138 | if err := Tag(st).Validate(); err != nil { | ||
139 | return err | ||
140 | } | ||
141 | |||
142 | *tag = Tag(st) | ||
143 | return nil | ||
144 | } | ||
145 | |||
146 | // tagSet represents a list of unique tags. | ||
147 | type tagSet struct { | ||
148 | tagMap map[string]string | ||
149 | isObject bool | ||
150 | } | ||
151 | |||
152 | func (tags tagSet) String() string { | ||
153 | if len(tags.tagMap) == 0 { | ||
154 | return "" | ||
155 | } | ||
156 | var buf strings.Builder | ||
157 | keys := make([]string, 0, len(tags.tagMap)) | ||
158 | for k := range tags.tagMap { | ||
159 | keys = append(keys, k) | ||
160 | } | ||
161 | sort.Strings(keys) | ||
162 | for _, k := range keys { | ||
163 | keyEscaped := url.QueryEscape(k) | ||
164 | valueEscaped := url.QueryEscape(tags.tagMap[k]) | ||
165 | if buf.Len() > 0 { | ||
166 | buf.WriteByte('&') | ||
167 | } | ||
168 | buf.WriteString(keyEscaped) | ||
169 | buf.WriteByte('=') | ||
170 | buf.WriteString(valueEscaped) | ||
171 | } | ||
172 | return buf.String() | ||
173 | } | ||
174 | |||
175 | func (tags *tagSet) remove(key string) { | ||
176 | delete(tags.tagMap, key) | ||
177 | } | ||
178 | |||
179 | func (tags *tagSet) set(key, value string, failOnExist bool) error { | ||
180 | if failOnExist { | ||
181 | if _, found := tags.tagMap[key]; found { | ||
182 | return errDuplicateTagKey | ||
183 | } | ||
184 | } | ||
185 | |||
186 | if err := checkKey(key); err != nil { | ||
187 | return err | ||
188 | } | ||
189 | |||
190 | if err := checkValue(value); err != nil { | ||
191 | return err | ||
192 | } | ||
193 | |||
194 | if tags.isObject { | ||
195 | if len(tags.tagMap) == maxObjectTagCount { | ||
196 | return errTooManyObjectTags | ||
197 | } | ||
198 | } else if len(tags.tagMap) == maxTagCount { | ||
199 | return errTooManyTags | ||
200 | } | ||
201 | |||
202 | tags.tagMap[key] = value | ||
203 | return nil | ||
204 | } | ||
205 | |||
206 | func (tags tagSet) count() int { | ||
207 | return len(tags.tagMap) | ||
208 | } | ||
209 | |||
210 | func (tags tagSet) toMap() map[string]string { | ||
211 | m := make(map[string]string, len(tags.tagMap)) | ||
212 | for key, value := range tags.tagMap { | ||
213 | m[key] = value | ||
214 | } | ||
215 | return m | ||
216 | } | ||
217 | |||
218 | // MarshalXML encodes to XML data. | ||
219 | func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { | ||
220 | tagList := struct { | ||
221 | Tags []Tag `xml:"Tag"` | ||
222 | }{} | ||
223 | |||
224 | tagList.Tags = make([]Tag, 0, len(tags.tagMap)) | ||
225 | for key, value := range tags.tagMap { | ||
226 | tagList.Tags = append(tagList.Tags, Tag{key, value}) | ||
227 | } | ||
228 | |||
229 | return e.EncodeElement(tagList, start) | ||
230 | } | ||
231 | |||
232 | // UnmarshalXML decodes XML data to tag list. | ||
233 | func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { | ||
234 | tagList := struct { | ||
235 | Tags []Tag `xml:"Tag"` | ||
236 | }{} | ||
237 | |||
238 | if err := d.DecodeElement(&tagList, &start); err != nil { | ||
239 | return err | ||
240 | } | ||
241 | |||
242 | if tags.isObject { | ||
243 | if len(tagList.Tags) > maxObjectTagCount { | ||
244 | return errTooManyObjectTags | ||
245 | } | ||
246 | } else if len(tagList.Tags) > maxTagCount { | ||
247 | return errTooManyTags | ||
248 | } | ||
249 | |||
250 | m := make(map[string]string, len(tagList.Tags)) | ||
251 | for _, tag := range tagList.Tags { | ||
252 | if _, found := m[tag.Key]; found { | ||
253 | return errDuplicateTagKey | ||
254 | } | ||
255 | |||
256 | m[tag.Key] = tag.Value | ||
257 | } | ||
258 | |||
259 | tags.tagMap = m | ||
260 | return nil | ||
261 | } | ||
262 | |||
263 | type tagging struct { | ||
264 | XMLName xml.Name `xml:"Tagging"` | ||
265 | TagSet *tagSet `xml:"TagSet"` | ||
266 | } | ||
267 | |||
268 | // Tags is the list of tags of an XML request/response as per | ||
269 | // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody | ||
270 | type Tags tagging | ||
271 | |||
272 | func (tags Tags) String() string { | ||
273 | return tags.TagSet.String() | ||
274 | } | ||
275 | |||
276 | // Remove removes a tag by its key. | ||
277 | func (tags *Tags) Remove(key string) { | ||
278 | tags.TagSet.remove(key) | ||
279 | } | ||
280 | |||
281 | // Set sets new tag. | ||
282 | func (tags *Tags) Set(key, value string) error { | ||
283 | return tags.TagSet.set(key, value, false) | ||
284 | } | ||
285 | |||
286 | // Count - returns the number of tags accounted for. | ||
287 | func (tags Tags) Count() int { | ||
288 | return tags.TagSet.count() | ||
289 | } | ||
290 | |||
291 | // ToMap returns copy of tags. | ||
292 | func (tags Tags) ToMap() map[string]string { | ||
293 | return tags.TagSet.toMap() | ||
294 | } | ||
295 | |||
296 | // MapToObjectTags converts an input map of key and value into | ||
297 | // *Tags data structure with validation. | ||
298 | func MapToObjectTags(tagMap map[string]string) (*Tags, error) { | ||
299 | return NewTags(tagMap, true) | ||
300 | } | ||
301 | |||
302 | // MapToBucketTags converts an input map of key and value into | ||
303 | // *Tags data structure with validation. | ||
304 | func MapToBucketTags(tagMap map[string]string) (*Tags, error) { | ||
305 | return NewTags(tagMap, false) | ||
306 | } | ||
307 | |||
308 | // NewTags creates Tags from tagMap. If isObject is set, it validates as object tags. | ||
309 | func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) { | ||
310 | tagging := &Tags{ | ||
311 | TagSet: &tagSet{ | ||
312 | tagMap: make(map[string]string), | ||
313 | isObject: isObject, | ||
314 | }, | ||
315 | } | ||
316 | |||
317 | for key, value := range tagMap { | ||
318 | if err := tagging.TagSet.set(key, value, true); err != nil { | ||
319 | return nil, err | ||
320 | } | ||
321 | } | ||
322 | |||
323 | return tagging, nil | ||
324 | } | ||
325 | |||
326 | func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) { | ||
327 | tagging := &Tags{ | ||
328 | TagSet: &tagSet{ | ||
329 | tagMap: make(map[string]string), | ||
330 | isObject: isObject, | ||
331 | }, | ||
332 | } | ||
333 | |||
334 | if err := xml.NewDecoder(reader).Decode(tagging); err != nil { | ||
335 | return nil, err | ||
336 | } | ||
337 | |||
338 | return tagging, nil | ||
339 | } | ||
340 | |||
341 | // ParseBucketXML decodes XML data of tags in reader specified in | ||
342 | // https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax. | ||
343 | func ParseBucketXML(reader io.Reader) (*Tags, error) { | ||
344 | return unmarshalXML(reader, false) | ||
345 | } | ||
346 | |||
347 | // ParseObjectXML decodes XML data of tags in reader specified in | ||
348 | // https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax | ||
349 | func ParseObjectXML(reader io.Reader) (*Tags, error) { | ||
350 | return unmarshalXML(reader, true) | ||
351 | } | ||
352 | |||
353 | // stringsCut slices s around the first instance of sep, | ||
354 | // returning the text before and after sep. | ||
355 | // The found result reports whether sep appears in s. | ||
356 | // If sep does not appear in s, stringsCut returns s, "", false (mirroring strings.Cut). | ||
357 | func stringsCut(s, sep string) (before, after string, found bool) { | ||
358 | if i := strings.Index(s, sep); i >= 0 { | ||
359 | return s[:i], s[i+len(sep):], true | ||
360 | } | ||
361 | return s, "", false | ||
362 | } | ||
363 | |||
364 | func (tags *tagSet) parseTags(tgs string) (err error) { | ||
365 | for tgs != "" { | ||
366 | var key string | ||
367 | key, tgs, _ = stringsCut(tgs, "&") | ||
368 | if key == "" { | ||
369 | continue | ||
370 | } | ||
371 | key, value, _ := stringsCut(key, "=") | ||
372 | key, err1 := url.QueryUnescape(key) | ||
373 | if err1 != nil { | ||
374 | if err == nil { | ||
375 | err = err1 | ||
376 | } | ||
377 | continue | ||
378 | } | ||
379 | value, err1 = url.QueryUnescape(value) | ||
380 | if err1 != nil { | ||
381 | if err == nil { | ||
382 | err = err1 | ||
383 | } | ||
384 | continue | ||
385 | } | ||
386 | if err = tags.set(key, value, true); err != nil { | ||
387 | return err | ||
388 | } | ||
389 | } | ||
390 | return err | ||
391 | } | ||
392 | |||
393 | // Parse decodes an HTTP query formatted string into tags, validated according to isObject. | ||
394 | // A query formatted string is like "key1=value1&key2=value2". | ||
395 | func Parse(s string, isObject bool) (*Tags, error) { | ||
396 | tagging := &Tags{ | ||
397 | TagSet: &tagSet{ | ||
398 | tagMap: make(map[string]string), | ||
399 | isObject: isObject, | ||
400 | }, | ||
401 | } | ||
402 | |||
403 | if err := tagging.TagSet.parseTags(s); err != nil { | ||
404 | return nil, err | ||
405 | } | ||
406 | |||
407 | return tagging, nil | ||
408 | } | ||
409 | |||
410 | // ParseObjectTags decodes an HTTP query formatted string into object tags. A query formatted string is like "key1=value1&key2=value2". | ||
411 | func ParseObjectTags(s string) (*Tags, error) { | ||
412 | return Parse(s, true) | ||
413 | } | ||
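A minimal sketch of the round trip these helpers enable, using placeholder keys and values:

	package main

	import (
		"fmt"

		"github.com/minio/minio-go/v7/pkg/tags"
	)

	func main() {
		// Build object tags from a map; validation rejects bad keys and more than 10 tags.
		t, err := tags.MapToObjectTags(map[string]string{"project": "alpha", "env": "dev"})
		if err != nil {
			panic(err)
		}
		fmt.Println(t.String()) // "env=dev&project=alpha" (keys sorted, URL-encoded)

		// Parse the query-formatted form back into validated tags.
		parsed, err := tags.ParseObjectTags("env=dev&project=alpha")
		if err != nil {
			panic(err)
		}
		fmt.Println(parsed.ToMap())
	}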
diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go new file mode 100644 index 0000000..3f4881e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go | |||
@@ -0,0 +1,349 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2023 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "encoding/base64" | ||
22 | "fmt" | ||
23 | "net/http" | ||
24 | "strings" | ||
25 | "time" | ||
26 | |||
27 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
28 | ) | ||
29 | |||
30 | // expirationDateFormat date format for expiration key in json policy. | ||
31 | const expirationDateFormat = "2006-01-02T15:04:05.000Z" | ||
32 | |||
33 | // policyCondition explanation: | ||
34 | // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html | ||
35 | // | ||
36 | // Example: | ||
37 | // | ||
38 | // policyCondition { | ||
39 | // matchType: "eq", | ||
40 | // condition: "$Content-Type", | ||
41 | // value: "image/png", | ||
42 | // } | ||
43 | type policyCondition struct { | ||
44 | matchType string | ||
45 | condition string | ||
46 | value string | ||
47 | } | ||
48 | |||
49 | // PostPolicy - Provides strict static type conversion and validation | ||
50 | // for Amazon S3's POST policy JSON string. | ||
51 | type PostPolicy struct { | ||
52 | // Expiration date and time of the POST policy. | ||
53 | expiration time.Time | ||
54 | // Collection of different policy conditions. | ||
55 | conditions []policyCondition | ||
56 | // ContentLengthRange minimum and maximum allowable size for the | ||
57 | // uploaded content. | ||
58 | contentLengthRange struct { | ||
59 | min int64 | ||
60 | max int64 | ||
61 | } | ||
62 | |||
63 | // Post form data. | ||
64 | formData map[string]string | ||
65 | } | ||
66 | |||
67 | // NewPostPolicy - Instantiate new post policy. | ||
68 | func NewPostPolicy() *PostPolicy { | ||
69 | p := &PostPolicy{} | ||
70 | p.conditions = make([]policyCondition, 0) | ||
71 | p.formData = make(map[string]string) | ||
72 | return p | ||
73 | } | ||
74 | |||
75 | // SetExpires - Sets expiration time for the new policy. | ||
76 | func (p *PostPolicy) SetExpires(t time.Time) error { | ||
77 | if t.IsZero() { | ||
78 | return errInvalidArgument("No expiry time set.") | ||
79 | } | ||
80 | p.expiration = t | ||
81 | return nil | ||
82 | } | ||
83 | |||
84 | // SetKey - Sets an object name for the policy based upload. | ||
85 | func (p *PostPolicy) SetKey(key string) error { | ||
86 | if strings.TrimSpace(key) == "" || key == "" { | ||
87 | return errInvalidArgument("Object name is empty.") | ||
88 | } | ||
89 | policyCond := policyCondition{ | ||
90 | matchType: "eq", | ||
91 | condition: "$key", | ||
92 | value: key, | ||
93 | } | ||
94 | if err := p.addNewPolicy(policyCond); err != nil { | ||
95 | return err | ||
96 | } | ||
97 | p.formData["key"] = key | ||
98 | return nil | ||
99 | } | ||
100 | |||
101 | // SetKeyStartsWith - Sets an object name that a policy based upload | ||
102 | // can start with. | ||
103 | // Can use an empty value ("") to allow any key. | ||
104 | func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { | ||
105 | policyCond := policyCondition{ | ||
106 | matchType: "starts-with", | ||
107 | condition: "$key", | ||
108 | value: keyStartsWith, | ||
109 | } | ||
110 | if err := p.addNewPolicy(policyCond); err != nil { | ||
111 | return err | ||
112 | } | ||
113 | p.formData["key"] = keyStartsWith | ||
114 | return nil | ||
115 | } | ||
116 | |||
117 | // SetBucket - Sets the bucket to which objects will be uploaded. | ||
118 | func (p *PostPolicy) SetBucket(bucketName string) error { | ||
119 | if strings.TrimSpace(bucketName) == "" || bucketName == "" { | ||
120 | return errInvalidArgument("Bucket name is empty.") | ||
121 | } | ||
122 | policyCond := policyCondition{ | ||
123 | matchType: "eq", | ||
124 | condition: "$bucket", | ||
125 | value: bucketName, | ||
126 | } | ||
127 | if err := p.addNewPolicy(policyCond); err != nil { | ||
128 | return err | ||
129 | } | ||
130 | p.formData["bucket"] = bucketName | ||
131 | return nil | ||
132 | } | ||
133 | |||
134 | // SetCondition - Sets condition for credentials, date and algorithm | ||
135 | func (p *PostPolicy) SetCondition(matchType, condition, value string) error { | ||
136 | if strings.TrimSpace(value) == "" || value == "" { | ||
137 | return errInvalidArgument("No value specified for condition") | ||
138 | } | ||
139 | |||
140 | policyCond := policyCondition{ | ||
141 | matchType: matchType, | ||
142 | condition: "$" + condition, | ||
143 | value: value, | ||
144 | } | ||
145 | if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" { | ||
146 | if err := p.addNewPolicy(policyCond); err != nil { | ||
147 | return err | ||
148 | } | ||
149 | p.formData[condition] = value | ||
150 | return nil | ||
151 | } | ||
152 | return errInvalidArgument("Invalid condition in policy") | ||
153 | } | ||
154 | |||
155 | // SetContentType - Sets content-type of the object for this policy | ||
156 | // based upload. | ||
157 | func (p *PostPolicy) SetContentType(contentType string) error { | ||
158 | if strings.TrimSpace(contentType) == "" || contentType == "" { | ||
159 | return errInvalidArgument("No content type specified.") | ||
160 | } | ||
161 | policyCond := policyCondition{ | ||
162 | matchType: "eq", | ||
163 | condition: "$Content-Type", | ||
164 | value: contentType, | ||
165 | } | ||
166 | if err := p.addNewPolicy(policyCond); err != nil { | ||
167 | return err | ||
168 | } | ||
169 | p.formData["Content-Type"] = contentType | ||
170 | return nil | ||
171 | } | ||
172 | |||
173 | // SetContentTypeStartsWith - Sets a prefix the content-type of the object for this policy | ||
174 | // based upload must start with. | ||
175 | // Can use an empty value ("") to allow any content-type. | ||
176 | func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error { | ||
177 | policyCond := policyCondition{ | ||
178 | matchType: "starts-with", | ||
179 | condition: "$Content-Type", | ||
180 | value: contentTypeStartsWith, | ||
181 | } | ||
182 | if err := p.addNewPolicy(policyCond); err != nil { | ||
183 | return err | ||
184 | } | ||
185 | p.formData["Content-Type"] = contentTypeStartsWith | ||
186 | return nil | ||
187 | } | ||
188 | |||
189 | // SetContentLengthRange - Set new min and max content length | ||
190 | // condition for all incoming uploads. | ||
191 | func (p *PostPolicy) SetContentLengthRange(min, max int64) error { | ||
192 | if min > max { | ||
193 | return errInvalidArgument("Minimum limit is larger than maximum limit.") | ||
194 | } | ||
195 | if min < 0 { | ||
196 | return errInvalidArgument("Minimum limit cannot be negative.") | ||
197 | } | ||
198 | if max <= 0 { | ||
199 | return errInvalidArgument("Maximum limit cannot be non-positive.") | ||
200 | } | ||
201 | p.contentLengthRange.min = min | ||
202 | p.contentLengthRange.max = max | ||
203 | return nil | ||
204 | } | ||
205 | |||
206 | // SetSuccessActionRedirect - Sets the success redirect URL of the object for this policy | ||
207 | // based upload. | ||
208 | func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { | ||
209 | if strings.TrimSpace(redirect) == "" || redirect == "" { | ||
210 | return errInvalidArgument("Redirect is empty") | ||
211 | } | ||
212 | policyCond := policyCondition{ | ||
213 | matchType: "eq", | ||
214 | condition: "$success_action_redirect", | ||
215 | value: redirect, | ||
216 | } | ||
217 | if err := p.addNewPolicy(policyCond); err != nil { | ||
218 | return err | ||
219 | } | ||
220 | p.formData["success_action_redirect"] = redirect | ||
221 | return nil | ||
222 | } | ||
223 | |||
224 | // SetSuccessStatusAction - Sets the success status code of the object for this policy | ||
225 | // based upload. | ||
226 | func (p *PostPolicy) SetSuccessStatusAction(status string) error { | ||
227 | if strings.TrimSpace(status) == "" || status == "" { | ||
228 | return errInvalidArgument("Status is empty") | ||
229 | } | ||
230 | policyCond := policyCondition{ | ||
231 | matchType: "eq", | ||
232 | condition: "$success_action_status", | ||
233 | value: status, | ||
234 | } | ||
235 | if err := p.addNewPolicy(policyCond); err != nil { | ||
236 | return err | ||
237 | } | ||
238 | p.formData["success_action_status"] = status | ||
239 | return nil | ||
240 | } | ||
241 | |||
242 | // SetUserMetadata - Set user metadata as a key/value pair. | ||
243 | // Can be retrieved through a HEAD request or an event. | ||
244 | func (p *PostPolicy) SetUserMetadata(key, value string) error { | ||
245 | if strings.TrimSpace(key) == "" || key == "" { | ||
246 | return errInvalidArgument("Key is empty") | ||
247 | } | ||
248 | if strings.TrimSpace(value) == "" || value == "" { | ||
249 | return errInvalidArgument("Value is empty") | ||
250 | } | ||
251 | headerName := fmt.Sprintf("x-amz-meta-%s", key) | ||
252 | policyCond := policyCondition{ | ||
253 | matchType: "eq", | ||
254 | condition: fmt.Sprintf("$%s", headerName), | ||
255 | value: value, | ||
256 | } | ||
257 | if err := p.addNewPolicy(policyCond); err != nil { | ||
258 | return err | ||
259 | } | ||
260 | p.formData[headerName] = value | ||
261 | return nil | ||
262 | } | ||
263 | |||
264 | // SetChecksum sets the checksum of the request. | ||
265 | func (p *PostPolicy) SetChecksum(c Checksum) { | ||
266 | if c.IsSet() { | ||
267 | p.formData[amzChecksumAlgo] = c.Type.String() | ||
268 | p.formData[c.Type.Key()] = c.Encoded() | ||
269 | } | ||
270 | } | ||
271 | |||
272 | // SetEncryption - sets encryption headers for POST API | ||
273 | func (p *PostPolicy) SetEncryption(sse encrypt.ServerSide) { | ||
274 | if sse == nil { | ||
275 | return | ||
276 | } | ||
277 | h := http.Header{} | ||
278 | sse.Marshal(h) | ||
279 | for k, v := range h { | ||
280 | p.formData[k] = v[0] | ||
281 | } | ||
282 | } | ||
283 | |||
284 | // SetUserData - Set user data as a key/value pair. | ||
285 | // Can be retrieved through a HEAD request or an event. | ||
286 | func (p *PostPolicy) SetUserData(key, value string) error { | ||
287 | if key == "" { | ||
288 | return errInvalidArgument("Key is empty") | ||
289 | } | ||
290 | if value == "" { | ||
291 | return errInvalidArgument("Value is empty") | ||
292 | } | ||
293 | headerName := fmt.Sprintf("x-amz-%s", key) | ||
294 | policyCond := policyCondition{ | ||
295 | matchType: "eq", | ||
296 | condition: fmt.Sprintf("$%s", headerName), | ||
297 | value: value, | ||
298 | } | ||
299 | if err := p.addNewPolicy(policyCond); err != nil { | ||
300 | return err | ||
301 | } | ||
302 | p.formData[headerName] = value | ||
303 | return nil | ||
304 | } | ||
305 | |||
306 | // addNewPolicy - internal helper to validate adding new policies. | ||
307 | // Can use starts-with with an empty value ("") to allow any content within a form field. | ||
308 | func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { | ||
309 | if policyCond.matchType == "" || policyCond.condition == "" { | ||
310 | return errInvalidArgument("Policy fields are empty.") | ||
311 | } | ||
312 | if policyCond.matchType != "starts-with" && policyCond.value == "" { | ||
313 | return errInvalidArgument("Policy value is empty.") | ||
314 | } | ||
315 | p.conditions = append(p.conditions, policyCond) | ||
316 | return nil | ||
317 | } | ||
318 | |||
319 | // String function for printing the policy as a JSON formatted string. | ||
320 | func (p PostPolicy) String() string { | ||
321 | return string(p.marshalJSON()) | ||
322 | } | ||
323 | |||
324 | // marshalJSON - Provides Marshaled JSON in bytes. | ||
325 | func (p PostPolicy) marshalJSON() []byte { | ||
326 | expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` | ||
327 | var conditionsStr string | ||
328 | conditions := []string{} | ||
329 | for _, po := range p.conditions { | ||
330 | conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) | ||
331 | } | ||
332 | if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { | ||
333 | conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", | ||
334 | p.contentLengthRange.min, p.contentLengthRange.max)) | ||
335 | } | ||
336 | if len(conditions) > 0 { | ||
337 | conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" | ||
338 | } | ||
339 | retStr := "{" | ||
340 | retStr = retStr + expirationStr + "," | ||
341 | retStr += conditionsStr | ||
342 | retStr += "}" | ||
343 | return []byte(retStr) | ||
344 | } | ||
345 | |||
346 | // base64 - Produces base64 of PostPolicy's Marshaled json. | ||
347 | func (p PostPolicy) base64() string { | ||
348 | return base64.StdEncoding.EncodeToString(p.marshalJSON()) | ||
349 | } | ||
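A rough sketch of typical PostPolicy use; PresignedPostPolicy is the client method that consumes the policy (bucket, object, and expiry are placeholders; error handling is omitted for brevity):

	import (
		"context"
		"net/url"
		"time"

		"github.com/minio/minio-go/v7"
	)

	func presignUploadForm(client *minio.Client) (*url.URL, map[string]string, error) {
		policy := minio.NewPostPolicy()
		policy.SetBucket("mybucket")    // placeholder bucket
		policy.SetKey("myobject.png")   // placeholder object
		policy.SetContentType("image/png")
		policy.SetExpires(time.Now().UTC().Add(10 * time.Minute))
		policy.SetContentLengthRange(1, 10*1024*1024)

		// Returns the POST URL and the form fields (including the base64
		// policy and its signature) that a browser form must submit.
		return client.PresignedPostPolicy(context.Background(), policy)
	}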
diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go new file mode 100644 index 0000000..bfeea95 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import "time" | ||
21 | |||
22 | // newRetryTimerContinous creates a timer with exponentially increasing delays forever, until doneCh is closed. | ||
23 | func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { | ||
24 | attemptCh := make(chan int) | ||
25 | |||
26 | // normalize jitter to the range [0, 1.0] | ||
27 | if jitter < NoJitter { | ||
28 | jitter = NoJitter | ||
29 | } | ||
30 | if jitter > MaxJitter { | ||
31 | jitter = MaxJitter | ||
32 | } | ||
33 | |||
34 | // computes the exponential backoff duration according to | ||
35 | // https://www.awsarchitectureblog.com/2015/03/backoff.html | ||
36 | exponentialBackoffWait := func(attempt int) time.Duration { | ||
37 | // 1<<uint(attempt) below could overflow, so limit the value of attempt | ||
38 | maxAttempt := 30 | ||
39 | if attempt > maxAttempt { | ||
40 | attempt = maxAttempt | ||
41 | } | ||
42 | // sleep = random_between(0, min(cap, base * 2 ** attempt)) | ||
43 | sleep := unit * time.Duration(1<<uint(attempt)) | ||
44 | if sleep > cap { | ||
45 | sleep = cap | ||
46 | } | ||
47 | if jitter != NoJitter { | ||
48 | sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) | ||
49 | } | ||
50 | return sleep | ||
51 | } | ||
52 | |||
53 | go func() { | ||
54 | defer close(attemptCh) | ||
55 | var nextBackoff int | ||
56 | for { | ||
57 | select { | ||
58 | // Attempts starts. | ||
59 | case attemptCh <- nextBackoff: | ||
60 | nextBackoff++ | ||
61 | case <-doneCh: | ||
62 | // Stop the routine. | ||
63 | return | ||
64 | } | ||
65 | time.Sleep(exponentialBackoffWait(nextBackoff)) | ||
66 | } | ||
67 | }() | ||
68 | return attemptCh | ||
69 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go new file mode 100644 index 0000000..1c6105e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/retry.go | |||
@@ -0,0 +1,148 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "crypto/x509" | ||
23 | "errors" | ||
24 | "net/http" | ||
25 | "net/url" | ||
26 | "time" | ||
27 | ) | ||
28 | |||
29 | // MaxRetry is the maximum number of retries before stopping. | ||
30 | var MaxRetry = 10 | ||
31 | |||
32 | // MaxJitter will randomize over the full exponential backoff time | ||
33 | const MaxJitter = 1.0 | ||
34 | |||
35 | // NoJitter disables the use of jitter for randomizing the exponential backoff time | ||
36 | const NoJitter = 0.0 | ||
37 | |||
38 | // DefaultRetryUnit - default multiplicative unit per retry, | ||
39 | // defaults to 200 * time.Millisecond. | ||
40 | var DefaultRetryUnit = 200 * time.Millisecond | ||
41 | |||
42 | // DefaultRetryCap - Each retry attempt never waits longer than | ||
43 | // this maximum time duration. | ||
44 | var DefaultRetryCap = time.Second | ||
45 | |||
46 | // newRetryTimer creates a timer with exponentially increasing | ||
47 | // delays until the maximum retry attempts are reached. | ||
48 | func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int { | ||
49 | attemptCh := make(chan int) | ||
50 | |||
51 | // computes the exponential backoff duration according to | ||
52 | // https://www.awsarchitectureblog.com/2015/03/backoff.html | ||
53 | exponentialBackoffWait := func(attempt int) time.Duration { | ||
54 | // normalize jitter to the range [0, 1.0] | ||
55 | if jitter < NoJitter { | ||
56 | jitter = NoJitter | ||
57 | } | ||
58 | if jitter > MaxJitter { | ||
59 | jitter = MaxJitter | ||
60 | } | ||
61 | |||
62 | // sleep = random_between(0, min(cap, base * 2 ** attempt)) | ||
63 | sleep := unit * time.Duration(1<<uint(attempt)) | ||
64 | if sleep > cap { | ||
65 | sleep = cap | ||
66 | } | ||
67 | if jitter != NoJitter { | ||
68 | sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) | ||
69 | } | ||
70 | return sleep | ||
71 | } | ||
72 | |||
73 | go func() { | ||
74 | defer close(attemptCh) | ||
75 | for i := 0; i < maxRetry; i++ { | ||
76 | select { | ||
77 | case attemptCh <- i + 1: | ||
78 | case <-ctx.Done(): | ||
79 | return | ||
80 | } | ||
81 | |||
82 | select { | ||
83 | case <-time.After(exponentialBackoffWait(i)): | ||
84 | case <-ctx.Done(): | ||
85 | return | ||
86 | } | ||
87 | } | ||
88 | }() | ||
89 | return attemptCh | ||
90 | } | ||
91 | |||
92 | // List of AWS S3 error codes which are retryable. | ||
93 | var retryableS3Codes = map[string]struct{}{ | ||
94 | "RequestError": {}, | ||
95 | "RequestTimeout": {}, | ||
96 | "Throttling": {}, | ||
97 | "ThrottlingException": {}, | ||
98 | "RequestLimitExceeded": {}, | ||
99 | "RequestThrottled": {}, | ||
100 | "InternalError": {}, | ||
101 | "ExpiredToken": {}, | ||
102 | "ExpiredTokenException": {}, | ||
103 | "SlowDown": {}, | ||
104 | // Add more AWS S3 codes here. | ||
105 | } | ||
106 | |||
107 | // isS3CodeRetryable - is s3 error code retryable. | ||
108 | func isS3CodeRetryable(s3Code string) (ok bool) { | ||
109 | _, ok = retryableS3Codes[s3Code] | ||
110 | return ok | ||
111 | } | ||
112 | |||
113 | // List of HTTP status codes which are retryable. | ||
114 | var retryableHTTPStatusCodes = map[int]struct{}{ | ||
115 | 429: {}, // http.StatusTooManyRequests (kept numeric; it was not in the Go 1.5 stdlib) | ||
116 | 499: {}, // client closed request, retry. A non-standard status code introduced by nginx. | ||
117 | http.StatusInternalServerError: {}, | ||
118 | http.StatusBadGateway: {}, | ||
119 | http.StatusServiceUnavailable: {}, | ||
120 | http.StatusGatewayTimeout: {}, | ||
121 | // Add more HTTP status codes here. | ||
122 | } | ||
123 | |||
124 | // isHTTPStatusRetryable - is HTTP error code retryable. | ||
125 | func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { | ||
126 | _, ok = retryableHTTPStatusCodes[httpStatusCode] | ||
127 | return ok | ||
128 | } | ||
129 | |||
130 | // For now, all http Do() requests are retryable except for some well-defined errors | ||
131 | func isRequestErrorRetryable(err error) bool { | ||
132 | if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { | ||
133 | return false | ||
134 | } | ||
135 | if ue, ok := err.(*url.Error); ok { | ||
136 | e := ue.Unwrap() | ||
137 | switch e.(type) { | ||
138 | // x509: certificate signed by unknown authority | ||
139 | case x509.UnknownAuthorityError: | ||
140 | return false | ||
141 | } | ||
142 | switch e.Error() { | ||
143 | case "http: server gave HTTP response to HTTPS client": | ||
144 | return false | ||
145 | } | ||
146 | } | ||
147 | return true | ||
148 | } | ||
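A standalone sketch of the capped, full-jitter backoff that both retry timers implement; the parameters mirror unit, cap, and jitter above:

	package main

	import (
		"fmt"
		"math/rand"
		"time"
	)

	// backoff computes sleep = random_between(0, min(cap, unit * 2^attempt)).
	func backoff(attempt int, unit, cap time.Duration, jitter float64, rng *rand.Rand) time.Duration {
		sleep := unit * time.Duration(1<<uint(attempt))
		if sleep > cap {
			sleep = cap
		}
		// Subtract a random fraction of the sleep, scaled by jitter.
		sleep -= time.Duration(rng.Float64() * float64(sleep) * jitter)
		return sleep
	}

	func main() {
		rng := rand.New(rand.NewSource(time.Now().UnixNano()))
		for i := 0; i < 5; i++ {
			fmt.Printf("attempt %d: wait %v\n", i+1, backoff(i, 200*time.Millisecond, time.Second, 1.0, rng))
		}
	}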
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go new file mode 100644 index 0000000..b1de7b6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | // awsS3EndpointMap Amazon S3 endpoint map. | ||
21 | var awsS3EndpointMap = map[string]string{ | ||
22 | "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", | ||
23 | "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", | ||
24 | "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", | ||
25 | "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", | ||
26 | "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", | ||
27 | "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", | ||
28 | "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", | ||
29 | "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", | ||
30 | "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", | ||
31 | "eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com", | ||
32 | "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", | ||
33 | "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", | ||
34 | "eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com", | ||
35 | "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", | ||
36 | "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", | ||
37 | "ap-south-2": "s3.dualstack.ap-south-2.amazonaws.com", | ||
38 | "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", | ||
39 | "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", | ||
40 | "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", | ||
41 | "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", | ||
42 | "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", | ||
43 | "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", | ||
44 | "me-central-1": "s3.dualstack.me-central-1.amazonaws.com", | ||
45 | "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", | ||
46 | "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", | ||
47 | "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", | ||
48 | "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", | ||
49 | "cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn", | ||
50 | "cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn", | ||
51 | "ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com", | ||
52 | "ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com", | ||
53 | "il-central-1": "s3.dualstack.il-central-1.amazonaws.com", | ||
54 | } | ||
55 | |||
56 | // getS3Endpoint gets the Amazon S3 endpoint based on the bucket location. | ||
57 | func getS3Endpoint(bucketLocation string) (s3Endpoint string) { | ||
58 | s3Endpoint, ok := awsS3EndpointMap[bucketLocation] | ||
59 | if !ok { | ||
60 | // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. | ||
61 | s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" | ||
62 | } | ||
63 | return s3Endpoint | ||
64 | } | ||
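For illustration, unknown locations simply fall back to the us-east-1 dual-stack endpoint:

	// getS3Endpoint("eu-west-1")    == "s3.dualstack.eu-west-1.amazonaws.com"
	// getS3Endpoint("not-a-region") == "s3.dualstack.us-east-1.amazonaws.com" (fallback)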
diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go new file mode 100644 index 0000000..f365157 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/s3-error.go | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | // Non-exhaustive list of AWS S3 standard error responses - | ||
21 | // http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html | ||
22 | var s3ErrorResponseMap = map[string]string{ | ||
23 | "AccessDenied": "Access Denied.", | ||
24 | "BadDigest": "The Content-Md5 you specified did not match what we received.", | ||
25 | "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", | ||
26 | "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", | ||
27 | "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", | ||
28 | "InternalError": "We encountered an internal error, please try again.", | ||
29 | "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", | ||
30 | "InvalidBucketName": "The specified bucket is not valid.", | ||
31 | "InvalidDigest": "The Content-Md5 you specified is not valid.", | ||
32 | "InvalidRange": "The requested range is not satisfiable", | ||
33 | "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", | ||
34 | "MissingContentLength": "You must provide the Content-Length HTTP header.", | ||
35 | "MissingContentMD5": "Missing required header for this request: Content-Md5.", | ||
36 | "MissingRequestBodyError": "Request body is empty.", | ||
37 | "NoSuchBucket": "The specified bucket does not exist.", | ||
38 | "NoSuchBucketPolicy": "The bucket policy does not exist", | ||
39 | "NoSuchKey": "The specified key does not exist.", | ||
40 | "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", | ||
41 | "NotImplemented": "A header you provided implies functionality that is not implemented", | ||
42 | "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", | ||
43 | "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", | ||
44 | "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", | ||
45 | "MethodNotAllowed": "The specified method is not allowed against this resource.", | ||
46 | "InvalidPart": "One or more of the specified parts could not be found.", | ||
47 | "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", | ||
48 | "InvalidObjectState": "The operation is not valid for the current state of the object.", | ||
49 | "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", | ||
50 | "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", | ||
51 | "BucketNotEmpty": "The bucket you tried to delete is not empty", | ||
52 | "AllAccessDisabled": "All access to this bucket has been disabled.", | ||
53 | "MalformedPolicy": "Policy has invalid resource.", | ||
54 | "MissingFields": "Missing fields in request.", | ||
55 | "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".", | ||
56 | "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", | ||
57 | "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", | ||
58 | "InvalidDuration": "Duration provided in the request is invalid.", | ||
59 | "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", | ||
60 | // Add new API errors here. | ||
61 | } | ||
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go new file mode 100644 index 0000000..1bff664 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/transport.go | |||
@@ -0,0 +1,83 @@ | |||
1 | //go:build go1.7 || go1.8 | ||
2 | // +build go1.7 go1.8 | ||
3 | |||
4 | /* | ||
5 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
6 | * Copyright 2017-2018 MinIO, Inc. | ||
7 | * | ||
8 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
9 | * you may not use this file except in compliance with the License. | ||
10 | * You may obtain a copy of the License at | ||
11 | * | ||
12 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
13 | * | ||
14 | * Unless required by applicable law or agreed to in writing, software | ||
15 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
17 | * See the License for the specific language governing permissions and | ||
18 | * limitations under the License. | ||
19 | */ | ||
20 | |||
21 | package minio | ||
22 | |||
23 | import ( | ||
24 | "crypto/tls" | ||
25 | "crypto/x509" | ||
26 | "net" | ||
27 | "net/http" | ||
28 | "os" | ||
29 | "time" | ||
30 | ) | ||
31 | |||
32 | // mustGetSystemCertPool - returns the system CAs, or an empty pool in case of error (or on Windows). | ||
33 | func mustGetSystemCertPool() *x509.CertPool { | ||
34 | pool, err := x509.SystemCertPool() | ||
35 | if err != nil { | ||
36 | return x509.NewCertPool() | ||
37 | } | ||
38 | return pool | ||
39 | } | ||
40 | |||
41 | // DefaultTransport - this default transport is similar to | ||
42 | // http.DefaultTransport but with DisableCompression set to | ||
43 | // true to avoid decompressing content with 'gzip' encoding. | ||
44 | var DefaultTransport = func(secure bool) (*http.Transport, error) { | ||
45 | tr := &http.Transport{ | ||
46 | Proxy: http.ProxyFromEnvironment, | ||
47 | DialContext: (&net.Dialer{ | ||
48 | Timeout: 30 * time.Second, | ||
49 | KeepAlive: 30 * time.Second, | ||
50 | }).DialContext, | ||
51 | MaxIdleConns: 256, | ||
52 | MaxIdleConnsPerHost: 16, | ||
53 | ResponseHeaderTimeout: time.Minute, | ||
54 | IdleConnTimeout: time.Minute, | ||
55 | TLSHandshakeTimeout: 10 * time.Second, | ||
56 | ExpectContinueTimeout: 10 * time.Second, | ||
57 | // Set this value so that the underlying transport round-tripper | ||
58 | // doesn't try to auto decode the body of objects with | ||
59 | // content-encoding set to `gzip`. | ||
60 | // | ||
61 | // Refer: | ||
62 | // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 | ||
63 | DisableCompression: true, | ||
64 | } | ||
65 | |||
66 | if secure { | ||
67 | tr.TLSClientConfig = &tls.Config{ | ||
68 | // Can't use SSLv3 because of POODLE and BEAST | ||
69 | // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher | ||
70 | // Can't use TLSv1.1 because of RC4 cipher usage | ||
71 | MinVersion: tls.VersionTLS12, | ||
72 | } | ||
73 | if f := os.Getenv("SSL_CERT_FILE"); f != "" { | ||
74 | rootCAs := mustGetSystemCertPool() | ||
75 | data, err := os.ReadFile(f) | ||
76 | if err == nil { | ||
77 | rootCAs.AppendCertsFromPEM(data) | ||
78 | } | ||
79 | tr.TLSClientConfig.RootCAs = rootCAs | ||
80 | } | ||
81 | } | ||
82 | return tr, nil | ||
83 | } | ||
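A rough sketch of wiring this transport into a client (the endpoint and credentials are placeholders; Options.Transport is the hook the client exposes):

	package main

	import (
		"log"

		"github.com/minio/minio-go/v7"
		"github.com/minio/minio-go/v7/pkg/credentials"
	)

	func main() {
		tr, err := minio.DefaultTransport(true) // secure: TLS 1.2+, compression disabled
		if err != nil {
			log.Fatal(err)
		}

		client, err := minio.New("play.min.io", &minio.Options{
			Creds:     credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
			Secure:    true,
			Transport: tr,
		})
		if err != nil {
			log.Fatal(err)
		}
		_ = client
	}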
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go new file mode 100644 index 0000000..e39eba0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/utils.go | |||
@@ -0,0 +1,693 @@ | |||
1 | /* | ||
2 | * MinIO Go Library for Amazon S3 Compatible Cloud Storage | ||
3 | * Copyright 2015-2017 MinIO, Inc. | ||
4 | * | ||
5 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
6 | * you may not use this file except in compliance with the License. | ||
7 | * You may obtain a copy of the License at | ||
8 | * | ||
9 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | * | ||
11 | * Unless required by applicable law or agreed to in writing, software | ||
12 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
14 | * See the License for the specific language governing permissions and | ||
15 | * limitations under the License. | ||
16 | */ | ||
17 | |||
18 | package minio | ||
19 | |||
20 | import ( | ||
21 | "context" | ||
22 | "crypto/md5" | ||
23 | fipssha256 "crypto/sha256" | ||
24 | "encoding/base64" | ||
25 | "encoding/hex" | ||
26 | "encoding/xml" | ||
27 | "errors" | ||
28 | "fmt" | ||
29 | "hash" | ||
30 | "io" | ||
31 | "math/rand" | ||
32 | "net" | ||
33 | "net/http" | ||
34 | "net/url" | ||
35 | "regexp" | ||
36 | "strconv" | ||
37 | "strings" | ||
38 | "sync" | ||
39 | "time" | ||
40 | |||
41 | md5simd "github.com/minio/md5-simd" | ||
42 | "github.com/minio/minio-go/v7/pkg/encrypt" | ||
43 | "github.com/minio/minio-go/v7/pkg/s3utils" | ||
44 | "github.com/minio/sha256-simd" | ||
45 | ) | ||
46 | |||
47 | func trimEtag(etag string) string { | ||
48 | etag = strings.TrimPrefix(etag, "\"") | ||
49 | return strings.TrimSuffix(etag, "\"") | ||
50 | } | ||
51 | |||
52 | var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`) | ||
53 | |||
54 | func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) { | ||
55 | if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 { | ||
56 | expTime, err := parseRFC7231Time(matches[1]) | ||
57 | if err != nil { | ||
58 | return time.Time{}, "" | ||
59 | } | ||
60 | return expTime, matches[2] | ||
61 | } | ||
62 | return time.Time{}, "" | ||
63 | } | ||
64 | |||
65 | var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`) | ||
66 | |||
67 | func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) { | ||
68 | matches := restoreRegex.FindStringSubmatch(restore) | ||
69 | if len(matches) != 4 { | ||
70 | return false, time.Time{}, errors.New("unexpected restore header") | ||
71 | } | ||
72 | ongoing, err = strconv.ParseBool(matches[1]) | ||
73 | if err != nil { | ||
74 | return false, time.Time{}, err | ||
75 | } | ||
76 | if matches[3] != "" { | ||
77 | expTime, err = parseRFC7231Time(matches[3]) | ||
78 | if err != nil { | ||
79 | return false, time.Time{}, err | ||
80 | } | ||
81 | } | ||
82 | return | ||
83 | } | ||
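// For example, a restore header of the documented form:
//   x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"
// yields ongoing == false and the parsed expiry time.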
84 | |||
85 | // xmlDecoder decodes an xml body into the provided value. | ||
86 | func xmlDecoder(body io.Reader, v interface{}) error { | ||
87 | d := xml.NewDecoder(body) | ||
88 | return d.Decode(v) | ||
89 | } | ||
90 | |||
91 | // sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded. | ||
92 | func sum256Hex(data []byte) string { | ||
93 | hash := newSHA256Hasher() | ||
94 | defer hash.Close() | ||
95 | hash.Write(data) | ||
96 | return hex.EncodeToString(hash.Sum(nil)) | ||
97 | } | ||
98 | |||
99 | // sumMD5Base64 calculates the md5sum of an input byte array, returns base64 encoded. | ||
100 | func sumMD5Base64(data []byte) string { | ||
101 | hash := newMd5Hasher() | ||
102 | defer hash.Close() | ||
103 | hash.Write(data) | ||
104 | return base64.StdEncoding.EncodeToString(hash.Sum(nil)) | ||
105 | } | ||
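// Example (editor's illustration, not part of the vendored source): hashing
// an empty payload with sum256Hex yields the emptySHA256Hex constant
// declared below, the value commonly used by signature V4 for bodyless
// requests:
//
//	sum256Hex(nil)
//	// == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"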
106 | |||
107 | // getEndpointURL - construct a new endpoint. | ||
108 | func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { | ||
109 | // If secure is false, use 'http' scheme. | ||
110 | scheme := "https" | ||
111 | if !secure { | ||
112 | scheme = "http" | ||
113 | } | ||
114 | |||
115 | // Construct a secured endpoint URL. | ||
116 | endpointURLStr := scheme + "://" + endpoint | ||
117 | endpointURL, err := url.Parse(endpointURLStr) | ||
118 | if err != nil { | ||
119 | return nil, err | ||
120 | } | ||
121 | |||
122 | // Validate incoming endpoint URL. | ||
123 | if err := isValidEndpointURL(*endpointURL); err != nil { | ||
124 | return nil, err | ||
125 | } | ||
126 | return endpointURL, nil | ||
127 | } | ||
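// Example (editor's illustration, not part of the vendored source):
//
//	u, err := getEndpointURL("play.min.io", true)
//	// u.String() == "https://play.min.io", err == nil
//	u, err = getEndpointURL("s3.amazonaws.com/bucket", true)
//	// err != nil: fully qualified paths are rejected by isValidEndpointURL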
128 | |||
129 | // closeResponse closes a non-nil response along with its Body. | ||
130 | // Convenient wrapper to drain any remaining data on the response body. | ||
131 | // | ||
132 | // Subsequently this allows golang http RoundTripper | ||
133 | // to re-use the same connection for future requests. | ||
134 | func closeResponse(resp *http.Response) { | ||
135 | // Callers should close resp.Body when done reading from it. | ||
136 | // If resp.Body is not closed, the Client's underlying RoundTripper | ||
137 | // (typically Transport) may not be able to re-use a persistent TCP | ||
138 | // connection to the server for a subsequent "keep-alive" request. | ||
139 | if resp != nil && resp.Body != nil { | ||
140 | // Drain any remaining Body and then close the connection. | ||
141 | // Without draining first, the connection | ||
142 | // cannot be re-used for subsequent requests. | ||
143 | // - http://stackoverflow.com/a/17961593/4465767 | ||
144 | io.Copy(io.Discard, resp.Body) | ||
145 | resp.Body.Close() | ||
146 | } | ||
147 | } | ||
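// Example (editor's illustration, not part of the vendored source): a
// typical call site pairs the request with a deferred cleanup so the
// transport can re-use the connection (httpClient and req are hypothetical):
//
//	resp, err := httpClient.Do(req)
//	defer closeResponse(resp) // safe even when resp is nil on error
//	if err != nil {
//		return err
//	}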
148 | |||
149 | var ( | ||
150 | // Hex encoded sha256sum of empty (zero-byte) input. | ||
151 | emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" | ||
152 | |||
153 | // sentinelURL is the default (zero) URL value, which is invalid. | ||
154 | sentinelURL = url.URL{} | ||
155 | ) | ||
156 | |||
157 | // Verify if input endpoint URL is valid. | ||
158 | func isValidEndpointURL(endpointURL url.URL) error { | ||
159 | if endpointURL == sentinelURL { | ||
160 | return errInvalidArgument("Endpoint url cannot be empty.") | ||
161 | } | ||
162 | if endpointURL.Path != "/" && endpointURL.Path != "" { | ||
163 | return errInvalidArgument("Endpoint url cannot have fully qualified paths.") | ||
164 | } | ||
165 | host := endpointURL.Hostname() | ||
166 | if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { | ||
167 | msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards." | ||
168 | return errInvalidArgument(msg) | ||
169 | } | ||
170 | |||
171 | if strings.Contains(host, ".s3.amazonaws.com") { | ||
172 | if !s3utils.IsAmazonEndpoint(endpointURL) { | ||
173 | return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") | ||
174 | } | ||
175 | } | ||
176 | if strings.Contains(host, ".googleapis.com") { | ||
177 | if !s3utils.IsGoogleEndpoint(endpointURL) { | ||
178 | return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") | ||
179 | } | ||
180 | } | ||
181 | return nil | ||
182 | } | ||
183 | |||
184 | // Verify if input expires value is valid. | ||
185 | func isValidExpiry(expires time.Duration) error { | ||
186 | expireSeconds := int64(expires / time.Second) | ||
187 | if expireSeconds < 1 { | ||
188 | return errInvalidArgument("Expires cannot be less than 1 second.") | ||
189 | } | ||
190 | if expireSeconds > 604800 { | ||
191 | return errInvalidArgument("Expires cannot be greater than 7 days.") | ||
192 | } | ||
193 | return nil | ||
194 | } | ||
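// Example (editor's illustration, not part of the vendored source): presign
// expiries must fall within [1 second, 7 days]:
//
//	isValidExpiry(30 * time.Minute)       // nil
//	isValidExpiry(8 * 24 * time.Hour)     // error: greater than 7 days
//	isValidExpiry(500 * time.Millisecond) // error: less than 1 second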
195 | |||
196 | // Extract only necessary metadata header key/values by | ||
197 | // filtering them against a list of preserved header keys. | ||
198 | func extractObjMetadata(header http.Header) http.Header { | ||
199 | preserveKeys := []string{ | ||
200 | "Content-Type", | ||
201 | "Cache-Control", | ||
202 | "Content-Encoding", | ||
203 | "Content-Language", | ||
204 | "Content-Disposition", | ||
205 | "X-Amz-Storage-Class", | ||
206 | "X-Amz-Object-Lock-Mode", | ||
207 | "X-Amz-Object-Lock-Retain-Until-Date", | ||
208 | "X-Amz-Object-Lock-Legal-Hold", | ||
209 | "X-Amz-Website-Redirect-Location", | ||
210 | "X-Amz-Server-Side-Encryption", | ||
211 | "X-Amz-Tagging-Count", | ||
212 | "X-Amz-Meta-", | ||
213 | // Add new headers to be preserved. | ||
214 | // if you add new headers here, please extend | ||
215 | // PutObjectOptions{} to preserve them | ||
216 | // upon upload as well. | ||
217 | } | ||
218 | filteredHeader := make(http.Header) | ||
219 | for k, v := range header { | ||
220 | var found bool | ||
221 | for _, prefix := range preserveKeys { | ||
222 | if !strings.HasPrefix(k, prefix) { | ||
223 | continue | ||
224 | } | ||
225 | found = true | ||
226 | break | ||
227 | } | ||
228 | if found { | ||
229 | filteredHeader[k] = v | ||
230 | } | ||
231 | } | ||
232 | return filteredHeader | ||
233 | } | ||
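// Example (editor's illustration, not part of the vendored source): only
// keys matching the preserve list (including the X-Amz-Meta- prefix)
// survive the filter:
//
//	h := http.Header{}
//	h.Set("Content-Type", "text/plain")
//	h.Set("X-Amz-Meta-Owner", "alice")
//	h.Set("Date", "Tue, 29 Apr 2014 18:30:38 GMT") // dropped
//	meta := extractObjMetadata(h) // keeps Content-Type and X-Amz-Meta-Owner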
234 | |||
235 | const ( | ||
236 | // RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT | ||
237 | rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" | ||
238 | rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" | ||
239 | rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" | ||
240 | ) | ||
241 | |||
242 | func parseTime(t string, formats ...string) (time.Time, error) { | ||
243 | for _, format := range formats { | ||
244 | tt, err := time.Parse(format, t) | ||
245 | if err == nil { | ||
246 | return tt, nil | ||
247 | } | ||
248 | } | ||
249 | return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats) | ||
250 | } | ||
251 | |||
252 | func parseRFC7231Time(lastModified string) (time.Time, error) { | ||
253 | return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear) | ||
254 | } | ||
255 | |||
256 | // ToObjectInfo converts http header values into ObjectInfo type, | ||
257 | // extracts metadata and fills in all the necessary fields in ObjectInfo. | ||
258 | func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, error) { | ||
259 | var err error | ||
260 | // Trim off the odd double quotes from ETag in the beginning and end. | ||
261 | etag := trimEtag(h.Get("ETag")) | ||
262 | |||
263 | // Parse Content-Length if it exists. | ||
264 | var size int64 = -1 | ||
265 | contentLengthStr := h.Get("Content-Length") | ||
266 | if contentLengthStr != "" { | ||
267 | size, err = strconv.ParseInt(contentLengthStr, 10, 64) | ||
268 | if err != nil { | ||
269 | // Content-Length is not valid | ||
270 | return ObjectInfo{}, ErrorResponse{ | ||
271 | Code: "InternalError", | ||
272 | Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err), | ||
273 | BucketName: bucketName, | ||
274 | Key: objectName, | ||
275 | RequestID: h.Get("x-amz-request-id"), | ||
276 | HostID: h.Get("x-amz-id-2"), | ||
277 | Region: h.Get("x-amz-bucket-region"), | ||
278 | } | ||
279 | } | ||
280 | } | ||
281 | |||
282 | // Parse Last-Modified, which has the HTTP time format. | ||
283 | mtime, err := parseRFC7231Time(h.Get("Last-Modified")) | ||
284 | if err != nil { | ||
285 | return ObjectInfo{}, ErrorResponse{ | ||
286 | Code: "InternalError", | ||
287 | Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err), | ||
288 | BucketName: bucketName, | ||
289 | Key: objectName, | ||
290 | RequestID: h.Get("x-amz-request-id"), | ||
291 | HostID: h.Get("x-amz-id-2"), | ||
292 | Region: h.Get("x-amz-bucket-region"), | ||
293 | } | ||
294 | } | ||
295 | |||
296 | // Fetch content type if any present. | ||
297 | contentType := strings.TrimSpace(h.Get("Content-Type")) | ||
298 | if contentType == "" { | ||
299 | contentType = "application/octet-stream" | ||
300 | } | ||
301 | |||
302 | expiryStr := h.Get("Expires") | ||
303 | var expiry time.Time | ||
304 | if expiryStr != "" { | ||
305 | expiry, err = parseRFC7231Time(expiryStr) | ||
306 | if err != nil { | ||
307 | return ObjectInfo{}, ErrorResponse{ | ||
308 | Code: "InternalError", | ||
309 | Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err), | ||
310 | BucketName: bucketName, | ||
311 | Key: objectName, | ||
312 | RequestID: h.Get("x-amz-request-id"), | ||
313 | HostID: h.Get("x-amz-id-2"), | ||
314 | Region: h.Get("x-amz-bucket-region"), | ||
315 | } | ||
316 | } | ||
317 | } | ||
318 | |||
319 | metadata := extractObjMetadata(h) | ||
320 | userMetadata := make(map[string]string) | ||
321 | for k, v := range metadata { | ||
322 | if strings.HasPrefix(k, "X-Amz-Meta-") { | ||
323 | userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0] | ||
324 | } | ||
325 | } | ||
326 | userTags := s3utils.TagDecode(h.Get(amzTaggingHeader)) | ||
327 | |||
328 | var tagCount int | ||
329 | if count := h.Get(amzTaggingCount); count != "" { | ||
330 | tagCount, err = strconv.Atoi(count) | ||
331 | if err != nil { | ||
332 | return ObjectInfo{}, ErrorResponse{ | ||
333 | Code: "InternalError", | ||
334 | Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err), | ||
335 | BucketName: bucketName, | ||
336 | Key: objectName, | ||
337 | RequestID: h.Get("x-amz-request-id"), | ||
338 | HostID: h.Get("x-amz-id-2"), | ||
339 | Region: h.Get("x-amz-bucket-region"), | ||
340 | } | ||
341 | } | ||
342 | } | ||
343 | |||
344 | // Nil if not found | ||
345 | var restore *RestoreInfo | ||
346 | if restoreHdr := h.Get(amzRestore); restoreHdr != "" { | ||
347 | ongoing, expTime, err := amzRestoreToStruct(restoreHdr) | ||
348 | if err != nil { | ||
349 | return ObjectInfo{}, err | ||
350 | } | ||
351 | restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime} | ||
352 | } | ||
353 | |||
354 | // extract lifecycle expiry date and rule ID | ||
355 | expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration)) | ||
356 | |||
357 | deleteMarker := h.Get(amzDeleteMarker) == "true" | ||
358 | |||
359 | // Save object metadata info. | ||
360 | return ObjectInfo{ | ||
361 | ETag: etag, | ||
362 | Key: objectName, | ||
363 | Size: size, | ||
364 | LastModified: mtime, | ||
365 | ContentType: contentType, | ||
366 | Expires: expiry, | ||
367 | VersionID: h.Get(amzVersionID), | ||
368 | IsDeleteMarker: deleteMarker, | ||
369 | ReplicationStatus: h.Get(amzReplicationStatus), | ||
370 | Expiration: expTime, | ||
371 | ExpirationRuleID: ruleID, | ||
372 | // Extract only the relevant header keys describing the object. | ||
373 | // following function filters out a list of standard set of keys | ||
374 | // which are not part of object metadata. | ||
375 | Metadata: metadata, | ||
376 | UserMetadata: userMetadata, | ||
377 | UserTags: userTags, | ||
378 | UserTagCount: tagCount, | ||
379 | Restore: restore, | ||
380 | |||
381 | // Checksum values | ||
382 | ChecksumCRC32: h.Get("x-amz-checksum-crc32"), | ||
383 | ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), | ||
384 | ChecksumSHA1: h.Get("x-amz-checksum-sha1"), | ||
385 | ChecksumSHA256: h.Get("x-amz-checksum-sha256"), | ||
386 | }, nil | ||
387 | } | ||
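// Example (editor's illustration, not part of the vendored source): a
// minimal HEAD-style response converts as follows:
//
//	h := http.Header{}
//	h.Set("ETag", `"9b2cf535f27731c974343645a3985328"`)
//	h.Set("Content-Length", "1024")
//	h.Set("Last-Modified", "Tue, 29 Apr 2014 18:30:38 GMT")
//	oi, err := ToObjectInfo("mybucket", "photo.jpg", h)
//	// oi.Size == 1024, oi.ETag is unquoted, oi.ContentType falls back
//	// to "application/octet-stream"; err == nil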
388 | |||
389 | var readFull = func(r io.Reader, buf []byte) (n int, err error) { | ||
390 | // ReadFull reads exactly len(buf) bytes from r into buf. | ||
391 | // It returns the number of bytes copied and an error if | ||
392 | // fewer bytes were read. The error is EOF only if no bytes | ||
393 | // were read. If an EOF happens after reading some but not | ||
394 | // all the bytes, ReadFull returns ErrUnexpectedEOF. | ||
395 | // On return, n == len(buf) if and only if err == nil. | ||
396 | // If r returns an error having read at least len(buf) bytes, | ||
397 | // the error is dropped. | ||
398 | for n < len(buf) && err == nil { | ||
399 | var nn int | ||
400 | nn, err = r.Read(buf[n:]) | ||
401 | // Some spurious io.Readers return | ||
402 | // io.ErrUnexpectedEOF when nn == 0; | ||
403 | // this behavior is undocumented. So | ||
404 | // rather than calling io.ReadFull, | ||
405 | // which would force callers to handle | ||
406 | // that case themselves, we use a | ||
407 | // slightly modified copy of its | ||
408 | // implementation that normalizes the | ||
409 | // error: io.ErrUnexpectedEOF with | ||
410 | // nn == 0 really means io.EOF. | ||
411 | if err == io.ErrUnexpectedEOF && nn == 0 { | ||
412 | err = io.EOF | ||
413 | } | ||
414 | n += nn | ||
415 | } | ||
416 | if n >= len(buf) { | ||
417 | err = nil | ||
418 | } else if n > 0 && err == io.EOF { | ||
419 | err = io.ErrUnexpectedEOF | ||
420 | } | ||
421 | return | ||
422 | } | ||
423 | |||
424 | // regCred matches credential string in HTTP header | ||
425 | var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/") | ||
426 | |||
427 | // regSign matches signature string in HTTP header | ||
428 | var regSign = regexp.MustCompile("Signature=([0-9a-f]+)") | ||
429 | |||
430 | // Redact out signature value from authorization string. | ||
431 | func redactSignature(origAuth string) string { | ||
432 | if !strings.HasPrefix(origAuth, signV4Algorithm) { | ||
433 | // Set a temporary redacted auth | ||
434 | return "AWS **REDACTED**:**REDACTED**" | ||
435 | } | ||
436 | |||
437 | // Signature V4 authorization header. | ||
438 | |||
439 | // Strip out accessKeyID from: | ||
440 | // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request | ||
441 | newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") | ||
442 | |||
443 | // Strip out 256-bit signature from: Signature=<256-bit signature> | ||
444 | return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") | ||
445 | } | ||
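// Example (editor's illustration, not part of the vendored source; the
// access key and signature below are the documented AWS sample values):
//
//	auth := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, " +
//		"SignedHeaders=host;range;x-amz-date, Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024"
//	redacted := redactSignature(auth)
//	// Credential and Signature values are both replaced with **REDACTED**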
446 | |||
447 | // getDefaultLocation returns the location based on the input | ||
448 | // URL `u`. If a region override is provided, the location | ||
449 | // always defaults to regionOverride. | ||
450 | // | ||
451 | // If no other cases match, the location is set to `us-east-1` | ||
452 | // as a last resort. | ||
453 | func getDefaultLocation(u url.URL, regionOverride string) (location string) { | ||
454 | if regionOverride != "" { | ||
455 | return regionOverride | ||
456 | } | ||
457 | region := s3utils.GetRegionFromURL(u) | ||
458 | if region == "" { | ||
459 | region = "us-east-1" | ||
460 | } | ||
461 | return region | ||
462 | } | ||
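// Example (editor's illustration, not part of the vendored source; the
// expected results assume s3utils.GetRegionFromURL's standard behavior):
//
//	u, _ := url.Parse("https://s3.eu-west-1.amazonaws.com")
//	getDefaultLocation(*u, "")          // "eu-west-1", derived from the URL
//	getDefaultLocation(*u, "us-west-2") // "us-west-2", override wins
//	getDefaultLocation(url.URL{Host: "play.min.io"}, "") // "us-east-1" fallback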
463 | |||
464 | var supportedHeaders = map[string]bool{ | ||
465 | "content-type": true, | ||
466 | "cache-control": true, | ||
467 | "content-encoding": true, | ||
468 | "content-disposition": true, | ||
469 | "content-language": true, | ||
470 | "x-amz-website-redirect-location": true, | ||
471 | "x-amz-object-lock-mode": true, | ||
472 | "x-amz-metadata-directive": true, | ||
473 | "x-amz-object-lock-retain-until-date": true, | ||
474 | "expires": true, | ||
475 | "x-amz-replication-status": true, | ||
476 | // Add more supported headers here. | ||
477 | // Must be lower case. | ||
478 | } | ||
479 | |||
480 | // isStorageClassHeader returns true if the header is a supported storage class header | ||
481 | func isStorageClassHeader(headerKey string) bool { | ||
482 | return strings.EqualFold(amzStorageClass, headerKey) | ||
483 | } | ||
484 | |||
485 | // isStandardHeader returns true if header is a supported header and not a custom header | ||
486 | func isStandardHeader(headerKey string) bool { | ||
487 | return supportedHeaders[strings.ToLower(headerKey)] | ||
488 | } | ||
489 | |||
490 | // sseHeaders is list of server side encryption headers | ||
491 | var sseHeaders = map[string]bool{ | ||
492 | "x-amz-server-side-encryption": true, | ||
493 | "x-amz-server-side-encryption-aws-kms-key-id": true, | ||
494 | "x-amz-server-side-encryption-context": true, | ||
495 | "x-amz-server-side-encryption-customer-algorithm": true, | ||
496 | "x-amz-server-side-encryption-customer-key": true, | ||
497 | "x-amz-server-side-encryption-customer-key-md5": true, | ||
498 | // Add more supported headers here. | ||
499 | // Must be lower case. | ||
500 | } | ||
501 | |||
502 | // isSSEHeader returns true if header is a server side encryption header. | ||
503 | func isSSEHeader(headerKey string) bool { | ||
504 | return sseHeaders[strings.ToLower(headerKey)] | ||
505 | } | ||
506 | |||
507 | // isAmzHeader returns true if header is a x-amz-meta-*, x-amz-grant-*, x-amz-acl, x-amz-checksum-* or server side encryption header. | ||
508 | func isAmzHeader(headerKey string) bool { | ||
509 | key := strings.ToLower(headerKey) | ||
510 | |||
511 | return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-") | ||
512 | } | ||
513 | |||
514 | // supportedQueryValues is a list of query strings that can be passed in when using GetObject. | ||
515 | var supportedQueryValues = map[string]bool{ | ||
516 | "partNumber": true, | ||
517 | "versionId": true, | ||
518 | "response-cache-control": true, | ||
519 | "response-content-disposition": true, | ||
520 | "response-content-encoding": true, | ||
521 | "response-content-language": true, | ||
522 | "response-content-type": true, | ||
523 | "response-expires": true, | ||
524 | } | ||
525 | |||
526 | // isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized. | ||
527 | func isStandardQueryValue(qsKey string) bool { | ||
528 | return supportedQueryValues[qsKey] | ||
529 | } | ||
530 | |||
531 | // Per documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom, the | ||
532 | // set of query params starting with "x-" are ignored by S3. | ||
533 | const allowedCustomQueryPrefix = "x-" | ||
534 | |||
535 | func isCustomQueryValue(qsKey string) bool { | ||
536 | return strings.HasPrefix(qsKey, allowedCustomQueryPrefix) | ||
537 | } | ||
538 | |||
539 | var ( | ||
540 | md5Pool = sync.Pool{New: func() interface{} { return md5.New() }} | ||
541 | sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }} | ||
542 | ) | ||
543 | |||
544 | func newMd5Hasher() md5simd.Hasher { | ||
545 | return &hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true} | ||
546 | } | ||
547 | |||
548 | func newSHA256Hasher() md5simd.Hasher { | ||
549 | if encrypt.FIPS { | ||
550 | return &hashWrapper{Hash: fipssha256.New(), isSHA256: true} | ||
551 | } | ||
552 | return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true} | ||
553 | } | ||
554 | |||
555 | // hashWrapper implements the md5simd.Hasher interface. | ||
556 | type hashWrapper struct { | ||
557 | hash.Hash | ||
558 | isMD5 bool | ||
559 | isSHA256 bool | ||
560 | } | ||
561 | |||
562 | // Close will put the hasher back into the pool. | ||
563 | func (m *hashWrapper) Close() { | ||
564 | if m.isMD5 && m.Hash != nil { | ||
565 | m.Reset() | ||
566 | md5Pool.Put(m.Hash) | ||
567 | } | ||
568 | if m.isSHA256 && m.Hash != nil { | ||
569 | m.Reset() | ||
570 | sha256Pool.Put(m.Hash) | ||
571 | } | ||
572 | m.Hash = nil | ||
573 | } | ||
574 | |||
575 | const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" | ||
576 | const ( | ||
577 | letterIdxBits = 6 // 6 bits to represent a letter index | ||
578 | letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits | ||
579 | letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits | ||
580 | ) | ||
581 | |||
582 | // randString generates random names and prepends them with a known prefix. | ||
583 | func randString(n int, src rand.Source, prefix string) string { | ||
584 | b := make([]byte, n) | ||
585 | // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! | ||
586 | for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { | ||
587 | if remain == 0 { | ||
588 | cache, remain = src.Int63(), letterIdxMax | ||
589 | } | ||
590 | if idx := int(cache & letterIdxMask); idx < len(letterBytes) { | ||
591 | b[i] = letterBytes[idx] | ||
592 | i-- | ||
593 | } | ||
594 | cache >>= letterIdxBits | ||
595 | remain-- | ||
596 | } | ||
597 | return prefix + string(b[0:30-len(prefix)]) | ||
598 | } | ||
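// Example (editor's illustration, not part of the vendored source): the
// result is always 30 characters, prefix included, so n must be at least
// 30 minus the prefix length:
//
//	src := rand.NewSource(time.Now().UnixNano())
//	name := randString(60, src, "minio-go-test-")
//	// e.g. "minio-go-test-x3k9q0za2bfww1pm"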
599 | |||
600 | // IsNetworkOrHostDown - reports whether there was a network error or the host is down. | ||
601 | // expectTimeouts indicates that *context* timeouts are expected and do not | ||
602 | // indicate a downed host. Other timeouts are still reported as down. | ||
603 | func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { | ||
604 | if err == nil { | ||
605 | return false | ||
606 | } | ||
607 | |||
608 | if errors.Is(err, context.Canceled) { | ||
609 | return false | ||
610 | } | ||
611 | |||
612 | if expectTimeouts && errors.Is(err, context.DeadlineExceeded) { | ||
613 | return false | ||
614 | } | ||
615 | |||
616 | if errors.Is(err, context.DeadlineExceeded) { | ||
617 | return true | ||
618 | } | ||
619 | |||
620 | // We need to figure out whether the error is a timeout | ||
621 | // or a non-temporary error. | ||
622 | urlErr := &url.Error{} | ||
623 | if errors.As(err, &urlErr) { | ||
624 | switch urlErr.Err.(type) { | ||
625 | case *net.DNSError, *net.OpError, net.UnknownNetworkError: | ||
626 | return true | ||
627 | } | ||
628 | } | ||
629 | var e net.Error | ||
630 | if errors.As(err, &e) { | ||
631 | if e.Timeout() { | ||
632 | return true | ||
633 | } | ||
634 | } | ||
635 | |||
636 | // Fallback to other mechanisms. | ||
637 | switch { | ||
638 | case strings.Contains(err.Error(), "Connection closed by foreign host"): | ||
639 | return true | ||
640 | case strings.Contains(err.Error(), "TLS handshake timeout"): | ||
641 | // If error is - tlsHandshakeTimeoutError. | ||
642 | return true | ||
643 | case strings.Contains(err.Error(), "i/o timeout"): | ||
644 | // If error is - tcp timeoutError. | ||
645 | return true | ||
646 | case strings.Contains(err.Error(), "connection timed out"): | ||
647 | // If err is a net.Dial timeout. | ||
648 | return true | ||
649 | case strings.Contains(err.Error(), "connection refused"): | ||
650 | // If err is connection refused | ||
651 | return true | ||
652 | |||
653 | case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"): | ||
654 | // Denial errors | ||
655 | return true | ||
656 | } | ||
657 | return false | ||
658 | } | ||
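// Example (editor's illustration, not part of the vendored source): a retry
// loop can use this to distinguish transport failures from API errors
// (httpClient and req are hypothetical):
//
//	resp, err := httpClient.Do(req)
//	if IsNetworkOrHostDown(err, false) {
//		// back off and retry; the host or network path is unhealthy
//	}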
659 | |||
660 | // newHashReaderWrapper will hash all reads done through r. | ||
661 | // When r returns io.EOF the done function will be called with the sum. | ||
662 | func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper { | ||
663 | return &hashReaderWrapper{ | ||
664 | r: r, | ||
665 | h: h, | ||
666 | done: done, | ||
667 | } | ||
668 | } | ||
669 | |||
670 | type hashReaderWrapper struct { | ||
671 | r io.Reader | ||
672 | h hash.Hash | ||
673 | done func(hash []byte) | ||
674 | } | ||
675 | |||
676 | // Read implements the io.Reader interface. | ||
677 | func (h *hashReaderWrapper) Read(p []byte) (n int, err error) { | ||
678 | n, err = h.r.Read(p) | ||
679 | if n > 0 { | ||
680 | n2, err := h.h.Write(p[:n]) | ||
681 | if err != nil { | ||
682 | return 0, err | ||
683 | } | ||
684 | if n2 != n { | ||
685 | return 0, io.ErrShortWrite | ||
686 | } | ||
687 | } | ||
688 | if err == io.EOF { | ||
689 | // Call back | ||
690 | h.done(h.h.Sum(nil)) | ||
691 | } | ||
692 | return n, err | ||
693 | } | ||
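// Example (editor's illustration, not part of the vendored source): wrap a
// reader so the SHA-256 sum is delivered once the stream is fully consumed
// (src is a hypothetical io.Reader; sha256 is the sha256-simd import above):
//
//	var sum []byte
//	r := newHashReaderWrapper(src, sha256.New(), func(h []byte) { sum = h })
//	io.Copy(io.Discard, r) // done callback fires when src returns io.EOF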