From 8db41da676ac8368ef7c2549d56239a5ff5eedde Mon Sep 17 00:00:00 2001 From: Rutger Broekhoff Date: Tue, 2 Jan 2024 18:56:31 +0100 Subject: Delete vendor directory --- vendor/github.com/minio/minio-go/v7/.gitignore | 6 - vendor/github.com/minio/minio-go/v7/.golangci.yml | 27 - vendor/github.com/minio/minio-go/v7/CNAME | 1 - .../github.com/minio/minio-go/v7/CONTRIBUTING.md | 22 - vendor/github.com/minio/minio-go/v7/LICENSE | 202 - vendor/github.com/minio/minio-go/v7/MAINTAINERS.md | 35 - vendor/github.com/minio/minio-go/v7/Makefile | 38 - vendor/github.com/minio/minio-go/v7/NOTICE | 9 - vendor/github.com/minio/minio-go/v7/README.md | 312 - .../minio/minio-go/v7/api-bucket-encryption.go | 134 - .../minio/minio-go/v7/api-bucket-lifecycle.go | 169 - .../minio/minio-go/v7/api-bucket-notification.go | 261 - .../minio/minio-go/v7/api-bucket-policy.go | 147 - .../minio/minio-go/v7/api-bucket-replication.go | 355 - .../minio/minio-go/v7/api-bucket-tagging.go | 134 - .../minio/minio-go/v7/api-bucket-versioning.go | 146 - .../minio/minio-go/v7/api-compose-object.go | 594 - .../minio/minio-go/v7/api-copy-object.go | 76 - .../github.com/minio/minio-go/v7/api-datatypes.go | 254 - .../minio/minio-go/v7/api-error-response.go | 284 - .../minio/minio-go/v7/api-get-object-acl.go | 152 - .../minio/minio-go/v7/api-get-object-file.go | 127 - .../github.com/minio/minio-go/v7/api-get-object.go | 683 - .../minio/minio-go/v7/api-get-options.go | 203 - vendor/github.com/minio/minio-go/v7/api-list.go | 1057 -- .../minio/minio-go/v7/api-object-legal-hold.go | 176 - .../minio/minio-go/v7/api-object-lock.go | 241 - .../minio/minio-go/v7/api-object-retention.go | 165 - .../minio/minio-go/v7/api-object-tagging.go | 177 - .../github.com/minio/minio-go/v7/api-presigned.go | 228 - .../github.com/minio/minio-go/v7/api-put-bucket.go | 123 - .../minio/minio-go/v7/api-put-object-common.go | 149 - .../minio/minio-go/v7/api-put-object-fan-out.go | 164 - .../minio-go/v7/api-put-object-file-context.go | 64 - .../minio/minio-go/v7/api-put-object-multipart.go | 465 - .../minio/minio-go/v7/api-put-object-streaming.go | 809 -- .../github.com/minio/minio-go/v7/api-put-object.go | 473 - .../minio/minio-go/v7/api-putobject-snowball.go | 246 - vendor/github.com/minio/minio-go/v7/api-remove.go | 548 - vendor/github.com/minio/minio-go/v7/api-restore.go | 182 - .../minio/minio-go/v7/api-s3-datatypes.go | 390 - vendor/github.com/minio/minio-go/v7/api-select.go | 757 -- vendor/github.com/minio/minio-go/v7/api-stat.go | 116 - vendor/github.com/minio/minio-go/v7/api.go | 995 -- .../github.com/minio/minio-go/v7/bucket-cache.go | 256 - vendor/github.com/minio/minio-go/v7/checksum.go | 210 - .../minio/minio-go/v7/code_of_conduct.md | 80 - vendor/github.com/minio/minio-go/v7/constants.go | 110 - vendor/github.com/minio/minio-go/v7/core.go | 150 - .../minio/minio-go/v7/functional_tests.go | 13004 ------------------- vendor/github.com/minio/minio-go/v7/hook-reader.go | 101 - .../minio-go/v7/pkg/credentials/assume_role.go | 242 - .../minio/minio-go/v7/pkg/credentials/chain.go | 88 - .../minio-go/v7/pkg/credentials/config.json.sample | 17 - .../minio-go/v7/pkg/credentials/credentials.go | 193 - .../minio-go/v7/pkg/credentials/credentials.json | 7 - .../minio-go/v7/pkg/credentials/credentials.sample | 15 - .../minio/minio-go/v7/pkg/credentials/doc.go | 60 - .../minio/minio-go/v7/pkg/credentials/env_aws.go | 71 - .../minio/minio-go/v7/pkg/credentials/env_minio.go | 68 - .../minio-go/v7/pkg/credentials/error_response.go | 95 - 
.../v7/pkg/credentials/file_aws_credentials.go | 157 - .../v7/pkg/credentials/file_minio_client.go | 139 - .../minio/minio-go/v7/pkg/credentials/iam_aws.go | 433 - .../minio-go/v7/pkg/credentials/signature_type.go | 77 - .../minio/minio-go/v7/pkg/credentials/static.go | 67 - .../v7/pkg/credentials/sts_client_grants.go | 182 - .../v7/pkg/credentials/sts_custom_identity.go | 146 - .../v7/pkg/credentials/sts_ldap_identity.go | 189 - .../v7/pkg/credentials/sts_tls_identity.go | 211 - .../v7/pkg/credentials/sts_web_identity.go | 205 - .../minio/minio-go/v7/pkg/encrypt/fips_disabled.go | 24 - .../minio/minio-go/v7/pkg/encrypt/fips_enabled.go | 24 - .../minio/minio-go/v7/pkg/encrypt/server-side.go | 198 - .../minio/minio-go/v7/pkg/lifecycle/lifecycle.go | 491 - .../minio/minio-go/v7/pkg/notification/info.go | 78 - .../minio-go/v7/pkg/notification/notification.go | 440 - .../minio-go/v7/pkg/replication/replication.go | 971 -- .../minio/minio-go/v7/pkg/s3utils/utils.go | 411 - .../minio/minio-go/v7/pkg/set/stringset.go | 200 - ...request-signature-streaming-unsigned-trailer.go | 224 - .../v7/pkg/signer/request-signature-streaming.go | 403 - .../minio-go/v7/pkg/signer/request-signature-v2.go | 319 - .../minio-go/v7/pkg/signer/request-signature-v4.go | 351 - .../minio/minio-go/v7/pkg/signer/utils.go | 62 - vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go | 66 - .../github.com/minio/minio-go/v7/pkg/tags/tags.go | 413 - vendor/github.com/minio/minio-go/v7/post-policy.go | 349 - .../minio/minio-go/v7/retry-continous.go | 69 - vendor/github.com/minio/minio-go/v7/retry.go | 148 - .../github.com/minio/minio-go/v7/s3-endpoints.go | 64 - vendor/github.com/minio/minio-go/v7/s3-error.go | 61 - vendor/github.com/minio/minio-go/v7/transport.go | 83 - vendor/github.com/minio/minio-go/v7/utils.go | 693 - 94 files changed, 34611 deletions(-) delete mode 100644 vendor/github.com/minio/minio-go/v7/.gitignore delete mode 100644 vendor/github.com/minio/minio-go/v7/.golangci.yml delete mode 100644 vendor/github.com/minio/minio-go/v7/CNAME delete mode 100644 vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md delete mode 100644 vendor/github.com/minio/minio-go/v7/LICENSE delete mode 100644 vendor/github.com/minio/minio-go/v7/MAINTAINERS.md delete mode 100644 vendor/github.com/minio/minio-go/v7/Makefile delete mode 100644 vendor/github.com/minio/minio-go/v7/NOTICE delete mode 100644 vendor/github.com/minio/minio-go/v7/README.md delete mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-notification.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-policy.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-replication.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-compose-object.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-copy-object.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-datatypes.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-error-response.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object-acl.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object-file.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object.go delete mode 100644 
vendor/github.com/minio/minio-go/v7/api-get-options.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-list.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-object-lock.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-object-retention.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-object-tagging.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-presigned.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-put-bucket.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-common.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-remove.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-restore.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-select.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-stat.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api.go delete mode 100644 vendor/github.com/minio/minio-go/v7/bucket-cache.go delete mode 100644 vendor/github.com/minio/minio-go/v7/checksum.go delete mode 100644 vendor/github.com/minio/minio-go/v7/code_of_conduct.md delete mode 100644 vendor/github.com/minio/minio-go/v7/constants.go delete mode 100644 vendor/github.com/minio/minio-go/v7/core.go delete mode 100644 vendor/github.com/minio/minio-go/v7/functional_tests.go delete mode 100644 vendor/github.com/minio/minio-go/v7/hook-reader.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go delete mode 100644 
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/notification/info.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go delete mode 100644 vendor/github.com/minio/minio-go/v7/post-policy.go delete mode 100644 vendor/github.com/minio/minio-go/v7/retry-continous.go delete mode 100644 vendor/github.com/minio/minio-go/v7/retry.go delete mode 100644 vendor/github.com/minio/minio-go/v7/s3-endpoints.go delete mode 100644 vendor/github.com/minio/minio-go/v7/s3-error.go delete mode 100644 vendor/github.com/minio/minio-go/v7/transport.go delete mode 100644 vendor/github.com/minio/minio-go/v7/utils.go diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore deleted file mode 100644 index 8ae0384..0000000 --- a/vendor/github.com/minio/minio-go/v7/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*~ -*.test -validator -golangci-lint -functional_tests -.idea \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml deleted file mode 100644 index 875b949..0000000 --- a/vendor/github.com/minio/minio-go/v7/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - misspell: - locale: US - -linters: - disable-all: true - enable: - - typecheck - - goimports - - misspell - - revive - - govet - - ineffassign - - gosimple - - unused - - gocritic - -issues: - exclude-use-default: false - exclude: - # todo fix these when we get enough time.
- - "singleCaseSwitch: should rewrite switch statement to if statement" - - "unlambda: replace" - - "captLocal:" - - "ifElseChain:" - - "elseif:" - - "should have a package comment" diff --git a/vendor/github.com/minio/minio-go/v7/CNAME b/vendor/github.com/minio/minio-go/v7/CNAME deleted file mode 100644 index d365a7b..0000000 --- a/vendor/github.com/minio/minio-go/v7/CNAME +++ /dev/null @@ -1 +0,0 @@ -minio-go.min.io \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md deleted file mode 100644 index 24522ef..0000000 --- a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md +++ /dev/null @@ -1,22 +0,0 @@ -### Developer Guidelines - -``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following: - -* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. - - Fork it - - Create your feature branch (git checkout -b my-new-feature) - - Commit your changes (git commit -am 'Add some feature') - - Push to the branch (git push origin my-new-feature) - - Create new Pull Request - -* When you're ready to create a pull request, be sure to: - - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. - - Run `go fmt` - - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. - - Make sure `go test -race ./...` and `go build` completes. - NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables - ``ACCESS_KEY`` and ``SECRET_KEY``. To run shorter version of the tests please use ``go test -short -race ./...`` - -* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project - - `minio-go` project is strictly conformant with Golang style - - if you happen to observe offending code, please feel free to send a pull request diff --git a/vendor/github.com/minio/minio-go/v7/LICENSE b/vendor/github.com/minio/minio-go/v7/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/minio/minio-go/v7/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md deleted file mode 100644 index f640dfb..0000000 --- a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md +++ /dev/null @@ -1,35 +0,0 @@ -# For maintainers only - -## Responsibilities - -Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) - -### Making new releases -Tag and sign your release commit. Additionally, this step requires you to have access to MinIO's trusted private key. -```sh -$ export GNUPGHOME=/media/${USER}/minio/trusted -$ git tag -s 4.0.0 -$ git push -$ git push --tags -``` - -### Update version -Once the release has been made, update the `libraryVersion` constant in `api.go` to the next version to be released.
- -```sh -$ grep libraryVersion api.go - libraryVersion = "4.0.1" -``` - -Commit your changes -``` -$ git commit -a -m "Update version for next release" --author "MinIO Trusted <trusted@min.io>" -``` - -### Announce -Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@min.io` account. Release notes require two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains a list of all commits since the last release. - -To generate `changelog` -```sh -$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag> -``` diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile deleted file mode 100644 index 68444aa..0000000 --- a/vendor/github.com/minio/minio-go/v7/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -GOPATH := $(shell go env GOPATH) -TMPDIR := $(shell mktemp -d) - -all: checks - -.PHONY: examples docs - -checks: lint vet test examples functional-test - -lint: - @mkdir -p ${GOPATH}/bin - @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin - @echo "Running $@ check" - @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean - @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml - -vet: - @GO111MODULE=on go vet ./... - @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest - ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005" - -test: - @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... - -examples: - @echo "Building s3 examples" - @cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) - @echo "Building minio examples" - @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) - -functional-test: - @GO111MODULE=on go build -race functional_tests.go - @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests - -clean: - @echo "Cleaning up all the generated files" - @find . -name '*.test' | xargs rm -fv - @find . -name '*~' | xargs rm -fv diff --git a/vendor/github.com/minio/minio-go/v7/NOTICE b/vendor/github.com/minio/minio-go/v7/NOTICE deleted file mode 100644 index 1e8fd3b..0000000 --- a/vendor/github.com/minio/minio-go/v7/NOTICE +++ /dev/null @@ -1,9 +0,0 @@ -MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc. - -This product includes software developed at MinIO, Inc. -(https://min.io/). - -The MinIO project contains unmodified/modified subcomponents too with -separate copyright notices and license terms.
Your use of the source -code for these subcomponents is subject to the terms and conditions -of Apache License Version 2.0 diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md deleted file mode 100644 index 82f70a1..0000000 --- a/vendor/github.com/minio/minio-go/v7/README.md +++ /dev/null @@ -1,312 +0,0 @@ -# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) - -The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage. - -This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader. -For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html). - -These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html). - -## Download from Github - -From your project directory: - -```sh -go get github.com/minio/minio-go/v7 -``` - -## Initialize a MinIO Client Object - -The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage: - -| Parameter | Description | -| ----------------- | ---------------------------------------------------------- | -| `endpoint` | URL to object storage service. | -| `_minio.Options_` | All the options such as credentials, custom transport etc. | - -```go -package main - -import ( - "log" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" -) - -func main() { - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // Initialize minio client object. - minioClient, err := minio.New(endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), - Secure: useSSL, - }) - if err != nil { - log.Fatalln(err) - } - - log.Printf("%#v\n", minioClient) // minioClient is now set up -} -``` - -## Example - File Uploader - -This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket. -It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io). - -The `play` server runs the latest stable version of MinIO and may be used for testing and development. -The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected. - -### FileUploader.go - -This example does the following: - -- Connects to the MinIO `play` server using the provided credentials. -- Creates a bucket named `testbucket`. -- Uploads a file named `testdata` from `/tmp`. -- Verifies the file was created using `mc ls`. 
- -```go -// FileUploader.go MinIO example -package main - -import ( - "context" - "log" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" -) - -func main() { - ctx := context.Background() - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // Initialize minio client object. - minioClient, err := minio.New(endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), - Secure: useSSL, - }) - if err != nil { - log.Fatalln(err) - } - - // Make a new bucket called testbucket. - bucketName := "testbucket" - location := "us-east-1" - - err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location}) - if err != nil { - // Check to see if we already own this bucket (which happens if you run this twice) - exists, errBucketExists := minioClient.BucketExists(ctx, bucketName) - if errBucketExists == nil && exists { - log.Printf("We already own %s\n", bucketName) - } else { - log.Fatalln(err) - } - } else { - log.Printf("Successfully created %s\n", bucketName) - } - - // Upload the test file - // Change the value of filePath if the file is in another location - objectName := "testdata" - filePath := "/tmp/testdata" - contentType := "application/octet-stream" - - // Upload the test file with FPutObject - info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) - if err != nil { - log.Fatalln(err) - } - - log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size) -} -``` - -**1. Create a test file containing data:** - -You can do this with `dd` on Linux or macOS systems: - -```sh -dd if=/dev/urandom of=/tmp/testdata bs=2048 count=10 -``` - -or `fsutil` on Windows: - -```sh -fsutil file createnew "C:\Users\<username>\Desktop\sample.txt" 20480 -``` - -**2. Run FileUploader with the following commands:** - -```sh -go mod init example/FileUploader -go get github.com/minio/minio-go/v7 -go get github.com/minio/minio-go/v7/pkg/credentials -go run FileUploader.go -``` - -The output resembles the following: - -```sh -2023/11/01 14:27:55 Successfully created testbucket -2023/11/01 14:27:55 Successfully uploaded testdata of size 20480 -``` - -**3. Verify the Uploaded File With `mc ls`:** - -```sh -mc ls play/testbucket -[2023-11-01 14:27:55 UTC] 20KiB STANDARD TestDataFile -``` - -## API Reference - -The full API Reference is available here.
- -* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html) - -### API Reference : Bucket Operations - -* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) -* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) -* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) -* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) -* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) -* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) - -### API Reference : Bucket policy Operations - -* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) -* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) - -### API Reference : Bucket notification Operations - -* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) -* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension) -* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension) - -### API Reference : File Object Operations - -* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) -* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject) - -### API Reference : Object Operations - -* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) -* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) -* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) -* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) -* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) -* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) -* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) -* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) - -### API Reference : Presigned Operations - -* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) -* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) -* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) -* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) - -### API Reference : Client custom settings - -* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) -* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) -* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) - -## Full Examples - -### Full Examples : Bucket Operations - -* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) -* 
[listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) -* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) -* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) -* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) -* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) -* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) - -### Full Examples : Bucket policy Operations - -* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) -* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) -* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) - -### Full Examples : Bucket lifecycle Operations - -* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) -* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) - -### Full Examples : Bucket encryption Operations - -* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go) -* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go) -* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go) - -### Full Examples : Bucket replication Operations - -* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go) -* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go) -* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go) - -### Full Examples : Bucket notification Operations - -* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) -* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) -* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) -* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) -* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension) - -### Full Examples : File Object Operations - -* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) -* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) - -### Full Examples : Object Operations - -* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) -* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) -* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) -* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) -* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) -* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) -* 
[removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) - -### Full Examples : Encrypted Object Operations - -* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) -* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) -* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) - -### Full Examples : Presigned Operations - -* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) -* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) -* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) -* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) - -## Explore Further - -* [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) -* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html) -* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html) - -## Contribute - -[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) - -## License - -This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information. diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go deleted file mode 100644 index 24f94e0..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/sse" -) - -// SetBucketEncryption sets the default encryption configuration on an existing bucket. -func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - if config == nil { - return errInvalidArgument("configuration cannot be empty") - } - - buf, err := xml.Marshal(config) - if err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. 
- urlValues := make(url.Values) - urlValues.Set("encryption", "") - - // Content-length is mandatory to set a default encryption configuration - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(buf), - contentLength: int64(len(buf)), - contentMD5Base64: sumMD5Base64(buf), - } - - // Execute PUT to upload a new bucket default encryption configuration. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - return nil -} - -// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts. -func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("encryption", "") - - // DELETE default encryption configuration on a bucket. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - return nil -} - -// GetBucketEncryption gets the default encryption configuration -// on an existing bucket with a context to control cancellations and timeouts. -func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("encryption", "") - - // Execute GET on bucket to get the default encryption configuration. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, "") - } - - encryptionConfig := &sse.Configuration{} - if err = xmlDecoder(resp.Body, encryptionConfig); err != nil { - return nil, err - } - - return encryptionConfig, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go deleted file mode 100644 index fec5cec..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go +++ /dev/null @@ -1,169 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "io" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/lifecycle" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// SetBucketLifecycle set the lifecycle on an existing bucket. -func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If lifecycle is empty then delete it. - if config.Empty() { - return c.removeBucketLifecycle(ctx, bucketName) - } - - buf, err := xml.Marshal(config) - if err != nil { - return err - } - - // Save the updated lifecycle. - return c.putBucketLifecycle(ctx, bucketName, buf) -} - -// Saves a new bucket lifecycle. -func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Content-length is mandatory for put lifecycle request - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(buf), - contentLength: int64(len(buf)), - contentMD5Base64: sumMD5Base64(buf), - } - - // Execute PUT to upload a new bucket lifecycle. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// Remove lifecycle from a bucket. -func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Execute DELETE on objectName. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil -} - -// GetBucketLifecycle fetch bucket lifecycle configuration -func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { - lc, _, err := c.GetBucketLifecycleWithInfo(ctx, bucketName) - return lc, err -} - -// GetBucketLifecycleWithInfo fetch bucket lifecycle configuration along with when it was last updated -func (c *Client) GetBucketLifecycleWithInfo(ctx context.Context, bucketName string) (*lifecycle.Configuration, time.Time, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, time.Time{}, err - } - - bucketLifecycle, updatedAt, err := c.getBucketLifecycle(ctx, bucketName) - if err != nil { - return nil, time.Time{}, err - } - - config := lifecycle.NewConfiguration() - if err = xml.Unmarshal(bucketLifecycle, config); err != nil { - return nil, time.Time{}, err - } - return config, updatedAt, nil -} - -// Request server for current bucket lifecycle. -func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, time.Time, error) { - // Get resources properly escaped and lined up before - // using them in http request. 
- urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - urlValues.Set("withUpdatedAt", "true") - - // Execute GET on bucket to get lifecycle. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return nil, time.Time{}, err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, time.Time{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - - lcBytes, err := io.ReadAll(resp.Body) - if err != nil { - return nil, time.Time{}, err - } - - const minIOLifecycleCfgUpdatedAt = "X-Minio-LifecycleConfig-UpdatedAt" - var updatedAt time.Time - if timeStr := resp.Header.Get(minIOLifecycleCfgUpdatedAt); timeStr != "" { - updatedAt, err = time.Parse(iso8601DateFormat, timeStr) - if err != nil { - return nil, time.Time{}, err - } - } - - return lcBytes, updatedAt, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go deleted file mode 100644 index 8de5c01..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go +++ /dev/null @@ -1,261 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bufio" - "bytes" - "context" - "encoding/xml" - "net/http" - "net/url" - "time" - - jsoniter "github.com/json-iterator/go" - "github.com/minio/minio-go/v7/pkg/notification" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts. -func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("notification", "") - - notifBytes, err := xml.Marshal(&config) - if err != nil { - return err - } - - notifBuffer := bytes.NewReader(notifBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: notifBuffer, - contentLength: int64(len(notifBytes)), - contentMD5Base64: sumMD5Base64(notifBytes), - contentSHA256Hex: sum256Hex(notifBytes), - } - - // Execute PUT to upload a new bucket notification. 
- resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config -func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { - return c.SetBucketNotification(ctx, bucketName, notification.Configuration{}) -} - -// GetBucketNotification returns current bucket notification configuration -func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return notification.Configuration{}, err - } - return c.getBucketNotification(ctx, bucketName) -} - -// Request server for notification rules. -func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { - urlValues := make(url.Values) - urlValues.Set("notification", "") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - - defer closeResponse(resp) - if err != nil { - return notification.Configuration{}, err - } - return processBucketNotificationResponse(bucketName, resp) -} - -// processes the GetNotification http response from the server. -func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) { - if resp.StatusCode != http.StatusOK { - errResponse := httpRespToErrorResponse(resp, bucketName, "") - return notification.Configuration{}, errResponse - } - var bucketNotification notification.Configuration - err := xmlDecoder(resp.Body, &bucketNotification) - if err != nil { - return notification.Configuration{}, err - } - return bucketNotification, nil -} - -// ListenNotification listen for all events, this is a MinIO specific API -func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { - return c.ListenBucketNotification(ctx, "", prefix, suffix, events) -} - -// ListenBucketNotification listen for bucket events, this is a MinIO specific API -func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { - notificationInfoCh := make(chan notification.Info, 1) - const notificationCapacity = 4 * 1024 * 1024 - notificationEventBuffer := make([]byte, notificationCapacity) - // Only success, start a routine to start reading line by line. - go func(notificationInfoCh chan<- notification.Info) { - defer close(notificationInfoCh) - - // Validate the bucket name. 
-		// Validate the bucket name.
-		if bucketName != "" {
-			if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-				select {
-				case notificationInfoCh <- notification.Info{
-					Err: err,
-				}:
-				case <-ctx.Done():
-				}
-				return
-			}
-		}
-
-		// Check ARN partition to verify if listening bucket is supported
-		if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
-			select {
-			case notificationInfoCh <- notification.Info{
-				Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
-			}:
-			case <-ctx.Done():
-			}
-			return
-		}
-
-		// Continuously run and listen on bucket notification.
-		// Create a done channel to control the retry goroutine below.
-		retryDoneCh := make(chan struct{}, 1)
-
-		// Indicate to our routine to exit cleanly upon return.
-		defer close(retryDoneCh)
-
-		// Prepare urlValues to pass into the request on every loop
-		urlValues := make(url.Values)
-		urlValues.Set("ping", "10")
-		urlValues.Set("prefix", prefix)
-		urlValues.Set("suffix", suffix)
-		urlValues["events"] = events
-
-		// Wait on the jitter retry loop.
-		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
-			// Execute GET on bucket to listen for notifications.
-			resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
-				bucketName:       bucketName,
-				queryValues:      urlValues,
-				contentSHA256Hex: emptySHA256Hex,
-			})
-			if err != nil {
-				select {
-				case notificationInfoCh <- notification.Info{
-					Err: err,
-				}:
-				case <-ctx.Done():
-				}
-				return
-			}
-
-			// Validate http response, upon error return quickly.
-			if resp.StatusCode != http.StatusOK {
-				errResponse := httpRespToErrorResponse(resp, bucketName, "")
-				select {
-				case notificationInfoCh <- notification.Info{
-					Err: errResponse,
-				}:
-				case <-ctx.Done():
-				}
-				return
-			}
-
-			// Initialize a new bufio scanner, to read line by line.
-			bio := bufio.NewScanner(resp.Body)
-
-			// Use a higher buffer to support unexpected
-			// caching done by proxies
-			bio.Buffer(notificationEventBuffer, notificationCapacity)
-			json := jsoniter.ConfigCompatibleWithStandardLibrary
-
-			// Unmarshal each line, returns marshaled values.
-			for bio.Scan() {
-				var notificationInfo notification.Info
-				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
-					// Unexpected error during JSON unmarshal; send the
-					// error to the caller so it can act on it as needed.
-					select {
-					case notificationInfoCh <- notification.Info{
-						Err: err,
-					}:
-					case <-ctx.Done():
-						return
-					}
-					closeResponse(resp)
-					continue
-				}
-
-				// Skip empty keep-alive events pinged by the server
-				if len(notificationInfo.Records) == 0 && notificationInfo.Err == nil {
-					continue
-				}
-
-				// Send notificationInfo
-				select {
-				case notificationInfoCh <- notificationInfo:
-				case <-ctx.Done():
-					closeResponse(resp)
-					return
-				}
-			}
-
-			if err = bio.Err(); err != nil {
-				select {
-				case notificationInfoCh <- notification.Info{
-					Err: err,
-				}:
-				case <-ctx.Done():
-					return
-				}
-			}
-
-			// Close current connection before looping further.
-			closeResponse(resp)
-
-		}
-	}(notificationInfoCh)
-
-	// Returns the notification info channel, for caller to start reading from.
-	return notificationInfoCh
-}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
deleted file mode 100644
index dbb5259..0000000
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2020 MinIO, Inc.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"io"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/minio/minio-go/v7/pkg/s3utils"
-)
-
-// SetBucketPolicy sets the access permissions on an existing bucket.
-func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-
-	// If policy is empty then delete the bucket policy.
-	if policy == "" {
-		return c.removeBucketPolicy(ctx, bucketName)
-	}
-
-	// Save the updated policies.
-	return c.putBucketPolicy(ctx, bucketName, policy)
-}
-
-// Saves a new bucket policy.
-func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error {
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("policy", "")
-
-	reqMetadata := requestMetadata{
-		bucketName:    bucketName,
-		queryValues:   urlValues,
-		contentBody:   strings.NewReader(policy),
-		contentLength: int64(len(policy)),
-	}
-
-	// Execute PUT to upload a new bucket policy.
-	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
-			return httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-	return nil
-}
-
-// Removes all policies on a bucket.
-func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error {
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("policy", "")
-
-	// Execute DELETE on bucket to remove the policy.
-	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-
-	if resp.StatusCode != http.StatusNoContent {
-		return httpRespToErrorResponse(resp, bucketName, "")
-	}
-
-	return nil
-}
-
-// GetBucketPolicy returns the current policy
-func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return "", err
-	}
-	bucketPolicy, err := c.getBucketPolicy(ctx, bucketName)
-	if err != nil {
-		errResponse := ToErrorResponse(err)
-		if errResponse.Code == "NoSuchBucketPolicy" {
-			return "", nil
-		}
-		return "", err
-	}
-	return bucketPolicy, nil
-}
-
-// Request server for current bucket policy.
-func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) {
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("policy", "")
-
-	// Execute GET on bucket to fetch the current policy.
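A round-trip sketch for the policy APIs in this file; the bucket name and the anonymous read-only policy JSON are hypothetical. Per SetBucketPolicy above, an empty policy string deletes the policy, and the GET assembled below returns whatever is currently set.

// Assumes imports: "context", "fmt", and minio "github.com/minio/minio-go/v7".
func cyclePolicy(client *minio.Client) error {
	ctx := context.Background()

	// Hypothetical read-only policy allowing anonymous downloads.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": ["*"]},
	    "Action": ["s3:GetObject"],
	    "Resource": ["arn:aws:s3:::my-bucket/*"]
	  }]
	}`

	if err := client.SetBucketPolicy(ctx, "my-bucket", policy); err != nil {
		return err
	}

	current, err := client.GetBucketPolicy(ctx, "my-bucket")
	if err != nil {
		return err
	}
	fmt.Println("current policy:", current)

	// An empty policy string removes the policy again.
	return client.SetBucketPolicy(ctx, "my-bucket", "")
}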
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - - defer closeResponse(resp) - if err != nil { - return "", err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - } - - bucketPolicyBuf, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - - policy := string(bucketPolicyBuf) - return policy, err -} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go deleted file mode 100644 index b12bb13..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go +++ /dev/null @@ -1,355 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "io" - "net/http" - "net/url" - "time" - - "github.com/google/uuid" - "github.com/minio/minio-go/v7/pkg/replication" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// RemoveBucketReplication removes a replication config on an existing bucket. -func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { - return c.removeBucketReplication(ctx, bucketName) -} - -// SetBucketReplication sets a replication config on an existing bucket. -func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If replication is empty then delete it. - if cfg.Empty() { - return c.removeBucketReplication(ctx, bucketName) - } - // Save the updated replication. - return c.putBucketReplication(ctx, bucketName, cfg) -} - -// Saves a new bucket replication. -func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("replication", "") - replication, err := xml.Marshal(cfg) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(replication), - contentLength: int64(len(replication)), - contentMD5Base64: sumMD5Base64(replication), - } - - // Execute PUT to upload a new bucket replication config. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - - return nil -} - -// Remove replication from a bucket. 
-func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error {
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("replication", "")
-
-	// Execute DELETE on bucket to remove the replication configuration.
-	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp.StatusCode != http.StatusOK {
-		return httpRespToErrorResponse(resp, bucketName, "")
-	}
-	return nil
-}
-
-// GetBucketReplication fetches bucket replication configuration. If the config is not
-// found, returns empty config with nil error.
-func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return cfg, err
-	}
-	bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName)
-	if err != nil {
-		errResponse := ToErrorResponse(err)
-		if errResponse.Code == "ReplicationConfigurationNotFoundError" {
-			return cfg, nil
-		}
-		return cfg, err
-	}
-	return bucketReplicationCfg, nil
-}
-
-// Request server for current bucket replication config.
-func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("replication", "")
-
-	// Execute GET on bucket to get replication config.
-	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
-		bucketName:  bucketName,
-		queryValues: urlValues,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return cfg, err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return cfg, httpRespToErrorResponse(resp, bucketName, "")
-	}
-
-	if err = xmlDecoder(resp.Body, &cfg); err != nil {
-		return cfg, err
-	}
-
-	return cfg, nil
-}
-
-// GetBucketReplicationMetrics fetches bucket replication status metrics
-func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return s, err
-	}
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("replication-metrics", "")
-
-	// Execute GET on bucket to get replication metrics.
-	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
-		bucketName:  bucketName,
-		queryValues: urlValues,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return s, err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return s, httpRespToErrorResponse(resp, bucketName, "")
-	}
-	respBytes, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return s, err
-	}
-
-	if err := json.Unmarshal(respBytes, &s); err != nil {
-		return s, err
-	}
-	return s, nil
-}
-
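A caller-side sketch of reading back the replication state through the getters above; the bucket name is hypothetical, and GetBucketReplication returns an empty config (cfg.Empty() == true) rather than an error when none is configured.

// Assumes imports: "context", "fmt", and minio "github.com/minio/minio-go/v7".
func inspectReplication(client *minio.Client) error {
	ctx := context.Background()

	cfg, err := client.GetBucketReplication(ctx, "my-bucket")
	if err != nil {
		return err
	}
	if cfg.Empty() {
		fmt.Println("no replication configured")
		return nil
	}
	for _, rule := range cfg.Rules {
		fmt.Println("rule:", rule.ID, "status:", rule.Status)
	}

	metrics, err := client.GetBucketReplicationMetrics(ctx, "my-bucket")
	if err != nil {
		return err
	}
	fmt.Printf("replication metrics: %+v\n", metrics)
	return nil
}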
-// mustGetUUID - get a random UUID.
-func mustGetUUID() string {
-	u, err := uuid.NewRandom()
-	if err != nil {
-		return ""
-	}
-	return u.String()
-}
-
-// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
-// is enabled in the replication config
-func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
-	rID = mustGetUUID()
-	_, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID)
-	if err != nil {
-		return rID, err
-	}
-	return rID, nil
-}
-
-// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if
-// ExistingObjectReplication is enabled in the replication config
-func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) {
-	return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID())
-}
-
-// resetBucketReplicationOnTarget kicks off replication of previously replicated objects if ExistingObjectReplication
-// is enabled in the replication config
-func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
-	// Input validation.
-	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
-		return
-	}
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("replication-reset", "")
-	if olderThan > 0 {
-		urlValues.Set("older-than", olderThan.String())
-	}
-	if tgtArn != "" {
-		urlValues.Set("arn", tgtArn)
-	}
-	urlValues.Set("reset-id", resetID)
-	// Execute PUT on bucket to reset replication.
-	resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
-		bucketName:  bucketName,
-		queryValues: urlValues,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return rinfo, err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return rinfo, httpRespToErrorResponse(resp, bucketName, "")
-	}
-
-	if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
-		return rinfo, err
-	}
-	return rinfo, nil
-}
-
-// GetBucketReplicationResyncStatus gets the status of replication resync
-func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return rinfo, err
-	}
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("replication-reset-status", "")
-	if arn != "" {
-		urlValues.Set("arn", arn)
-	}
-	// Execute GET on bucket to get replication resync status.
-	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
-		bucketName:  bucketName,
-		queryValues: urlValues,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return rinfo, err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return rinfo, httpRespToErrorResponse(resp, bucketName, "")
-	}
-
-	if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
-		return rinfo, err
-	}
-	return rinfo, nil
-}
-
-// GetBucketReplicationMetricsV2 fetches bucket replication status metrics
-func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return s, err
-	}
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("replication-metrics", "2")
-
-	// Execute GET on bucket to get replication metrics.
-	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
-		bucketName:  bucketName,
-		queryValues: urlValues,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return s, err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return s, httpRespToErrorResponse(resp, bucketName, "")
-	}
-	respBytes, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return s, err
-	}
-
-	if err := json.Unmarshal(respBytes, &s); err != nil {
-		return s, err
-	}
-	return s, nil
-}
-
-// CheckBucketReplication validates if replication is set up properly for a bucket
-func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("replication-check", "")
-
-	// Execute GET on bucket to check the replication configuration.
-	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
-		bucketName:  bucketName,
-		queryValues: urlValues,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return httpRespToErrorResponse(resp, bucketName, "")
-	}
-	return nil
-}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
deleted file mode 100644
index 86d7429..0000000
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2020 MinIO, Inc.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"encoding/xml"
-	"errors"
-	"io"
-	"net/http"
-	"net/url"
-
-	"github.com/minio/minio-go/v7/pkg/s3utils"
-	"github.com/minio/minio-go/v7/pkg/tags"
-)
-
-// GetBucketTagging fetches tagging configuration for a bucket with a
-// context to control cancellations and timeouts.
-func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return nil, err
-	}
-
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("tagging", "")
-
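While the tagging GET is assembled here, a usage sketch of the three tagging APIs in this file; the bucket name and tag keys are hypothetical, and tags.NewTags is assumed from this module's pkg/tags (second argument false selects bucket-tag rather than object-tag limits).

// Assumes imports: "context", "fmt", minio "github.com/minio/minio-go/v7",
// and "github.com/minio/minio-go/v7/pkg/tags".
func cycleTags(client *minio.Client) error {
	ctx := context.Background()

	t, err := tags.NewTags(map[string]string{"team": "storage", "env": "dev"}, false)
	if err != nil {
		return err
	}
	if err := client.SetBucketTagging(ctx, "my-bucket", t); err != nil {
		return err
	}

	got, err := client.GetBucketTagging(ctx, "my-bucket")
	if err != nil {
		return err
	}
	fmt.Println("tags:", got.ToMap())

	return client.RemoveBucketTagging(ctx, "my-bucket")
}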
-	// Execute GET on bucket to get tagging configuration.
-	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
-		bucketName:  bucketName,
-		queryValues: urlValues,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return nil, err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return nil, httpRespToErrorResponse(resp, bucketName, "")
-	}
-
-	defer io.Copy(io.Discard, resp.Body)
-	return tags.ParseBucketXML(resp.Body)
-}
-
-// SetBucketTagging sets tagging configuration for a bucket
-// with a context to control cancellations and timeouts.
-func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-
-	if tags == nil {
-		return errors.New("nil tags passed")
-	}
-
-	buf, err := xml.Marshal(tags)
-	if err != nil {
-		return err
-	}
-
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("tagging", "")
-
-	// Content-length is mandatory to set the tagging configuration
-	reqMetadata := requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentBody:      bytes.NewReader(buf),
-		contentLength:    int64(len(buf)),
-		contentMD5Base64: sumMD5Base64(buf),
-	}
-
-	// Execute PUT on bucket to put tagging configuration.
-	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
-		return httpRespToErrorResponse(resp, bucketName, "")
-	}
-	return nil
-}
-
-// RemoveBucketTagging removes tagging configuration for a
-// bucket with a context to control cancellations and timeouts.
-func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("tagging", "")
-
-	// Execute DELETE on bucket to remove tagging configuration.
-	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
-		return httpRespToErrorResponse(resp, bucketName, "")
-	}
-	return nil
-}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
deleted file mode 100644
index 8c84e4f..0000000
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2020 MinIO, Inc.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// SetBucketVersioning sets a bucket versioning configuration -func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - buf, err := xml.Marshal(config) - if err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("versioning", "") - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(buf), - contentLength: int64(len(buf)), - contentMD5Base64: sumMD5Base64(buf), - contentSHA256Hex: sum256Hex(buf), - } - - // Execute PUT to set a bucket versioning. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// EnableVersioning - enable object versioning in given bucket. -func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error { - return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) -} - -// SuspendVersioning - suspend object versioning in given bucket. -func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error { - return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) -} - -// ExcludedPrefix - holds individual prefixes excluded from being versioned. -type ExcludedPrefix struct { - Prefix string -} - -// BucketVersioningConfiguration is the versioning configuration structure -type BucketVersioningConfiguration struct { - XMLName xml.Name `xml:"VersioningConfiguration"` - Status string `xml:"Status"` - MFADelete string `xml:"MfaDelete,omitempty"` - // MinIO extension - allows selective, prefix-level versioning exclusion. - // Requires versioning to be enabled - ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"` - ExcludeFolders bool `xml:",omitempty"` -} - -// Various supported states -const ( - Enabled = "Enabled" - // Disabled State = "Disabled" only used by MFA Delete not supported yet. - Suspended = "Suspended" -) - -// Enabled returns true if bucket versioning is enabled -func (b BucketVersioningConfiguration) Enabled() bool { - return b.Status == Enabled -} - -// Suspended returns true if bucket versioning is suspended -func (b BucketVersioningConfiguration) Suspended() bool { - return b.Status == Suspended -} - -// GetBucketVersioning gets the versioning configuration on -// an existing bucket with a context to control cancellations and timeouts. -func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return BucketVersioningConfiguration{}, err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("versioning", "") - - // Execute GET on bucket to get the versioning configuration. 
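Before the GET below executes, a usage sketch tying together the versioning helpers defined above (the bucket name is hypothetical): enable, inspect, then suspend.

// Assumes imports: "context", "fmt", and minio "github.com/minio/minio-go/v7".
func cycleVersioning(client *minio.Client) error {
	ctx := context.Background()

	if err := client.EnableVersioning(ctx, "my-bucket"); err != nil {
		return err
	}

	cfg, err := client.GetBucketVersioning(ctx, "my-bucket")
	if err != nil {
		return err
	}
	fmt.Println("versioning enabled:", cfg.Enabled())

	// Suspending keeps already-created versions but stops making new ones.
	return client.SuspendVersioning(ctx, "my-bucket")
}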
-	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
-		bucketName:  bucketName,
-		queryValues: urlValues,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return BucketVersioningConfiguration{}, err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "")
-	}
-
-	versioningConfig := BucketVersioningConfiguration{}
-	if err = xmlDecoder(resp.Body, &versioningConfig); err != nil {
-		return versioningConfig, err
-	}
-
-	return versioningConfig, nil
-}
diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
deleted file mode 100644
index e64a244..0000000
--- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017, 2018 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/google/uuid"
-	"github.com/minio/minio-go/v7/pkg/encrypt"
-	"github.com/minio/minio-go/v7/pkg/s3utils"
-)
-
-// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
-type CopyDestOptions struct {
-	Bucket string // points to destination bucket
-	Object string // points to destination object
-
-	// `Encryption` is the key info for server-side-encryption with customer
-	// provided key. If it is nil, no encryption is performed.
-	Encryption encrypt.ServerSide
-
-	// `UserMetadata` is the user-metadata key-value pairs to be set on the
-	// destination. The keys are automatically prefixed with `x-amz-meta-`
-	// if needed. If nil is passed, and if only a single source (of any
-	// size) is provided in the ComposeObject call, then metadata from the
-	// source is copied to the destination.
-	UserMetadata map[string]string
-	// UserMetadata is only applied to the destination if ReplaceMetadata is
-	// true; otherwise UserMetadata is ignored and src.UserMetadata is preserved.
-	// NOTE: if you set this value to true and no metadata is present in
-	// UserMetadata, your destination object will not have any metadata set.
-	ReplaceMetadata bool
-
-	// `UserTags` is the user-defined object tags to be set on the destination.
-	// This will be set only if the `ReplaceTags` field is set to true.
-	// Otherwise this field is ignored
-	UserTags    map[string]string
-	ReplaceTags bool
-
-	// Specifies whether you want to apply a Legal Hold to the copied object.
-	LegalHold LegalHoldStatus
-
-	// Object Retention related fields
-	Mode            RetentionMode
-	RetainUntilDate time.Time
-
-	Size int64 // Needs to be specified if progress bar is specified.
-	// Progress of the entire copy operation will be sent here.
- Progress io.Reader -} - -// Process custom-metadata to remove a `x-amz-meta-` prefix if -// present and validate that keys are distinct (after this -// prefix removal). -func filterCustomMeta(userMeta map[string]string) map[string]string { - m := make(map[string]string) - for k, v := range userMeta { - if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { - k = k[len("x-amz-meta-"):] - } - if _, ok := m[k]; ok { - continue - } - m[k] = v - } - return m -} - -// Marshal converts all the CopyDestOptions into their -// equivalent HTTP header representation -func (opts CopyDestOptions) Marshal(header http.Header) { - const replaceDirective = "REPLACE" - if opts.ReplaceTags { - header.Set(amzTaggingHeaderDirective, replaceDirective) - if tags := s3utils.TagEncode(opts.UserTags); tags != "" { - header.Set(amzTaggingHeader, tags) - } - } - - if opts.LegalHold != LegalHoldStatus("") { - header.Set(amzLegalHoldHeader, opts.LegalHold.String()) - } - - if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() { - header.Set(amzLockMode, opts.Mode.String()) - header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339)) - } - - if opts.Encryption != nil { - opts.Encryption.Marshal(header) - } - - if opts.ReplaceMetadata { - header.Set("x-amz-metadata-directive", replaceDirective) - for k, v := range filterCustomMeta(opts.UserMetadata) { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { - header.Set(k, v) - } else { - header.Set("x-amz-meta-"+k, v) - } - } - } -} - -// toDestinationInfo returns a validated copyOptions object. -func (opts CopyDestOptions) validate() (err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { - return err - } - if err = s3utils.CheckValidObjectName(opts.Object); err != nil { - return err - } - if opts.Progress != nil && opts.Size < 0 { - return errInvalidArgument("For progress bar effective size needs to be specified") - } - return nil -} - -// CopySrcOptions represents a source object to be copied, using -// server-side copying APIs. -type CopySrcOptions struct { - Bucket, Object string - VersionID string - MatchETag string - NoMatchETag string - MatchModifiedSince time.Time - MatchUnmodifiedSince time.Time - MatchRange bool - Start, End int64 - Encryption encrypt.ServerSide -} - -// Marshal converts all the CopySrcOptions into their -// equivalent HTTP header representation -func (opts CopySrcOptions) Marshal(header http.Header) { - // Set the source header - header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)) - if opts.VersionID != "" { - header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID) - } - - if opts.MatchETag != "" { - header.Set("x-amz-copy-source-if-match", opts.MatchETag) - } - if opts.NoMatchETag != "" { - header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag) - } - - if !opts.MatchModifiedSince.IsZero() { - header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat)) - } - if !opts.MatchUnmodifiedSince.IsZero() { - header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat)) - } - - if opts.Encryption != nil { - encrypt.SSECopy(opts.Encryption).Marshal(header) - } -} - -func (opts CopySrcOptions) validate() (err error) { - // Input validation. 
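With CopyDestOptions and CopySrcOptions both defined (their validation continues below), a minimal server-side copy sketch; the buckets, object keys, and metadata values are hypothetical placeholders.

// Assumes imports: "context", "fmt", and minio "github.com/minio/minio-go/v7".
func archiveCopy(client *minio.Client) error {
	src := minio.CopySrcOptions{
		Bucket: "src-bucket",
		Object: "report.csv",
	}
	dst := minio.CopyDestOptions{
		Bucket:          "dst-bucket",
		Object:          "archive/report.csv",
		ReplaceMetadata: true,
		UserMetadata:    map[string]string{"reviewed": "true"},
	}

	info, err := client.CopyObject(context.Background(), dst, src)
	if err != nil {
		return err
	}
	fmt.Println("copied, etag:", info.ETag)
	return nil
}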
- if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { - return err - } - if err = s3utils.CheckValidObjectName(opts.Object); err != nil { - return err - } - if opts.Start > opts.End || opts.Start < 0 { - return errInvalidArgument("start must be non-negative, and start must be at most end.") - } - return nil -} - -// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. -func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, - metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions, -) (ObjectInfo, error) { - // Build headers. - headers := make(http.Header) - - // Set all the metadata headers. - for k, v := range metadata { - headers.Set(k, v) - } - if !dstOpts.Internal.ReplicationStatus.Empty() { - headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus)) - } - if !dstOpts.Internal.SourceMTime.IsZero() { - headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano)) - } - if dstOpts.Internal.SourceETag != "" { - headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) - } - if dstOpts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "true") - } - if dstOpts.Internal.ReplicationValidityCheck { - headers.Set(minIOBucketReplicationCheck, "true") - } - if !dstOpts.Internal.LegalholdTimestamp.IsZero() { - headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) - } - if !dstOpts.Internal.RetentionTimestamp.IsZero() { - headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) - } - if !dstOpts.Internal.TaggingTimestamp.IsZero() { - headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano)) - } - - if len(dstOpts.UserTags) != 0 { - headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags)) - } - - reqMetadata := requestMetadata{ - bucketName: destBucket, - objectName: destObject, - customHeader: headers, - } - if dstOpts.Internal.SourceVersionID != "" { - if dstOpts.Internal.SourceVersionID != nullVersionID { - if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil { - return ObjectInfo{}, errInvalidArgument(err.Error()) - } - } - urlValues := make(url.Values) - urlValues.Set("versionId", dstOpts.Internal.SourceVersionID) - reqMetadata.queryValues = urlValues - } - - // Set the source header - headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) - if srcOpts.VersionID != "" { - headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID) - } - // Send upload-part-copy request - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - - // Check if we got an error response. 
- if resp.StatusCode != http.StatusOK { - return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) - } - - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return ObjectInfo{}, err - } - - objInfo := ObjectInfo{ - Key: destObject, - ETag: strings.Trim(cpObjRes.ETag, "\""), - LastModified: cpObjRes.LastModified, - } - return objInfo, nil -} - -func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, - partID int, startOffset, length int64, metadata map[string]string, -) (p CompletePart, err error) { - headers := make(http.Header) - - // Set source - headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) - - if startOffset < 0 { - return p, errInvalidArgument("startOffset must be non-negative") - } - - if length >= 0 { - headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) - } - - for k, v := range metadata { - headers.Set(k, v) - } - - queryValues := make(url.Values) - queryValues.Set("partNumber", strconv.Itoa(partID)) - queryValues.Set("uploadId", uploadID) - - resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ - bucketName: destBucket, - objectName: destObject, - customHeader: headers, - queryValues: queryValues, - }) - defer closeResponse(resp) - if err != nil { - return - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return p, httpRespToErrorResponse(resp, destBucket, destObject) - } - - // Decode copy-part response on success. - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return p, err - } - p.PartNumber, p.ETag = partID, cpObjRes.ETag - return p, nil -} - -// uploadPartCopy - helper function to create a part in a multipart -// upload via an upload-part-copy request -// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html -func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, - headers http.Header, -) (p CompletePart, err error) { - // Build query parameters - urlValues := make(url.Values) - urlValues.Set("partNumber", strconv.Itoa(partNumber)) - urlValues.Set("uploadId", uploadID) - - // Send upload-part-copy request - resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ - bucketName: bucket, - objectName: object, - customHeader: headers, - queryValues: urlValues, - }) - defer closeResponse(resp) - if err != nil { - return p, err - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return p, httpRespToErrorResponse(resp, bucket, object) - } - - // Decode copy-part response on success. - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return p, err - } - p.PartNumber, p.ETag = partNumber, cpObjRes.ETag - return p, nil -} - -// ComposeObject - creates an object using server-side copying -// of existing objects. It takes a list of source objects (with optional offsets) -// and concatenates them into a new object using only server-side copying -// operations. Optionally takes progress reader hook for applications to -// look at current progress. 
-func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
-	if len(srcs) < 1 || len(srcs) > maxPartsCount {
-		return UploadInfo{}, errInvalidArgument("There must be at least one and up to 10000 source objects.")
-	}
-
-	for _, src := range srcs {
-		if err := src.validate(); err != nil {
-			return UploadInfo{}, err
-		}
-	}
-
-	if err := dst.validate(); err != nil {
-		return UploadInfo{}, err
-	}
-
-	srcObjectInfos := make([]ObjectInfo, len(srcs))
-	srcObjectSizes := make([]int64, len(srcs))
-	var totalSize, totalParts int64
-	var err error
-	for i, src := range srcs {
-		opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
-		srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
-		if err != nil {
-			return UploadInfo{}, err
-		}
-
-		srcCopySize := srcObjectInfos[i].Size
-		// Check if a segment is specified, and if so, is the
-		// segment within object bounds?
-		if src.MatchRange {
-			// Since range is specified,
-			//    0 <= src.start <= src.end
-			// so only invalid case to check is:
-			if src.End >= srcCopySize || src.Start < 0 {
-				return UploadInfo{}, errInvalidArgument(
-					fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
-						i, src.Start, src.End, srcCopySize))
-			}
-			srcCopySize = src.End - src.Start + 1
-		}
-
-		// Only the last source may be less than `absMinPartSize`
-		if srcCopySize < absMinPartSize && i < len(srcs)-1 {
-			return UploadInfo{}, errInvalidArgument(
-				fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
-		}
-
-		// Is data to copy too large?
-		totalSize += srcCopySize
-		if totalSize > maxMultipartPutObjectSize {
-			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
-		}
-
-		// record source size
-		srcObjectSizes[i] = srcCopySize
-
-		// calculate parts needed for current source
-		totalParts += partsRequired(srcCopySize)
-		// Do we need more parts than we are allowed?
-		if totalParts > maxPartsCount {
-			return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
-				"Your proposed compose object requires more than %d parts", maxPartsCount))
-		}
-	}
-
-	// Single source object case (i.e. when only one source is
-	// involved, it is being copied wholly and at most 5GiB in
-	// size, empty files are also supported).
-	if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
-		return c.CopyObject(ctx, dst, srcs[0])
-	}
-
-	// Now, handle multipart-copy cases.
-
-	// 1. Ensure that the object has not been changed while
-	//    we are copying data. (Assign via the index: assigning to the
-	//    loop variable would only mutate a copy of the element.)
-	for i := range srcs {
-		srcs[i].MatchETag = srcObjectInfos[i].ETag
-	}
-
-	// 2. Initiate a new multipart upload.
-
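For reference while reading the multipart path below, a caller-side sketch of ComposeObject; bucket and object names are hypothetical, and every source except the last must be at least 5MiB (absMinPartSize) per the validation above.

// Assumes imports: "context", "fmt", and minio "github.com/minio/minio-go/v7".
func assemble(client *minio.Client) error {
	// Concatenate three hypothetical part-objects into one, server-side.
	srcs := []minio.CopySrcOptions{
		{Bucket: "uploads", Object: "part-1"},
		{Bucket: "uploads", Object: "part-2"},
		{Bucket: "uploads", Object: "part-3"},
	}
	dst := minio.CopyDestOptions{Bucket: "assembled", Object: "whole.bin"}

	info, err := client.ComposeObject(context.Background(), dst, srcs...)
	if err != nil {
		return err
	}
	fmt.Printf("composed %q (%d bytes)\n", info.Key, info.Size)
	return nil
}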
-	// Set user-metadata on the destination object. If no user-metadata
-	// is specified, and there is only one source, then metadata from the
-	// source is copied.
-	var userMeta map[string]string
-	if dst.ReplaceMetadata {
-		userMeta = dst.UserMetadata
-	} else {
-		userMeta = srcObjectInfos[0].UserMetadata
-	}
-
-	var userTags map[string]string
-	if dst.ReplaceTags {
-		userTags = dst.UserTags
-	} else {
-		userTags = srcObjectInfos[0].UserTags
-	}
-
-	uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
-		ServerSideEncryption: dst.Encryption,
-		UserMetadata:         userMeta,
-		UserTags:             userTags,
-		Mode:                 dst.Mode,
-		RetainUntilDate:      dst.RetainUntilDate,
-		LegalHold:            dst.LegalHold,
-	})
-	if err != nil {
-		return UploadInfo{}, err
-	}
-
-	// 3. Perform copy part uploads
-	objParts := []CompletePart{}
-	partIndex := 1
-	for i, src := range srcs {
-		h := make(http.Header)
-		src.Marshal(h)
-		if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
-			dst.Encryption.Marshal(h)
-		}
-
-		// calculate start/end indices of parts after
-		// splitting.
-		startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
-		for j, start := range startIdx {
-			end := endIdx[j]
-
-			// Add (or reset) source range header for
-			// upload part copy request.
-			h.Set("x-amz-copy-source-range",
-				fmt.Sprintf("bytes=%d-%d", start, end))
-
-			// make upload-part-copy request
-			complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
-				dst.Object, uploadID, partIndex, h)
-			if err != nil {
-				return UploadInfo{}, err
-			}
-			if dst.Progress != nil {
-				io.CopyN(io.Discard, dst.Progress, end-start+1)
-			}
-			objParts = append(objParts, complPart)
-			partIndex++
-		}
-	}
-
-	// 4. Make final complete-multipart request.
-	uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
-		completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
-	if err != nil {
-		return UploadInfo{}, err
-	}
-
-	uploadInfo.Size = totalSize
-	return uploadInfo, nil
-}
-
-// partsRequired computes the number of parts needed for size, assuming a
-// part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1))
-func partsRequired(size int64) int64 {
-	maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
-	r := size / int64(maxPartSize)
-	if size%int64(maxPartSize) > 0 {
-		r++
-	}
-	return r
-}
-
-// calculateEvenSplits - computes splits for a source and returns
-// start and end index slices. Splits happen evenly to be sure that no
-// part is less than 5MiB, as that could fail the multipart request if
-// it is not the last part.
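Before the implementation below, a small self-contained re-derivation of the even-split arithmetic described above. The evenSplits function and the sample numbers are illustrative only; it takes the part count k directly, whereas the library derives it from its own size constants via partsRequired.

package main

import "fmt"

// evenSplits mirrors the quotient/remainder layout of calculateEvenSplits:
// the first size%k parts get one extra byte, so no two parts differ in
// size by more than one.
func evenSplits(size, k, start int64) (starts, ends []int64) {
	quot, rem := size/k, size%k
	next := start
	for j := int64(0); j < k; j++ {
		partSize := quot
		if j < rem {
			partSize++ // distribute the remainder over the leading parts
		}
		starts = append(starts, next)
		ends = append(ends, next+partSize-1)
		next += partSize
	}
	return starts, ends
}

func main() {
	// 10 bytes over 3 parts: sizes 4, 3, 3 -> ranges [0,3] [4,6] [7,9].
	starts, ends := evenSplits(10, 3, 0)
	fmt.Println(starts, ends) // [0 4 7] [3 6 9]
}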
-func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) { - if size == 0 { - return - } - - reqParts := partsRequired(size) - startIndex = make([]int64, reqParts) - endIndex = make([]int64, reqParts) - // Compute number of required parts `k`, as: - // - // k = ceiling(size / copyPartSize) - // - // Now, distribute the `size` bytes in the source into - // k parts as evenly as possible: - // - // r parts sized (q+1) bytes, and - // (k - r) parts sized q bytes, where - // - // size = q * k + r (by simple division of size by k, - // so that 0 <= r < k) - // - start := src.Start - if start == -1 { - start = 0 - } - quot, rem := size/reqParts, size%reqParts - nextStart := start - for j := int64(0); j < reqParts; j++ { - curPartSize := quot - if j < rem { - curPartSize++ - } - - cStart := nextStart - cEnd := cStart + curPartSize - 1 - nextStart = cEnd + 1 - - startIndex[j], endIndex[j] = cStart, cEnd - } - return -} diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go deleted file mode 100644 index 0c95d91..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-copy-object.go +++ /dev/null @@ -1,76 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "net/http" -) - -// CopyObject - copy a source object into a new object -func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { - if err := src.validate(); err != nil { - return UploadInfo{}, err - } - - if err := dst.validate(); err != nil { - return UploadInfo{}, err - } - - header := make(http.Header) - dst.Marshal(header) - src.Marshal(header) - - resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ - bucketName: dst.Bucket, - objectName: dst.Object, - customHeader: header, - }) - if err != nil { - return UploadInfo{}, err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object) - } - - // Update the progress properly after successful copy. 
-	if dst.Progress != nil {
-		io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
-	}
-
-	cpObjRes := copyObjectResult{}
-	if err = xmlDecoder(resp.Body, &cpObjRes); err != nil {
-		return UploadInfo{}, err
-	}
-
-	// extract lifecycle expiry date and rule ID
-	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
-
-	return UploadInfo{
-		Bucket:           dst.Bucket,
-		Key:              dst.Object,
-		LastModified:     cpObjRes.LastModified,
-		ETag:             trimEtag(resp.Header.Get("ETag")),
-		VersionID:        resp.Header.Get(amzVersionID),
-		Expiration:       expTime,
-		ExpirationRuleID: ruleID,
-	}, nil
-}
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
deleted file mode 100644
index 97a6f80..0000000
--- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2020 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"encoding/xml"
-	"io"
-	"net/http"
-	"net/url"
-	"strings"
-	"time"
-)
-
-// BucketInfo container for bucket metadata.
-type BucketInfo struct {
-	// The name of the bucket.
-	Name string `json:"name"`
-	// Date the bucket was created.
-	CreationDate time.Time `json:"creationDate"`
-}
-
-// StringMap represents map with custom UnmarshalXML
-type StringMap map[string]string
-
-// UnmarshalXML unmarshals the XML into a map of string to strings,
-// creating a key in the map for each tag and setting its value to the
-// tag's contents.
-//
-// The fact this function is on the pointer of Map is important, so that
-// if m is nil it can be initialized, which is often the case if m is
-// nested in another XML structure. This is also why the first thing done
-// on the first line is initialize it.
-func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error {
-	*m = StringMap{}
-	for {
-		// Format is <key>value</key>
-		var e struct {
-			XMLName xml.Name
-			Value   string `xml:",chardata"`
-		}
-		err := d.Decode(&e)
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return err
-		}
-		(*m)[e.XMLName.Local] = e.Value
-	}
-	return nil
-}
-
-// URLMap represents map with custom UnmarshalXML
-type URLMap map[string]string
-
-// UnmarshalXML unmarshals the XML into a map of string to strings,
-// creating a key in the map for each tag and setting its value to the
-// tag's contents.
-//
-// The fact this function is on the pointer of Map is important, so that
-// if m is nil it can be initialized, which is often the case if m is
-// nested in another XML structure. This is also why the first thing done
-// on the first line is initialize it.
-func (m *URLMap) UnmarshalXML(d *xml.Decoder, se xml.StartElement) error { - *m = URLMap{} - var tgs string - if err := d.DecodeElement(&tgs, &se); err != nil { - if err == io.EOF { - return nil - } - return err - } - for tgs != "" { - var key string - key, tgs, _ = stringsCut(tgs, "&") - if key == "" { - continue - } - key, value, _ := stringsCut(key, "=") - key, err := url.QueryUnescape(key) - if err != nil { - return err - } - - value, err = url.QueryUnescape(value) - if err != nil { - return err - } - (*m)[key] = value - } - return nil -} - -// stringsCut slices s around the first instance of sep, -// returning the text before and after sep. -// The found result reports whether sep appears in s. -// If sep does not appear in s, cut returns s, "", false. -func stringsCut(s, sep string) (before, after string, found bool) { - if i := strings.Index(s, sep); i >= 0 { - return s[:i], s[i+len(sep):], true - } - return s, "", false -} - -// Owner name. -type Owner struct { - XMLName xml.Name `xml:"Owner" json:"owner"` - DisplayName string `xml:"ID" json:"name"` - ID string `xml:"DisplayName" json:"id"` -} - -// UploadInfo contains information about the -// newly uploaded or copied object. -type UploadInfo struct { - Bucket string - Key string - ETag string - Size int64 - LastModified time.Time - Location string - VersionID string - - // Lifecycle expiry-date and ruleID associated with the expiry - // not to be confused with `Expires` HTTP header. - Expiration time.Time - ExpirationRuleID string - - // Verified checksum values, if any. - // Values are base64 (standard) encoded. - // For multipart objects this is a checksum of the checksum of each part. - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string -} - -// RestoreInfo contains information of the restore operation of an archived object -type RestoreInfo struct { - // Is the restoring operation is still ongoing - OngoingRestore bool - // When the restored copy of the archived object will be removed - ExpiryTime time.Time -} - -// ObjectInfo container for object metadata. -type ObjectInfo struct { - // An ETag is optionally set to md5sum of an object. In case of multipart objects, - // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of - // each parts concatenated into one string. - ETag string `json:"etag"` - - Key string `json:"name"` // Name of the object - LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. - Size int64 `json:"size"` // Size in bytes of the object. - ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. - Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached. - - // Collection of additional metadata on the object. - // eg: x-amz-meta-*, content-encoding etc. - Metadata http.Header `json:"metadata" xml:"-"` - - // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. - // Only returned by MinIO servers. - UserMetadata StringMap `json:"userMetadata,omitempty"` - - // x-amz-tagging values in their k/v values. - // Only returned by MinIO servers. - UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"` - - // x-amz-tagging-count value - UserTagCount int - - // Owner name. - Owner Owner - - // ACL grant. - Grant []Grant - - // The class of storage used to store the object. 
-	StorageClass string `json:"storageClass"`
-
-	// Versioning related information
-	IsLatest       bool
-	IsDeleteMarker bool
-	VersionID      string `xml:"VersionId"`
-
-	// x-amz-replication-status value is either in one of the following states
-	// - COMPLETED
-	// - PENDING
-	// - FAILED
-	// - REPLICA (on the destination)
-	ReplicationStatus string `xml:"ReplicationStatus"`
-	// set to true if delete marker has backing object version on target, and eligible to replicate
-	ReplicationReady bool
-	// Lifecycle expiry-date and ruleID associated with the expiry
-	// not to be confused with `Expires` HTTP header.
-	Expiration       time.Time
-	ExpirationRuleID string
-
-	Restore *RestoreInfo
-
-	// Checksum values
-	ChecksumCRC32  string
-	ChecksumCRC32C string
-	ChecksumSHA1   string
-	ChecksumSHA256 string
-
-	Internal *struct {
-		K int // Data blocks
-		M int // Parity blocks
-	} `xml:"Internal"`
-
-	// Error
-	Err error `json:"-"`
-}
-
-// ObjectMultipartInfo container for multipart object metadata.
-type ObjectMultipartInfo struct {
-	// Date and time at which the multipart upload was initiated.
-	Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
-
-	Initiator initiator
-	Owner     owner
-
-	// The type of storage to use for the object. Defaults to 'STANDARD'.
-	StorageClass string
-
-	// Key of the object for which the multipart upload was initiated.
-	Key string
-
-	// Size in bytes of the object.
-	Size int64
-
-	// Upload ID that identifies the multipart upload.
-	UploadID string `xml:"UploadId"`
-
-	// Error
-	Err error
-}
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
deleted file mode 100644
index 7df211f..0000000
--- a/vendor/github.com/minio/minio-go/v7/api-error-response.go
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2020 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"encoding/xml"
-	"fmt"
-	"io"
-	"net/http"
-	"strings"
-)
-
-/* **** SAMPLE ERROR RESPONSE ****
-   <?xml version="1.0" encoding="UTF-8"?>
-   <Error>
-      <Code>AccessDenied</Code>
-      <Message>Access Denied</Message>
-      <BucketName>bucketName</BucketName>
-      <Key>objectName</Key>
-      <RequestId>F19772218238A85A</RequestId>
-      <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
-   </Error>
-*/
-
-// ErrorResponse - Is the typed error returned by all API operations.
-// ErrorResponse struct should be comparable since it is compared inside
-// golang http API (https://github.com/golang/go/issues/29768)
-type ErrorResponse struct {
-	XMLName    xml.Name `xml:"Error" json:"-"`
-	Code       string
-	Message    string
-	BucketName string
-	Key        string
-	Resource   string
-	RequestID  string `xml:"RequestId"`
-	HostID     string `xml:"HostId"`
-
-	// Region where the bucket is located. This header is returned
-	// only in HEAD bucket and ListObjects response.
-	Region string
-
-	// Captures the server string returned in response header.
- Server string - - // Underlying HTTP status code for the returned error - StatusCode int `xml:"-" json:"-"` -} - -// ToErrorResponse - Returns parsed ErrorResponse struct from body and -// http headers. -// -// For example: -// -// import s3 "github.com/minio/minio-go/v7" -// ... -// ... -// reader, stat, err := s3.GetObject(...) -// if err != nil { -// resp := s3.ToErrorResponse(err) -// } -// ... -func ToErrorResponse(err error) ErrorResponse { - switch err := err.(type) { - case ErrorResponse: - return err - default: - return ErrorResponse{} - } -} - -// Error - Returns S3 error string. -func (e ErrorResponse) Error() string { - if e.Message == "" { - msg, ok := s3ErrorResponseMap[e.Code] - if !ok { - msg = fmt.Sprintf("Error response code %s.", e.Code) - } - return msg - } - return e.Message -} - -// Common string for errors to report issue location in unexpected -// cases. -const ( - reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." -) - -// xmlDecodeAndBody reads the whole body up to 1MB and -// tries to XML decode it into v. -// The body that was read and any error from reading or decoding is returned. -func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { - // read the whole body (up to 1MB) - const maxBodyLength = 1 << 20 - body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) - if err != nil { - return nil, err - } - return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) -} - -// httpRespToErrorResponse returns a new encoded ErrorResponse -// structure as error. -func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { - if resp == nil { - msg := "Empty http response. " + reportIssue - return errInvalidArgument(msg) - } - - errResp := ErrorResponse{ - StatusCode: resp.StatusCode, - Server: resp.Header.Get("Server"), - } - - errBody, err := xmlDecodeAndBody(resp.Body, &errResp) - // Xml decoding failed with no body, fall back to HTTP headers. - if err != nil { - switch resp.StatusCode { - case http.StatusNotFound: - if objectName == "" { - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "NoSuchBucket", - Message: "The specified bucket does not exist.", - BucketName: bucketName, - } - } else { - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "NoSuchKey", - Message: "The specified key does not exist.", - BucketName: bucketName, - Key: objectName, - } - } - case http.StatusForbidden: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "AccessDenied", - Message: "Access Denied.", - BucketName: bucketName, - Key: objectName, - } - case http.StatusConflict: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "Conflict", - Message: "Bucket not empty.", - BucketName: bucketName, - } - case http.StatusPreconditionFailed: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "PreconditionFailed", - Message: s3ErrorResponseMap["PreconditionFailed"], - BucketName: bucketName, - Key: objectName, - } - default: - msg := resp.Status - if len(errBody) > 0 { - msg = string(errBody) - if len(msg) > 1024 { - msg = msg[:1024] + "..." 
- } - } - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: resp.Status, - Message: msg, - BucketName: bucketName, - } - } - } - - code := resp.Header.Get("x-minio-error-code") - if code != "" { - errResp.Code = code - } - desc := resp.Header.Get("x-minio-error-desc") - if desc != "" { - errResp.Message = strings.Trim(desc, `"`) - } - - // Save hostID, requestID and region information - // from headers if not available through error XML. - if errResp.RequestID == "" { - errResp.RequestID = resp.Header.Get("x-amz-request-id") - } - if errResp.HostID == "" { - errResp.HostID = resp.Header.Get("x-amz-id-2") - } - if errResp.Region == "" { - errResp.Region = resp.Header.Get("x-amz-bucket-region") - } - if errResp.Code == "InvalidRegion" && errResp.Region != "" { - errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) - } - - return errResp -} - -// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. -func errTransferAccelerationBucket(bucketName string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", - Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", - BucketName: bucketName, - } -} - -// errEntityTooLarge - Input size is larger than supported maximum. -func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "EntityTooLarge", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// errEntityTooSmall - Input size is smaller than supported minimum. -func errEntityTooSmall(totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "EntityTooSmall", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// errUnexpectedEOF - Unexpected end of file reached. -func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "UnexpectedEOF", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// errInvalidArgument - Invalid argument response. -func errInvalidArgument(message string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", - Message: message, - RequestID: "minio", - } -} - -// errAPINotSupported - API not supported response -// The specified API call is not supported -func errAPINotSupported(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotImplemented, - Code: "APINotSupported", - Message: message, - RequestID: "minio", - } -} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go deleted file mode 100644 index 9041d99..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "encoding/xml" - "net/http" - "net/url" -) - -// Grantee represents the person being granted permissions. -type Grantee struct { - XMLName xml.Name `xml:"Grantee"` - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - URI string `xml:"URI"` -} - -// Grant holds grant information -type Grant struct { - XMLName xml.Name `xml:"Grant"` - Grantee Grantee - Permission string `xml:"Permission"` -} - -// AccessControlList contains the set of grantees and the permissions assigned to each grantee. -type AccessControlList struct { - XMLName xml.Name `xml:"AccessControlList"` - Grant []Grant - Permission string `xml:"Permission"` -} - -type accessControlPolicy struct { - XMLName xml.Name `xml:"AccessControlPolicy"` - Owner Owner - AccessControlList AccessControlList -} - -// GetObjectACL get object ACLs -func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: url.Values{ - "acl": []string{""}, - }, - }) - if err != nil { - return nil, err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - - res := &accessControlPolicy{} - - if err := xmlDecoder(resp.Body, res); err != nil { - return nil, err - } - - objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{}) - if err != nil { - return nil, err - } - - objInfo.Owner.DisplayName = res.Owner.DisplayName - objInfo.Owner.ID = res.Owner.ID - - objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...) 
- - cannedACL := getCannedACL(res) - if cannedACL != "" { - objInfo.Metadata.Add("X-Amz-Acl", cannedACL) - return &objInfo, nil - } - - grantACL := getAmzGrantACL(res) - for k, v := range grantACL { - objInfo.Metadata[k] = v - } - - return &objInfo, nil -} - -func getCannedACL(aCPolicy *accessControlPolicy) string { - grants := aCPolicy.AccessControlList.Grant - - switch { - case len(grants) == 1: - if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { - return "private" - } - case len(grants) == 2: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { - return "authenticated-read" - } - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { - return "public-read" - } - if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { - return "bucket-owner-read" - } - } - case len(grants) == 3: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { - return "public-read-write" - } - } - } - return "" -} - -func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { - grants := aCPolicy.AccessControlList.Grant - res := map[string][]string{} - - for _, g := range grants { - switch { - case g.Permission == "READ": - res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) - case g.Permission == "WRITE": - res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) - case g.Permission == "READ_ACP": - res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) - case g.Permission == "WRITE_ACP": - res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) - case g.Permission == "FULL_CONTROL": - res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) - } - } - return res -} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go deleted file mode 100644 index 2332dbf..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go +++ /dev/null @@ -1,127 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "os" - "path/filepath" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// FGetObject - download contents of an object to a local file. -// The options can be used to specify the GET request further. -func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Verify if destination already exists. 
-    st, err := os.Stat(filePath)
-    if err == nil {
-        // If the destination exists and is a directory.
-        if st.IsDir() {
-            return errInvalidArgument("fileName is a directory.")
-        }
-    }
-
-    // Proceed if the file does not exist; return for all other errors.
-    if err != nil {
-        if !os.IsNotExist(err) {
-            return err
-        }
-    }
-
-    // Extract top level directory.
-    objectDir, _ := filepath.Split(filePath)
-    if objectDir != "" {
-        // Create any missing top level directories.
-        if err := os.MkdirAll(objectDir, 0o700); err != nil {
-            return err
-        }
-    }
-
-    // Gather the object's md5sum: StatObject fetches the metadata, and the
-    // ETag and size are used below for the part file name and range request.
-    objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts))
-    if err != nil {
-        return err
-    }
-
-    // Write to a temporary file "filePath.<ETag>.part.minio" before saving.
-    filePartPath := filePath + objectStat.ETag + ".part.minio"
-
-    // If it exists, open in append mode. If not, create it as a part file.
-    filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
-    if err != nil {
-        return err
-    }
-
-    // If we return early with an error, be sure to close and delete
-    // filePart. If we have an error along the way there is a chance
-    // that filePart is somehow damaged, and we should discard it.
-    closeAndRemove := true
-    defer func() {
-        if closeAndRemove {
-            _ = filePart.Close()
-            _ = os.Remove(filePartPath)
-        }
-    }()
-
-    // Issue Stat to get the current offset.
-    st, err = filePart.Stat()
-    if err != nil {
-        return err
-    }
-
-    // Initialize get object request headers to set the
-    // appropriate range offsets to read from.
-    if st.Size() > 0 {
-        opts.SetRange(st.Size(), 0)
-    }
-
-    // Seek to current position for incoming reader.
-    objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts)
-    if err != nil {
-        return err
-    }
-
-    // Write to the part file.
-    if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
-        return err
-    }
-
-    // Close the file before rename; this is specifically needed for Windows users.
-    closeAndRemove = false
-    if err = filePart.Close(); err != nil {
-        return err
-    }
-
-    // Safely completed. Now commit by renaming to actual filename.
-    if err = os.Rename(filePartPath, filePath); err != nil {
-        return err
-    }
-
-    // Return.
-    return nil
-}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go
deleted file mode 100644
index 9e6b154..0000000
--- a/vendor/github.com/minio/minio-go/v7/api-get-object.go
+++ /dev/null
@@ -1,683 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2020 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-    "context"
-    "errors"
-    "fmt"
-    "io"
-    "net/http"
-    "sync"
-
-    "github.com/minio/minio-go/v7/pkg/s3utils"
-)
-
-// GetObject is a wrapper function that accepts a request context.
-func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
-    // Input validation.
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - - gctx, cancel := context.WithCancel(ctx) - - // Detect if snowball is server location we are talking to. - var snowball bool - if location, ok := c.bucketLocCache.Get(bucketName); ok { - snowball = location == "snowball" - } - - var ( - err error - httpReader io.ReadCloser - objectInfo ObjectInfo - totalRead int - ) - - // Create request channel. - reqCh := make(chan getRequest) - // Create response channel. - resCh := make(chan getResponse) - - // This routine feeds partial object data as and when the caller reads. - go func() { - defer close(resCh) - defer func() { - // Close the http response body before returning. - // This ends the connection with the server. - if httpReader != nil { - httpReader.Close() - } - }() - defer cancel() - - // Used to verify if etag of object has changed since last read. - var etag string - - for req := range reqCh { - // If this is the first request we may not need to do a getObject request yet. - if req.isFirstReq { - // First request is a Read/ReadAt. - if req.isReadOp { - // Differentiate between wanting the whole object and just a range. - if req.isReadAt { - // If this is a ReadAt request only get the specified range. - // Range is set with respect to the offset and length of the buffer requested. - // Do not set objectInfo from the first readAt request because it will not get - // the whole object. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { - opts.SetRange(req.Offset, 0) - } - httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{Error: err} - return - } - etag = objectInfo.ETag - // Read at least firstReq.Buffer bytes, if not we have - // reached our EOF. - size, err := readFull(httpReader, req.Buffer) - totalRead += size - if size > 0 && err == io.ErrUnexpectedEOF { - if int64(size) < objectInfo.Size { - // In situations when returned size - // is less than the expected content - // length set by the server, make sure - // we return io.ErrUnexpectedEOF - err = io.ErrUnexpectedEOF - } else { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { - // Special cases when server writes more data - // than the content-length, net/http response - // body returns an error, instead of converting - // it to io.EOF - return unexpected EOF. - err = io.ErrUnexpectedEOF - } - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - Size: size, - Error: err, - didRead: true, - } - } else { - // First request is a Stat or Seek call. - // Only need to run a StatObject until an actual Read or ReadAt request comes through. - - // Remove range header if already set, for stat Operations to get original file size. - delete(opts.headers, "Range") - objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) - if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the go-routine. - return - } - etag = objectInfo.ETag - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - } - } - } else if req.settingObjectInfo { // Request is just to get objectInfo. - // Remove range header if already set, for stat Operations to get original file size. 
- delete(opts.headers, "Range") - // Check whether this is snowball - // if yes do not use If-Match feature - // it doesn't work. - if etag != "" && !snowball { - opts.SetMatchETag(etag) - } - objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) - if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the goroutine. - return - } - // Send back the objectInfo. - resCh <- getResponse{ - objectInfo: objectInfo, - } - } else { - // Offset changes fetch the new object at an Offset. - // Because the httpReader may not be set by the first - // request if it was a stat or seek it must be checked - // if the object has been read or not to only initialize - // new ones when they haven't been already. - // All readAt requests are new requests. - if req.DidOffsetChange || !req.beenRead { - // Check whether this is snowball - // if yes do not use If-Match feature - // it doesn't work. - if etag != "" && !snowball { - opts.SetMatchETag(etag) - } - if httpReader != nil { - // Close previously opened http reader. - httpReader.Close() - } - // If this request is a readAt only get the specified range. - if req.isReadAt { - // Range is set with respect to the offset and length of the buffer requested. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { // Range is set with respect to the offset. - opts.SetRange(req.Offset, 0) - } else { - // Remove range header if already set - delete(opts.headers, "Range") - } - httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{ - Error: err, - } - return - } - totalRead = 0 - } - - // Read at least req.Buffer bytes, if not we have - // reached our EOF. - size, err := readFull(httpReader, req.Buffer) - totalRead += size - if size > 0 && err == io.ErrUnexpectedEOF { - if int64(totalRead) < objectInfo.Size { - // In situations when returned size - // is less than the expected content - // length set by the server, make sure - // we return io.ErrUnexpectedEOF - err = io.ErrUnexpectedEOF - } else { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { - // Special cases when server writes more data - // than the content-length, net/http response - // body returns an error, instead of converting - // it to io.EOF - return unexpected EOF. - err = io.ErrUnexpectedEOF - } - - // Reply back how much was read. - resCh <- getResponse{ - Size: size, - Error: err, - didRead: true, - objectInfo: objectInfo, - } - } - } - }() - - // Create a newObject through the information sent back by reqCh. - return newObject(gctx, cancel, reqCh, resCh), nil -} - -// get request message container to communicate with internal -// go-routine. -type getRequest struct { - Buffer []byte - Offset int64 // readAt offset. - DidOffsetChange bool // Tracks the offset changes for Seek requests. - beenRead bool // Determines if this is the first time an object is being read. - isReadAt bool // Determines if this request is a request to a specific range - isReadOp bool // Determines if this request is a Read or Read/At request. - isFirstReq bool // Determines if this request is the first time an object is being accessed. - settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. -} - -// get response message container to reply back for the request. 
-type getResponse struct { - Size int - Error error - didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. - objectInfo ObjectInfo // Used for the first request. -} - -// Object represents an open object. It implements -// Reader, ReaderAt, Seeker, Closer for a HTTP stream. -type Object struct { - // Mutex. - mutex *sync.Mutex - - // User allocated and defined. - reqCh chan<- getRequest - resCh <-chan getResponse - ctx context.Context - cancel context.CancelFunc - currOffset int64 - objectInfo ObjectInfo - - // Ask lower level to initiate data fetching based on currOffset - seekData bool - - // Keeps track of closed call. - isClosed bool - - // Keeps track of if this is the first call. - isStarted bool - - // Previous error saved for future calls. - prevErr error - - // Keeps track of if this object has been read yet. - beenRead bool - - // Keeps track of if objectInfo has been set yet. - objectInfoSet bool -} - -// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. -// Returns back the size of the buffer read, if anything was read, as well -// as any error encountered. For all first requests sent on the object -// it is also responsible for sending back the objectInfo. -func (o *Object) doGetRequest(request getRequest) (getResponse, error) { - select { - case <-o.ctx.Done(): - return getResponse{}, o.ctx.Err() - case o.reqCh <- request: - } - - response := <-o.resCh - - // Return any error to the top level. - if response.Error != nil { - return response, response.Error - } - - // This was the first request. - if !o.isStarted { - // The object has been operated on. - o.isStarted = true - } - // Set the objectInfo if the request was not readAt - // and it hasn't been set before. - if !o.objectInfoSet && !request.isReadAt { - o.objectInfo = response.objectInfo - o.objectInfoSet = true - } - // Set beenRead only if it has not been set before. - if !o.beenRead { - o.beenRead = response.didRead - } - // Data are ready on the wire, no need to reinitiate connection in lower level - o.seekData = false - - return response, nil -} - -// setOffset - handles the setting of offsets for -// Read/ReadAt/Seek requests. -func (o *Object) setOffset(bytesRead int64) error { - // Update the currentOffset. - o.currOffset += bytesRead - - if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { - return io.EOF - } - return nil -} - -// Read reads up to len(b) bytes into b. It returns the number of -// bytes read (0 <= n <= len(b)) and any error encountered. Returns -// io.EOF upon end of file. -func (o *Object) Read(b []byte) (n int, err error) { - if o == nil { - return 0, errInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // prevErr is previous error saved from previous operation. - if o.prevErr != nil || o.isClosed { - return 0, o.prevErr - } - - // Create a new request. - readReq := getRequest{ - isReadOp: true, - beenRead: o.beenRead, - Buffer: b, - } - - // Alert that this is the first request. - if !o.isStarted { - readReq.isFirstReq = true - } - - // Ask to establish a new data fetch routine based on seekData flag - readReq.DidOffsetChange = o.seekData - readReq.Offset = o.currOffset - - // Send and receive from the first request. - response, err := o.doGetRequest(readReq) - if err != nil && err != io.EOF { - // Save the error for future calls. - o.prevErr = err - return response.Size, err - } - - // Bytes read. - bytesRead := int64(response.Size) - - // Set the new offset. 
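-    // setOffset (defined above) advances currOffset and reports io.EOF once the known object size is reached.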
-    oerr := o.setOffset(bytesRead)
-    if oerr != nil {
-        // Save the error for future calls.
-        o.prevErr = oerr
-        return response.Size, oerr
-    }
-
-    // Return the response.
-    return response.Size, err
-}
-
-// Stat returns the ObjectInfo structure describing the Object.
-func (o *Object) Stat() (ObjectInfo, error) {
-    if o == nil {
-        return ObjectInfo{}, errInvalidArgument("Object is nil")
-    }
-    // Locking.
-    o.mutex.Lock()
-    defer o.mutex.Unlock()
-
-    if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
-        return ObjectInfo{}, o.prevErr
-    }
-
-    // This is the first request.
-    if !o.isStarted || !o.objectInfoSet {
-        // Send the request and get the response.
-        _, err := o.doGetRequest(getRequest{
-            isFirstReq:        !o.isStarted,
-            settingObjectInfo: !o.objectInfoSet,
-        })
-        if err != nil {
-            o.prevErr = err
-            return ObjectInfo{}, err
-        }
-    }
-
-    return o.objectInfo, nil
-}
-
-// ReadAt reads len(b) bytes from the File starting at byte offset
-// off. It returns the number of bytes read and the error, if any.
-// ReadAt always returns a non-nil error when n < len(b). At end of
-// file, that error is io.EOF.
-func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
-    if o == nil {
-        return 0, errInvalidArgument("Object is nil")
-    }
-
-    // Locking.
-    o.mutex.Lock()
-    defer o.mutex.Unlock()
-
-    // prevErr is the error which was saved in a previous operation.
-    if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
-        return 0, o.prevErr
-    }
-
-    // Set the current offset to the ReadAt offset, because the current offset will be shifted at the end of this method.
-    o.currOffset = offset
-
-    // Can only compare offsets to size when size has been set.
-    if o.objectInfoSet {
-        // If offset is negative then we return io.EOF.
-        // If offset is greater than or equal to object size we return io.EOF.
-        if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
-            return 0, io.EOF
-        }
-    }
-
-    // Create the new readAt request.
-    readAtReq := getRequest{
-        isReadOp:        true,
-        isReadAt:        true,
-        DidOffsetChange: true,       // Offset always changes.
-        beenRead:        o.beenRead, // Set if this is the first request to try and read.
-        Offset:          offset,     // Set the offset.
-        Buffer:          b,
-    }
-
-    // Alert that this is the first request.
-    if !o.isStarted {
-        readAtReq.isFirstReq = true
-    }
-
-    // Send and receive from the readAt request.
-    response, err := o.doGetRequest(readAtReq)
-    if err != nil && err != io.EOF {
-        // Save the error.
-        o.prevErr = err
-        return response.Size, err
-    }
-    // Bytes read.
-    bytesRead := int64(response.Size)
-    // There is no valid objectInfo yet
-    // to compare against for EOF.
-    if !o.objectInfoSet {
-        // Update the currentOffset.
-        o.currOffset += bytesRead
-    } else {
-        // If this was not the first request update
-        // the offsets and compare against objectInfo
-        // for EOF.
-        oerr := o.setOffset(bytesRead)
-        if oerr != nil {
-            o.prevErr = oerr
-            return response.Size, oerr
-        }
-    }
-    return response.Size, err
-}
-
-// Seek sets the offset for the next Read or Write to offset,
-// interpreted according to whence: 0 means relative to the
-// origin of the file, 1 means relative to the current offset,
-// and 2 means relative to the end.
-// Seek returns the new offset and an error, if any.
-//
-// Seeking to a negative offset is an error. Seeking to any positive
-// offset is legal; subsequent io operations succeed as long as the
-// underlying object is not closed.
-func (o *Object) Seek(offset int64, whence int) (n int64, err error) { - if o == nil { - return 0, errInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // At EOF seeking is legal allow only io.EOF, for any other errors we return. - if o.prevErr != nil && o.prevErr != io.EOF { - return 0, o.prevErr - } - - // Negative offset is valid for whence of '2'. - if offset < 0 && whence != 2 { - return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) - } - - // This is the first request. So before anything else - // get the ObjectInfo. - if !o.isStarted || !o.objectInfoSet { - // Create the new Seek request. - seekReq := getRequest{ - isReadOp: false, - Offset: offset, - isFirstReq: true, - } - // Send and receive from the seek request. - _, err := o.doGetRequest(seekReq) - if err != nil { - // Save the error. - o.prevErr = err - return 0, err - } - } - - newOffset := o.currOffset - - // Switch through whence. - switch whence { - default: - return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) - case 0: - if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { - return 0, io.EOF - } - newOffset = offset - case 1: - if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { - return 0, io.EOF - } - newOffset += offset - case 2: - // If we don't know the object size return an error for io.SeekEnd - if o.objectInfo.Size < 0 { - return 0, errInvalidArgument("Whence END is not supported when the object size is unknown") - } - // Seeking to positive offset is valid for whence '2', but - // since we are backing a Reader we have reached 'EOF' if - // offset is positive. - if offset > 0 { - return 0, io.EOF - } - // Seeking to negative position not allowed for whence. - if o.objectInfo.Size+offset < 0 { - return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) - } - newOffset = o.objectInfo.Size + offset - } - // Reset the saved error since we successfully seeked, let the Read - // and ReadAt decide. - if o.prevErr == io.EOF { - o.prevErr = nil - } - - // Ask lower level to fetch again from source when necessary - o.seekData = (newOffset != o.currOffset) || o.seekData - o.currOffset = newOffset - - // Return the effective offset. - return o.currOffset, nil -} - -// Close - The behavior of Close after the first call returns error -// for subsequent Close() calls. -func (o *Object) Close() (err error) { - if o == nil { - return errInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // if already closed return an error. - if o.isClosed { - return o.prevErr - } - - // Close successfully. - o.cancel() - - // Close the request channel to indicate the internal go-routine to exit. - close(o.reqCh) - - // Save for future operations. - errMsg := "Object is already closed. Bad file descriptor." - o.prevErr = errors.New(errMsg) - // Save here that we closed done channel successfully. - o.isClosed = true - return nil -} - -// newObject instantiates a new *minio.Object* -// ObjectInfo will be set by setObjectInfo -func newObject(ctx context.Context, cancel context.CancelFunc, reqCh chan<- getRequest, resCh <-chan getResponse) *Object { - return &Object{ - ctx: ctx, - cancel: cancel, - mutex: &sync.Mutex{}, - reqCh: reqCh, - resCh: resCh, - } -} - -// getObject - retrieve object from Object Storage. -// -// Additionally this function also takes range arguments to download the specified -// range bytes of an object. 
Setting offset and length = 0 will download the full object. -// -// For more information about the HTTP Range header. -// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { - // Validate input arguments. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, ObjectInfo{}, nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, ObjectInfo{}, nil, err - } - - // Execute GET on objectName. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: opts.toQueryValues(), - customHeader: opts.Header(), - contentSHA256Hex: emptySHA256Hex, - }) - if err != nil { - return nil, ObjectInfo{}, nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header) - if err != nil { - closeResponse(resp) - return nil, ObjectInfo{}, nil, err - } - - // do not close body here, caller will close - return resp.Body, objectStat, resp.Header, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go deleted file mode 100644 index a0216e2..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-get-options.go +++ /dev/null @@ -1,203 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "fmt" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/minio/minio-go/v7/pkg/encrypt" -) - -// AdvancedGetOptions for internal use by MinIO server - not intended for client use. -type AdvancedGetOptions struct { - ReplicationDeleteMarker bool - IsReplicationReadyForDeleteMarker bool - ReplicationProxyRequest string -} - -// GetObjectOptions are used to specify additional headers or options -// during GET requests. -type GetObjectOptions struct { - headers map[string]string - reqParams url.Values - ServerSideEncryption encrypt.ServerSide - VersionID string - PartNumber int - - // Include any checksums, if object was uploaded with checksum. - // For multipart objects this is a checksum of part checksums. - // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - Checksum bool - - // To be not used by external applications - Internal AdvancedGetOptions -} - -// StatObjectOptions are used to specify additional headers or options -// during GET info/stat requests. -type StatObjectOptions = GetObjectOptions - -// Header returns the http.Header representation of the GET options. 
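-// Custom headers set via Set, SSE-C encryption headers, the replication
-// proxy-request header, and the checksum mode header are all merged into
-// the returned http.Header.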
-func (o GetObjectOptions) Header() http.Header { - headers := make(http.Header, len(o.headers)) - for k, v := range o.headers { - headers.Set(k, v) - } - if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { - o.ServerSideEncryption.Marshal(headers) - } - // this header is set for active-active replication scenario where GET/HEAD - // to site A is proxy'd to site B if object/version missing on site A. - if o.Internal.ReplicationProxyRequest != "" { - headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest) - } - if o.Checksum { - headers.Set("x-amz-checksum-mode", "ENABLED") - } - return headers -} - -// Set adds a key value pair to the options. The -// key-value pair will be part of the HTTP GET request -// headers. -func (o *GetObjectOptions) Set(key, value string) { - if o.headers == nil { - o.headers = make(map[string]string) - } - o.headers[http.CanonicalHeaderKey(key)] = value -} - -// SetReqParam - set request query string parameter -// supported key: see supportedQueryValues and allowedCustomQueryPrefix. -// If an unsupported key is passed in, it will be ignored and nothing will be done. -func (o *GetObjectOptions) SetReqParam(key, value string) { - if !isCustomQueryValue(key) && !isStandardQueryValue(key) { - // do nothing - return - } - if o.reqParams == nil { - o.reqParams = make(url.Values) - } - o.reqParams.Set(key, value) -} - -// AddReqParam - add request query string parameter -// supported key: see supportedQueryValues and allowedCustomQueryPrefix. -// If an unsupported key is passed in, it will be ignored and nothing will be done. -func (o *GetObjectOptions) AddReqParam(key, value string) { - if !isCustomQueryValue(key) && !isStandardQueryValue(key) { - // do nothing - return - } - if o.reqParams == nil { - o.reqParams = make(url.Values) - } - o.reqParams.Add(key, value) -} - -// SetMatchETag - set match etag. -func (o *GetObjectOptions) SetMatchETag(etag string) error { - if etag == "" { - return errInvalidArgument("ETag cannot be empty.") - } - o.Set("If-Match", "\""+etag+"\"") - return nil -} - -// SetMatchETagExcept - set match etag except. -func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { - if etag == "" { - return errInvalidArgument("ETag cannot be empty.") - } - o.Set("If-None-Match", "\""+etag+"\"") - return nil -} - -// SetUnmodified - set unmodified time since. -func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { - if modTime.IsZero() { - return errInvalidArgument("Modified since cannot be empty.") - } - o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetModified - set modified time since. -func (o *GetObjectOptions) SetModified(modTime time.Time) error { - if modTime.IsZero() { - return errInvalidArgument("Modified since cannot be empty.") - } - o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetRange - set the start and end offset of the object to be read. -// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. -func (o *GetObjectOptions) SetRange(start, end int64) error { - switch { - case start == 0 && end < 0: - // Read last '-end' bytes. `bytes=-N`. - o.Set("Range", fmt.Sprintf("bytes=%d", end)) - case 0 < start && end == 0: - // Read everything starting from offset - // 'start'. `bytes=N-`. - o.Set("Range", fmt.Sprintf("bytes=%d-", start)) - case 0 <= start && start <= end: - // Read everything starting at 'start' till the - // 'end'. 
`bytes=N-M` - o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) - default: - // All other cases such as - // bytes=-3- - // bytes=5-3 - // bytes=-2-4 - // bytes=-3-0 - // bytes=-3--2 - // are invalid. - return errInvalidArgument( - fmt.Sprintf( - "Invalid range specified: start=%d end=%d", - start, end)) - } - return nil -} - -// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters. -func (o *GetObjectOptions) toQueryValues() url.Values { - urlValues := make(url.Values) - if o.VersionID != "" { - urlValues.Set("versionId", o.VersionID) - } - if o.PartNumber > 0 { - urlValues.Set("partNumber", strconv.Itoa(o.PartNumber)) - } - - if o.reqParams != nil { - for key, values := range o.reqParams { - for _, value := range values { - urlValues.Add(key, value) - } - } - } - - return urlValues -} diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go deleted file mode 100644 index 31b6edf..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ /dev/null @@ -1,1057 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "fmt" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// ListBuckets list all buckets owned by this authenticated user. -// -// This call requires explicit authentication, no anonymous requests are -// allowed for listing buckets. -// -// api := client.New(....) -// for message := range api.ListBuckets(context.Background()) { -// fmt.Println(message) -// } -func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { - // Execute GET on service. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) - defer closeResponse(resp) - if err != nil { - return nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, "", "") - } - } - listAllMyBucketsResult := listAllMyBucketsResult{} - err = xmlDecoder(resp.Body, &listAllMyBucketsResult) - if err != nil { - return nil, err - } - return listAllMyBucketsResult.Buckets.Bucket, nil -} - -// Bucket List Operations. -func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { - // Allocate new list objects channel. - objectStatCh := make(chan ObjectInfo, 1) - // Default listing is delimited at "/" - delimiter := "/" - if opts.Recursive { - // If recursive we do not delimit. - delimiter = "" - } - - // Return object owner information by default - fetchOwner := true - - sendObjectInfo := func(info ObjectInfo) { - select { - case objectStatCh <- info: - case <-ctx.Done(): - } - } - - // Validate bucket name. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectStatCh) - sendObjectInfo(ObjectInfo{ - Err: err, - }) - return objectStatCh - } - - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { - defer close(objectStatCh) - sendObjectInfo(ObjectInfo{ - Err: err, - }) - return objectStatCh - } - - // Initiate list objects goroutine here. - go func(objectStatCh chan<- ObjectInfo) { - defer func() { - if contextCanceled(ctx) { - objectStatCh <- ObjectInfo{ - Err: ctx.Err(), - } - } - close(objectStatCh) - }() - - // Save continuationToken for next request. - var continuationToken string - for { - // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, - fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) - if err != nil { - sendObjectInfo(ObjectInfo{ - Err: err, - }) - return - } - - // If contents are available loop through and send over channel. - for _, object := range result.Contents { - object.ETag = trimEtag(object.ETag) - select { - // Send object content. - case objectStatCh <- object: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send object prefixes. - case objectStatCh <- ObjectInfo{Key: obj.Prefix}: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // If continuation token present, save it for next request. - if result.NextContinuationToken != "" { - continuationToken = result.NextContinuationToken - } - - // Listing ends result is not truncated, return right here. - if !result.IsTruncated { - return - } - - // Add this to catch broken S3 API implementations. - if continuationToken == "" { - sendObjectInfo(ObjectInfo{ - Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL), - }) - return - } - } - }(objectStatCh) - return objectStatCh -} - -// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request parameters :- -// --------- -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?continuation-token - Used to continue iterating over a set of objects -// ?metadata - Specifies if we want metadata for the objects as part of list operation. -// ?delimiter - A delimiter is a character you use to group keys. -// ?start-after - Sets a marker to start listing lexically at this key onwards. -// ?max-keys - Sets the maximum number of keys returned in the response body. -func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ListBucketV2Result{}, err - } - // Validate object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return ListBucketV2Result{}, err - } - // Get resources properly escaped and lined up before - // using them in http request. 
- urlValues := make(url.Values) - - // Always set list-type in ListObjects V2 - urlValues.Set("list-type", "2") - - if metadata { - urlValues.Set("metadata", "true") - } - - // Set this conditionally if asked - if startAfter != "" { - urlValues.Set("start-after", startAfter) - } - - // Always set encoding-type in ListObjects V2 - urlValues.Set("encoding-type", "url") - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", objectPrefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Set continuation token - if continuationToken != "" { - urlValues.Set("continuation-token", continuationToken) - } - - // Fetch owner when listing - if fetchOwner { - urlValues.Set("fetch-owner", "true") - } - - // Set max keys. - if maxkeys > 0 { - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - } - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return ListBucketV2Result{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Decode listBuckets XML. - listBucketResult := ListBucketV2Result{} - if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { - return listBucketResult, err - } - - // This is an additional verification check to make - // sure proper responses are received. - if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { - return listBucketResult, ErrorResponse{ - Code: "NotImplemented", - Message: "Truncated response should have continuation token set", - } - } - - for i, obj := range listBucketResult.Contents { - listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond) - } - - for i, obj := range listBucketResult.CommonPrefixes { - listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - } - - // Success. - return listBucketResult, nil -} - -func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { - // Allocate new list objects channel. - objectStatCh := make(chan ObjectInfo, 1) - // Default listing is delimited at "/" - delimiter := "/" - if opts.Recursive { - // If recursive we do not delimit. - delimiter = "" - } - - sendObjectInfo := func(info ObjectInfo) { - select { - case objectStatCh <- info: - case <-ctx.Done(): - } - } - - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectStatCh) - sendObjectInfo(ObjectInfo{ - Err: err, - }) - return objectStatCh - } - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { - defer close(objectStatCh) - sendObjectInfo(ObjectInfo{ - Err: err, - }) - return objectStatCh - } - - // Initiate list objects goroutine here. 
- go func(objectStatCh chan<- ObjectInfo) { - defer func() { - if contextCanceled(ctx) { - objectStatCh <- ObjectInfo{ - Err: ctx.Err(), - } - } - close(objectStatCh) - }() - - marker := opts.StartAfter - for { - // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) - if err != nil { - sendObjectInfo(ObjectInfo{ - Err: err, - }) - return - } - - // If contents are available loop through and send over channel. - for _, object := range result.Contents { - // Save the marker. - marker = object.Key - object.ETag = trimEtag(object.ETag) - select { - // Send object content. - case objectStatCh <- object: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send object prefixes. - case objectStatCh <- ObjectInfo{Key: obj.Prefix}: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // If next marker present, save it for next request. - if result.NextMarker != "" { - marker = result.NextMarker - } - - // Listing ends result is not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(objectStatCh) - return objectStatCh -} - -func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { - // Allocate new list objects channel. - resultCh := make(chan ObjectInfo, 1) - // Default listing is delimited at "/" - delimiter := "/" - if opts.Recursive { - // If recursive we do not delimit. - delimiter = "" - } - - sendObjectInfo := func(info ObjectInfo) { - select { - case resultCh <- info: - case <-ctx.Done(): - } - } - - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(resultCh) - sendObjectInfo(ObjectInfo{ - Err: err, - }) - return resultCh - } - - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { - defer close(resultCh) - sendObjectInfo(ObjectInfo{ - Err: err, - }) - return resultCh - } - - // Initiate list objects goroutine here. - go func(resultCh chan<- ObjectInfo) { - defer func() { - if contextCanceled(ctx) { - resultCh <- ObjectInfo{ - Err: ctx.Err(), - } - } - close(resultCh) - }() - - var ( - keyMarker = "" - versionIDMarker = "" - ) - - for { - // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectVersionsQuery(ctx, bucketName, opts, keyMarker, versionIDMarker, delimiter) - if err != nil { - sendObjectInfo(ObjectInfo{ - Err: err, - }) - return - } - - // If contents are available loop through and send over channel. - for _, version := range result.Versions { - info := ObjectInfo{ - ETag: trimEtag(version.ETag), - Key: version.Key, - LastModified: version.LastModified.Truncate(time.Millisecond), - Size: version.Size, - Owner: version.Owner, - StorageClass: version.StorageClass, - IsLatest: version.IsLatest, - VersionID: version.VersionID, - IsDeleteMarker: version.isDeleteMarker, - UserTags: version.UserTags, - UserMetadata: version.UserMetadata, - Internal: version.Internal, - } - select { - // Send object version info. - case resultCh <- info: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // Send all common prefixes if any. 
- // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send object prefixes. - case resultCh <- ObjectInfo{Key: obj.Prefix}: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // If next key marker is present, save it for next request. - if result.NextKeyMarker != "" { - keyMarker = result.NextKeyMarker - } - - // If next version id marker is present, save it for next request. - if result.NextVersionIDMarker != "" { - versionIDMarker = result.NextVersionIDMarker - } - - // Listing ends result is not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(resultCh) - return resultCh -} - -// listObjectVersions - (List Object Versions) - List some or all (up to 1000) of the existing objects -// and their versions in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request parameters :- -// --------- -// ?key-marker - Specifies the key to start with when listing objects in a bucket. -// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-keys - Sets the maximum number of keys returned in the response body. -func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName string, opts ListObjectsOptions, keyMarker, versionIDMarker, delimiter string) (ListVersionsResult, error) { - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ListVersionsResult{}, err - } - // Validate object prefix. - if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { - return ListVersionsResult{}, err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - // Set versions to trigger versioning API - urlValues.Set("versions", "") - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", opts.Prefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Set object marker. - if keyMarker != "" { - urlValues.Set("key-marker", keyMarker) - } - - // Set max keys. - if opts.MaxKeys > 0 { - urlValues.Set("max-keys", fmt.Sprintf("%d", opts.MaxKeys)) - } - - // Set version ID marker - if versionIDMarker != "" { - urlValues.Set("version-id-marker", versionIDMarker) - } - - if opts.WithMetadata { - urlValues.Set("metadata", "true") - } - - // Always set encoding-type - urlValues.Set("encoding-type", "url") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - customHeader: opts.headers, - }) - defer closeResponse(resp) - if err != nil { - return ListVersionsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Decode ListVersionsResult XML. 
- listObjectVersionsOutput := ListVersionsResult{} - err = xmlDecoder(resp.Body, &listObjectVersionsOutput) - if err != nil { - return ListVersionsResult{}, err - } - - for i, obj := range listObjectVersionsOutput.Versions { - listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType) - if err != nil { - return listObjectVersionsOutput, err - } - } - - for i, obj := range listObjectVersionsOutput.CommonPrefixes { - listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType) - if err != nil { - return listObjectVersionsOutput, err - } - } - - if listObjectVersionsOutput.NextKeyMarker != "" { - listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType) - if err != nil { - return listObjectVersionsOutput, err - } - } - - return listObjectVersionsOutput, nil -} - -// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request parameters :- -// --------- -// ?marker - Specifies the key to start with when listing objects in a bucket. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-keys - Sets the maximum number of keys returned in the response body. -func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ListBucketResult{}, err - } - // Validate object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return ListBucketResult{}, err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", objectPrefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Set object marker. - if objectMarker != "" { - urlValues.Set("marker", objectMarker) - } - - // Set max keys. - if maxkeys > 0 { - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - } - - // Always set encoding-type - urlValues.Set("encoding-type", "url") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return ListBucketResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - // Decode listBuckets XML. 
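Editorial note: both listing paths send encoding-type=url unconditionally and then pass every returned key through decodeS3Name (defined at the end of this file), a thin wrapper over url.QueryUnescape. A standalone illustration of the decode step; the sample key is invented:

    package main

    import (
    	"fmt"
    	"net/url"
    )

    // Mirrors the vendored decodeS3Name helper: with encoding-type=url
    // the server percent-encodes keys, and '+' stands for a space.
    func decodeS3Name(name, encodingType string) (string, error) {
    	switch encodingType {
    	case "url":
    		return url.QueryUnescape(name)
    	default:
    		return name, nil
    	}
    }

    func main() {
    	decoded, err := decodeS3Name("photos%2F2024+summer%2Fbeach+%231.jpg", "url")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(decoded) // photos/2024 summer/beach #1.jpg
    }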
- listBucketResult := ListBucketResult{} - err = xmlDecoder(resp.Body, &listBucketResult) - if err != nil { - return listBucketResult, err - } - - for i, obj := range listBucketResult.Contents { - listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond) - } - - for i, obj := range listBucketResult.CommonPrefixes { - listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - } - - if listBucketResult.NextMarker != "" { - listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - } - - return listBucketResult, nil -} - -// ListObjectsOptions holds all options of a list object request -type ListObjectsOptions struct { - // Include objects versions in the listing - WithVersions bool - // Include objects metadata in the listing - WithMetadata bool - // Only list objects with the prefix - Prefix string - // Ignore '/' delimiter - Recursive bool - // The maximum number of objects requested per - // batch, advanced use-case not useful for most - // applications - MaxKeys int - // StartAfter start listing lexically at this - // object onwards, this value can also be set - // for Marker when `UseV1` is set to true. - StartAfter string - - // Use the deprecated list objects V1 API - UseV1 bool - - headers http.Header -} - -// Set adds a key value pair to the options. The -// key-value pair will be part of the HTTP GET request -// headers. -func (o *ListObjectsOptions) Set(key, value string) { - if o.headers == nil { - o.headers = make(http.Header) - } - o.headers.Set(key, value) -} - -// ListObjects returns objects list after evaluating the passed options. -// -// api := client.New(....) -// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { -// fmt.Println(object) -// } -// -// If caller cancels the context, then the last entry on the 'chan ObjectInfo' will be the context.Error() -// caller must drain the channel entirely and wait until channel is closed before proceeding, without -// waiting on the channel to be closed completely you might leak goroutines. -func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { - if opts.WithVersions { - return c.listObjectVersions(ctx, bucketName, opts) - } - - // Use legacy list objects v1 API - if opts.UseV1 { - return c.listObjects(ctx, bucketName, opts) - } - - // Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1. - if location, ok := c.bucketLocCache.Get(bucketName); ok { - if location == "snowball" { - return c.listObjects(ctx, bucketName, opts) - } - } - - return c.listObjectsV2(ctx, bucketName, opts) -} - -// ListIncompleteUploads - List incompletely uploaded multipart objects. -// -// ListIncompleteUploads lists all incompleted objects matching the -// objectPrefix from the specified bucket. If recursion is enabled -// it would list all subdirectories and all its contents. -// -// Your input parameters are just bucketName, objectPrefix, recursive. -// If you enable recursive as 'true' this function will return back all -// the multipart objects in a given bucket name. 
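Editorial note: ListObjectsOptions is the single switchboard for the V1, V2 and versioned listers, and Set attaches arbitrary headers to the listing GET. A sketch of both knobs; the header name and values are purely illustrative:

    package example

    import (
    	"context"

    	"github.com/minio/minio-go/v7"
    )

    func listWithV1(ctx context.Context, client *minio.Client) <-chan minio.ObjectInfo {
    	opts := minio.ListObjectsOptions{
    		Prefix:     "logs/",
    		StartAfter: "logs/2024-01-01", // used as 'marker' on the V1 path
    		UseV1:      true,              // force the legacy ListObjects V1 API
    	}
    	// Each pair set here is sent as an HTTP header on the GET request.
    	opts.Set("x-custom-audit-id", "example-42")
    	return client.ListObjects(ctx, "mybucket", opts)
    }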
-// -// api := client.New(....) -// // Recurively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { -// fmt.Println(message) -// } -func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { - return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive) -} - -// contextCanceled returns whether a context is canceled. -func contextCanceled(ctx context.Context) bool { - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -// listIncompleteUploads lists all incomplete uploads. -func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { - // Allocate channel for multipart uploads. - objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) - // Delimiter is set to "/" by default. - delimiter := "/" - if recursive { - // If recursive do not delimit. - delimiter = "" - } - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectMultipartStatCh) - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - return objectMultipartStatCh - } - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - defer close(objectMultipartStatCh) - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - return objectMultipartStatCh - } - go func(objectMultipartStatCh chan<- ObjectMultipartInfo) { - defer func() { - if contextCanceled(ctx) { - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: ctx.Err(), - } - } - close(objectMultipartStatCh) - }() - - // object and upload ID marker for future requests. - var objectMarker string - var uploadIDMarker string - for { - // list all multipart uploads. - result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0) - if err != nil { - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - return - } - objectMarker = result.NextKeyMarker - uploadIDMarker = result.NextUploadIDMarker - - // Send all multipart uploads. - for _, obj := range result.Uploads { - // Calculate total size of the uploaded parts if 'aggregateSize' is enabled. - select { - // Send individual uploads here. - case objectMultipartStatCh <- obj: - // If the context is canceled - case <-ctx.Done(): - return - } - } - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send delimited prefixes here. - case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}: - // If context is canceled. - case <-ctx.Done(): - return - } - } - // Listing ends if result not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(objectMultipartStatCh) - // return. - return objectMultipartStatCh -} - -// listMultipartUploadsQuery - (List Multipart Uploads). -// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. -// request parameters. :- -// --------- -// ?key-marker - Specifies the multipart upload after which listing should begin. 
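Editorial note: consumers of ListIncompleteUploads follow the same drain-and-check-Err discipline as the object listings above. A minimal sketch with invented names:

    package example

    import (
    	"context"
    	"fmt"

    	"github.com/minio/minio-go/v7"
    )

    func printStaleUploads(ctx context.Context, client *minio.Client) error {
    	// recursive=true walks all prefixes under "backups/".
    	for upload := range client.ListIncompleteUploads(ctx, "mybucket", "backups/", true) {
    		if upload.Err != nil {
    			return upload.Err
    		}
    		fmt.Println(upload.Key, upload.UploadID, upload.Initiated)
    	}
    	return nil
    }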
-// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. -func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set uploads. - urlValues.Set("uploads", "") - // Set object key marker. - if keyMarker != "" { - urlValues.Set("key-marker", keyMarker) - } - // Set upload id marker. - if uploadIDMarker != "" { - urlValues.Set("upload-id-marker", uploadIDMarker) - } - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", prefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Always set encoding-type - urlValues.Set("encoding-type", "url") - - // maxUploads should be 1000 or less. - if maxUploads > 0 { - // Set max-uploads. - urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) - } - - // Execute GET on bucketName to list multipart uploads. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListMultipartUploadsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - // Decode response body. - listMultipartUploadsResult := ListMultipartUploadsResult{} - err = xmlDecoder(resp.Body, &listMultipartUploadsResult) - if err != nil { - return listMultipartUploadsResult, err - } - - listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType) - if err != nil { - return listMultipartUploadsResult, err - } - - listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType) - if err != nil { - return listMultipartUploadsResult, err - } - - for i, obj := range listMultipartUploadsResult.Uploads { - listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType) - if err != nil { - return listMultipartUploadsResult, err - } - } - - for i, obj := range listMultipartUploadsResult.CommonPrefixes { - listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType) - if err != nil { - return listMultipartUploadsResult, err - } - } - - return listMultipartUploadsResult, nil -} - -// listObjectParts list all object parts recursively. -// -//lint:ignore U1000 Keep this around -func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { - // Part number marker for the next batch of request. - var nextPartNumberMarker int - partsInfo = make(map[int]ObjectPart) - for { - // Get list of uploaded parts a maximum of 1000 per request. 
- listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000) - if err != nil { - return nil, err - } - // Append to parts info. - for _, part := range listObjPartsResult.ObjectParts { - // Trim off the odd double quotes from ETag in the beginning and end. - part.ETag = trimEtag(part.ETag) - partsInfo[part.PartNumber] = part - } - // Keep part number marker, for the next iteration. - nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker - // Listing ends result is not truncated, return right here. - if !listObjPartsResult.IsTruncated { - break - } - } - - // Return all the parts. - return partsInfo, nil -} - -// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. -func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { - var uploadIDs []string - // Make list incomplete uploads recursive. - isRecursive := true - // List all incomplete uploads. - for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) { - if mpUpload.Err != nil { - return nil, mpUpload.Err - } - if objectName == mpUpload.Key { - uploadIDs = append(uploadIDs, mpUpload.UploadID) - } - } - // Return the latest upload id. - return uploadIDs, nil -} - -// listObjectPartsQuery (List Parts query) -// - lists some or all (up to 1000) parts that have been uploaded -// for a specific multipart upload -// -// You can use the request parameters as selection criteria to return -// a subset of the uploads in a bucket, request parameters :- -// --------- -// ?part-number-marker - Specifies the part after which listing should -// begin. -// ?max-parts - Maximum parts to be listed per request. -func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set part number marker. - urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) - // Set upload id. - urlValues.Set("uploadId", uploadID) - - // maxParts should be 1000 or less. - if maxParts > 0 { - // Set max parts. - urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) - } - - // Execute GET on objectName to get list of parts. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListObjectPartsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Decode list object parts XML. 
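Editorial note: listObjectParts pages with part-number-marker and accumulates results into a map keyed by part number; a typical use of such a map is summing the bytes uploaded so far. A self-contained sketch with a stand-in part type (the vendored ObjectPart carries the same relevant fields):

    package example

    // objectPart stands in for the vendored ObjectPart type so the
    // sketch compiles on its own.
    type objectPart struct {
    	PartNumber int
    	Size       int64
    }

    // totalUploadedSize sums part sizes as returned by a
    // listObjectParts-style call.
    func totalUploadedSize(partsInfo map[int]objectPart) (total int64) {
    	for _, part := range partsInfo {
    		total += part.Size
    	}
    	return total
    }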
- listObjectPartsResult := ListObjectPartsResult{} - err = xmlDecoder(resp.Body, &listObjectPartsResult) - if err != nil { - return listObjectPartsResult, err - } - return listObjectPartsResult, nil -} - -// Decode an S3 object name according to the encoding type -func decodeS3Name(name, encodingType string) (string, error) { - switch encodingType { - case "url": - return url.QueryUnescape(name) - default: - return name, nil - } -} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go deleted file mode 100644 index 0c027d5..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// objectLegalHold - object legal hold specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html -type objectLegalHold struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"LegalHold"` - Status LegalHoldStatus `xml:"Status,omitempty"` -} - -// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call -type PutObjectLegalHoldOptions struct { - VersionID string - Status *LegalHoldStatus -} - -// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call -type GetObjectLegalHoldOptions struct { - VersionID string -} - -// LegalHoldStatus - object legal hold status. -type LegalHoldStatus string - -const ( - // LegalHoldEnabled indicates legal hold is enabled - LegalHoldEnabled LegalHoldStatus = "ON" - - // LegalHoldDisabled indicates legal hold is disabled - LegalHoldDisabled LegalHoldStatus = "OFF" -) - -func (r LegalHoldStatus) String() string { - return string(r) -} - -// IsValid - check whether this legal hold status is valid or not. -func (r LegalHoldStatus) IsValid() bool { - return r == LegalHoldEnabled || r == LegalHoldDisabled -} - -func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { - if status == nil { - return nil, fmt.Errorf("Status not set") - } - if !status.IsValid() { - return nil, fmt.Errorf("invalid legal hold status `%v`", status) - } - legalHold := &objectLegalHold{ - Status: *status, - } - return legalHold, nil -} - -// PutObjectLegalHold : sets object legal hold for a given object and versionID. -func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. 
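Editorial note: putting the legal-hold types together, a hedged usage sketch of the two calls defined in this file (bucket and object names invented):

    package example

    import (
    	"context"
    	"fmt"

    	"github.com/minio/minio-go/v7"
    )

    func holdAndVerify(ctx context.Context, client *minio.Client) error {
    	status := minio.LegalHoldEnabled
    	err := client.PutObjectLegalHold(ctx, "mybucket", "contracts/q1.pdf",
    		minio.PutObjectLegalHoldOptions{Status: &status})
    	if err != nil {
    		return err
    	}
    	got, err := client.GetObjectLegalHold(ctx, "mybucket", "contracts/q1.pdf",
    		minio.GetObjectLegalHoldOptions{})
    	if err != nil {
    		return err
    	}
    	fmt.Println("legal hold:", *got) // expected: ON
    	return nil
    }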
- urlValues := make(url.Values) - urlValues.Set("legal-hold", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - lh, err := newObjectLegalHold(opts.Status) - if err != nil { - return err - } - - lhData, err := xml.Marshal(lh) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: bytes.NewReader(lhData), - contentLength: int64(len(lhData)), - contentMD5Base64: sumMD5Base64(lhData), - contentSHA256Hex: sum256Hex(lhData), - } - - // Execute PUT Object Legal Hold. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - return nil -} - -// GetObjectLegalHold gets legal-hold status of given object. -func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - urlValues := make(url.Values) - urlValues.Set("legal-hold", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - lh := &objectLegalHold{} - if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil { - return nil, err - } - - return &lh.Status, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go deleted file mode 100644 index f0a4398..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-object-lock.go +++ /dev/null @@ -1,241 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// RetentionMode - object retention mode. -type RetentionMode string - -const ( - // Governance - governance mode. - Governance RetentionMode = "GOVERNANCE" - - // Compliance - compliance mode. - Compliance RetentionMode = "COMPLIANCE" -) - -func (r RetentionMode) String() string { - return string(r) -} - -// IsValid - check whether this retention mode is valid or not. 
-func (r RetentionMode) IsValid() bool { - return r == Governance || r == Compliance -} - -// ValidityUnit - retention validity unit. -type ValidityUnit string - -const ( - // Days - denotes no. of days. - Days ValidityUnit = "DAYS" - - // Years - denotes no. of years. - Years ValidityUnit = "YEARS" -) - -func (unit ValidityUnit) String() string { - return string(unit) -} - -// IsValid - check whether this validity unit is valid or not. -func (unit ValidityUnit) isValid() bool { - return unit == Days || unit == Years -} - -// Retention - bucket level retention configuration. -type Retention struct { - Mode RetentionMode - Validity time.Duration -} - -func (r Retention) String() string { - return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity) -} - -// IsEmpty - returns whether retention is empty or not. -func (r Retention) IsEmpty() bool { - return r.Mode == "" || r.Validity == 0 -} - -// objectLockConfig - object lock configuration specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html -type objectLockConfig struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"ObjectLockConfiguration"` - ObjectLockEnabled string `xml:"ObjectLockEnabled"` - Rule *struct { - DefaultRetention struct { - Mode RetentionMode `xml:"Mode"` - Days *uint `xml:"Days"` - Years *uint `xml:"Years"` - } `xml:"DefaultRetention"` - } `xml:"Rule,omitempty"` -} - -func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) { - config := &objectLockConfig{ - ObjectLockEnabled: "Enabled", - } - - if mode != nil && validity != nil && unit != nil { - if !mode.IsValid() { - return nil, fmt.Errorf("invalid retention mode `%v`", mode) - } - - if !unit.isValid() { - return nil, fmt.Errorf("invalid validity unit `%v`", unit) - } - - config.Rule = &struct { - DefaultRetention struct { - Mode RetentionMode `xml:"Mode"` - Days *uint `xml:"Days"` - Years *uint `xml:"Years"` - } `xml:"DefaultRetention"` - }{} - - config.Rule.DefaultRetention.Mode = *mode - if *unit == Days { - config.Rule.DefaultRetention.Days = validity - } else { - config.Rule.DefaultRetention.Years = validity - } - - return config, nil - } - - if mode == nil && validity == nil && unit == nil { - return config, nil - } - - return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed") -} - -// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. -func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("object-lock", "") - - config, err := newObjectLockConfig(mode, validity, unit) - if err != nil { - return err - } - - configData, err := xml.Marshal(config) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(configData), - contentLength: int64(len(configData)), - contentMD5Base64: sumMD5Base64(configData), - contentSHA256Hex: sum256Hex(configData), - } - - // Execute PUT bucket object lock configuration. 
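Editorial note: newObjectLockConfig enforces an all-or-nothing rule on mode, validity and unit, and the public wrapper mirrors that contract. A usage sketch with invented names:

    package example

    import (
    	"context"

    	"github.com/minio/minio-go/v7"
    )

    func lockBucket(ctx context.Context, client *minio.Client) error {
    	mode := minio.Governance
    	validity := uint(30)
    	unit := minio.Days
    	// Pass all three pointers together, or all nil to reset to a
    	// bare "ObjectLockEnabled" configuration.
    	return client.SetBucketObjectLockConfig(ctx, "mybucket", &mode, &validity, &unit)
    }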
- resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// GetObjectLockConfig gets object lock configuration of given bucket. -func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", nil, nil, nil, err - } - - urlValues := make(url.Values) - urlValues.Set("object-lock", "") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return "", nil, nil, nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "") - } - } - config := &objectLockConfig{} - if err = xml.NewDecoder(resp.Body).Decode(config); err != nil { - return "", nil, nil, nil, err - } - - if config.Rule != nil { - mode = &config.Rule.DefaultRetention.Mode - if config.Rule.DefaultRetention.Days != nil { - validity = config.Rule.DefaultRetention.Days - days := Days - unit = &days - } else { - validity = config.Rule.DefaultRetention.Years - years := Years - unit = &years - } - return config.ObjectLockEnabled, mode, validity, unit, nil - } - return config.ObjectLockEnabled, nil, nil, nil, nil -} - -// GetBucketObjectLockConfig gets object lock configuration of given bucket. -func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { - _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName) - return mode, validity, unit, err -} - -// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. -func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { - return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit) -} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go deleted file mode 100644 index b29cb1f..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-object-retention.go +++ /dev/null @@ -1,165 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// objectRetention - object retention specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html -type objectRetention struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"Retention"` - Mode RetentionMode `xml:"Mode,omitempty"` - RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"` -} - -func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) { - objectRetention := &objectRetention{} - - if date != nil && !date.IsZero() { - objectRetention.RetainUntilDate = date - } - if mode != nil { - if !mode.IsValid() { - return nil, fmt.Errorf("invalid retention mode `%v`", mode) - } - objectRetention.Mode = *mode - } - - return objectRetention, nil -} - -// PutObjectRetentionOptions represents options specified by user for PutObject call -type PutObjectRetentionOptions struct { - GovernanceBypass bool - Mode *RetentionMode - RetainUntilDate *time.Time - VersionID string -} - -// PutObjectRetention sets object retention for a given object and versionID. -func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("retention", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate) - if err != nil { - return err - } - - retentionData, err := xml.Marshal(retention) - if err != nil { - return err - } - - // Build headers. - headers := make(http.Header) - - if opts.GovernanceBypass { - // Set the bypass goverenance retention header - headers.Set(amzBypassGovernance, "true") - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: bytes.NewReader(retentionData), - contentLength: int64(len(retentionData)), - contentMD5Base64: sumMD5Base64(retentionData), - contentSHA256Hex: sum256Hex(retentionData), - customHeader: headers, - } - - // Execute PUT Object Retention. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - return nil -} - -// GetObjectRetention gets retention of given object. -func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, nil, err - } - - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, nil, err - } - urlValues := make(url.Values) - urlValues.Set("retention", "") - if versionID != "" { - urlValues.Set("versionId", versionID) - } - // Execute GET on bucket to list objects. 
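Editorial note: a usage sketch for the retention API defined in this file; GovernanceBypass maps to the bypass-governance header set a few lines below. Names and dates are invented:

    package example

    import (
    	"context"
    	"time"

    	"github.com/minio/minio-go/v7"
    )

    func extendRetention(ctx context.Context, client *minio.Client) error {
    	mode := minio.Governance
    	until := time.Now().UTC().AddDate(0, 0, 90) // keep for 90 days
    	return client.PutObjectRetention(ctx, "mybucket", "audit/2024.log",
    		minio.PutObjectRetentionOptions{
    			GovernanceBypass: true, // permit overriding GOVERNANCE retention
    			Mode:             &mode,
    			RetainUntilDate:  &until,
    		})
    }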
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return nil, nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - retention := &objectRetention{} - if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil { - return nil, nil, err - } - - return &retention.Mode, retention.RetainUntilDate, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go deleted file mode 100644 index 6623e26..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go +++ /dev/null @@ -1,177 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/tags" -) - -// PutObjectTaggingOptions holds an object version id -// to update tag(s) of a specific object version -type PutObjectTaggingOptions struct { - VersionID string - Internal AdvancedObjectTaggingOptions -} - -// AdvancedObjectTaggingOptions for internal use by MinIO server - not intended for client use. -type AdvancedObjectTaggingOptions struct { - ReplicationProxyRequest string -} - -// PutObjectTagging replaces or creates object tag(s) and can target -// a specific object version in a versioned bucket. -func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("tagging", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - headers := make(http.Header, 0) - if opts.Internal.ReplicationProxyRequest != "" { - headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) - } - reqBytes, err := xml.Marshal(otags) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: bytes.NewReader(reqBytes), - contentLength: int64(len(reqBytes)), - contentMD5Base64: sumMD5Base64(reqBytes), - customHeader: headers, - } - - // Execute PUT to set a object tagging. 
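Editorial note: a sketch of driving PutObjectTagging with the tags helper package; passing true to tags.NewTags validates against object-tagging limits rather than bucket-tagging limits. Bucket, object and tag names are invented:

    package example

    import (
    	"context"

    	"github.com/minio/minio-go/v7"
    	"github.com/minio/minio-go/v7/pkg/tags"
    )

    func tagObject(ctx context.Context, client *minio.Client) error {
    	otags, err := tags.NewTags(map[string]string{
    		"project": "alpha",
    		"tier":    "cold",
    	}, true) // true: object tags (stricter limits than bucket tags)
    	if err != nil {
    		return err
    	}
    	return client.PutObjectTagging(ctx, "mybucket", "data/blob.bin", otags,
    		minio.PutObjectTaggingOptions{})
    }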
- resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - return nil -} - -// GetObjectTaggingOptions holds the object version ID -// to fetch the tagging key/value pairs -type GetObjectTaggingOptions struct { - VersionID string - Internal AdvancedObjectTaggingOptions -} - -// GetObjectTagging fetches object tag(s) with options to target -// a specific object version in a versioned bucket. -func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("tagging", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - headers := make(http.Header, 0) - if opts.Internal.ReplicationProxyRequest != "" { - headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) - } - // Execute GET on object to get object tag(s) - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: headers, - }) - - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - return tags.ParseObjectXML(resp.Body) -} - -// RemoveObjectTaggingOptions holds the version id of the object to remove -type RemoveObjectTaggingOptions struct { - VersionID string - Internal AdvancedObjectTaggingOptions -} - -// RemoveObjectTagging removes object tag(s) with options to control a specific object -// version in a versioned bucket -func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("tagging", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - headers := make(http.Header, 0) - if opts.Internal.ReplicationProxyRequest != "" { - headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) - } - // Execute DELETE on object to remove object tag(s) - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: headers, - }) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp != nil { - // S3 returns "204 No content" after Object tag deletion. - if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - return err -} diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go deleted file mode 100644 index 9e85f81..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-presigned.go +++ /dev/null @@ -1,228 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "errors" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/signer" -) - -// presignURL - Returns a presigned URL for an input 'method'. -// Expires maximum is 7days - ie. 604800 and minimum is 1. -func (c *Client) presignURL(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { - // Input validation. - if method == "" { - return nil, errInvalidArgument("method cannot be empty.") - } - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err = isValidExpiry(expires); err != nil { - return nil, err - } - - // Convert expires into seconds. - expireSeconds := int64(expires / time.Second) - reqMetadata := requestMetadata{ - presignURL: true, - bucketName: bucketName, - objectName: objectName, - expires: expireSeconds, - queryValues: reqParams, - extraPresignHeader: extraHeaders, - } - - // Instantiate a new request. - // Since expires is set newRequest will presign the request. - var req *http.Request - if req, err = c.newRequest(ctx, method, reqMetadata); err != nil { - return nil, err - } - return req.URL, nil -} - -// PresignedGetObject - Returns a presigned URL to access an object -// data without credentials. URL can have a maximum expiry of -// upto 7days or a minimum of 1sec. Additionally you can override -// a set of response headers using the query parameters. -func (c *Client) PresignedGetObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil) -} - -// PresignedHeadObject - Returns a presigned URL to access -// object metadata without credentials. URL can have a maximum expiry -// of upto 7days or a minimum of 1sec. Additionally you can override -// a set of response headers using the query parameters. -func (c *Client) PresignedHeadObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil) -} - -// PresignedPutObject - Returns a presigned URL to upload an object -// without credentials. URL can have a maximum expiry of upto 7days -// or a minimum of 1sec. -func (c *Client) PresignedPutObject(ctx context.Context, bucketName, objectName string, expires time.Duration) (u *url.URL, err error) { - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil) -} - -// PresignHeader - similar to Presign() but allows including HTTP headers that -// will be used to build the signature. 
The request using the resulting URL will -// need to have the exact same headers to be added for signature validation to -// pass. -// -// FIXME: The extra header parameter should be included in Presign() in the next -// major version bump, and this function should then be deprecated. -func (c *Client) PresignHeader(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { - return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders) -} - -// Presign - returns a presigned URL for any http method of your choice along -// with custom request params and extra signed headers. URL can have a maximum -// expiry of upto 7days or a minimum of 1sec. -func (c *Client) Presign(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil) -} - -// PresignedPostPolicy - Returns POST urlString, form data to upload an object. -func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) { - // Validate input arguments. - if p.expiration.IsZero() { - return nil, nil, errors.New("Expiration time must be specified") - } - if _, ok := p.formData["key"]; !ok { - return nil, nil, errors.New("object key must be specified") - } - if _, ok := p.formData["bucket"]; !ok { - return nil, nil, errors.New("bucket name must be specified") - } - - bucketName := p.formData["bucket"] - // Fetch the bucket location. - location, err := c.getBucketLocation(ctx, bucketName) - if err != nil { - return nil, nil, err - } - - isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) - - u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) - if err != nil { - return nil, nil, err - } - - // Get credentials from the configured credentials provider. - credValues, err := c.credsProvider.Get() - if err != nil { - return nil, nil, err - } - - var ( - signerType = credValues.SignerType - sessionToken = credValues.SessionToken - accessKeyID = credValues.AccessKeyID - secretAccessKey = credValues.SecretAccessKey - ) - - if signerType.IsAnonymous() { - return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials") - } - - // Keep time. - t := time.Now().UTC() - // For signature version '2' handle here. - if signerType.IsV2() { - policyBase64 := p.base64() - p.formData["policy"] = policyBase64 - // For Google endpoint set this value to be 'GoogleAccessId'. - if s3utils.IsGoogleEndpoint(*c.endpointURL) { - p.formData["GoogleAccessId"] = accessKeyID - } else { - // For all other endpoints set this value to be 'AWSAccessKeyId'. - p.formData["AWSAccessKeyId"] = accessKeyID - } - // Sign the policy. - p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey) - return u, p.formData, nil - } - - // Add date policy. - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-date", - value: t.Format(iso8601DateFormat), - }); err != nil { - return nil, nil, err - } - - // Add algorithm policy. - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-algorithm", - value: signV4Algorithm, - }); err != nil { - return nil, nil, err - } - - // Add a credential policy. 
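Editorial note: PresignedPostPolicy returns both the POST target URL and the signed form fields; the caller submits those fields plus the payload as multipart form data. A hedged sketch (bucket and key invented):

    package example

    import (
    	"context"
    	"fmt"
    	"time"

    	"github.com/minio/minio-go/v7"
    )

    func presignPost(ctx context.Context, client *minio.Client) error {
    	policy := minio.NewPostPolicy()
    	if err := policy.SetBucket("mybucket"); err != nil {
    		return err
    	}
    	if err := policy.SetKey("uploads/form-upload.bin"); err != nil {
    		return err
    	}
    	if err := policy.SetExpires(time.Now().UTC().Add(10 * time.Minute)); err != nil {
    		return err
    	}
    	u, formData, err := client.PresignedPostPolicy(ctx, policy)
    	if err != nil {
    		return err
    	}
    	// formData carries policy, x-amz-algorithm, x-amz-credential,
    	// x-amz-date and x-amz-signature; POST these as multipart fields
    	// together with the file under the "file" form name.
    	fmt.Println("POST to:", u)
    	for k, v := range formData {
    		fmt.Printf("  %s=%s\n", k, v)
    	}
    	return nil
    }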
- credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3) - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-credential", - value: credential, - }); err != nil { - return nil, nil, err - } - - if sessionToken != "" { - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-security-token", - value: sessionToken, - }); err != nil { - return nil, nil, err - } - } - - // Get base64 encoded policy. - policyBase64 := p.base64() - - // Fill in the form data. - p.formData["policy"] = policyBase64 - p.formData["x-amz-algorithm"] = signV4Algorithm - p.formData["x-amz-credential"] = credential - p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - if sessionToken != "" { - p.formData["x-amz-security-token"] = sessionToken - } - p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) - return u, p.formData, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go deleted file mode 100644 index 7376669..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "net/http" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// Bucket operations -func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { - // Validate the input arguments. - if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { - return err - } - - err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking) - if err != nil && (opts.Region == "" || opts.Region == "us-east-1") { - if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" { - err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking) - } - } - return err -} - -func (c *Client) doMakeBucket(ctx context.Context, bucketName, location string, objectLockEnabled bool) (err error) { - defer func() { - // Save the location into cache on a successful makeBucket response. - if err == nil { - c.bucketLocCache.Set(bucketName, location) - } - }() - - // If location is empty, treat is a default region 'us-east-1'. - if location == "" { - location = "us-east-1" - // For custom region clients, default - // to custom region instead not 'us-east-1'. - if c.region != "" { - location = c.region - } - } - // PUT bucket request metadata. - reqMetadata := requestMetadata{ - bucketName: bucketName, - bucketLocation: location, - } - - if objectLockEnabled { - headers := make(http.Header) - headers.Add("x-amz-bucket-object-lock-enabled", "true") - reqMetadata.customHeader = headers - } - - // If location is not 'us-east-1' create bucket location config. 
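Editorial note: a usage sketch of bucket creation with the options defined just below (region and bucket name invented); ObjectLocking corresponds to the x-amz-bucket-object-lock-enabled header set above:

    package example

    import (
    	"context"

    	"github.com/minio/minio-go/v7"
    )

    func createLockedBucket(ctx context.Context, client *minio.Client) error {
    	return client.MakeBucket(ctx, "compliance-archive", minio.MakeBucketOptions{
    		Region:        "eu-west-1", // shipped as the CreateBucketConfiguration body
    		ObjectLocking: true,        // adds x-amz-bucket-object-lock-enabled: true
    	})
    }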
- if location != "us-east-1" && location != "" { - createBucketConfig := createBucketConfiguration{} - createBucketConfig.Location = location - var createBucketConfigBytes []byte - createBucketConfigBytes, err = xml.Marshal(createBucketConfig) - if err != nil { - return err - } - reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) - reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) - reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) - reqMetadata.contentLength = int64(len(createBucketConfigBytes)) - } - - // Execute PUT to create a new bucket. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Success. - return nil -} - -// MakeBucketOptions holds all options to tweak bucket creation -type MakeBucketOptions struct { - // Bucket location - Region string - // Enable object locking - ObjectLocking bool -} - -// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts. -// -// Location is an optional argument, by default all buckets are -// created in US Standard Region. -// -// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html -// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations -func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { - return c.makeBucket(ctx, bucketName, opts) -} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go deleted file mode 100644 index 9ccb97c..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go +++ /dev/null @@ -1,149 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "math" - "os" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -const nullVersionID = "null" - -// Verify if reader is *minio.Object -func isObject(reader io.Reader) (ok bool) { - _, ok = reader.(*Object) - return -} - -// Verify if reader is a generic ReaderAt -func isReadAt(reader io.Reader) (ok bool) { - var v *os.File - v, ok = reader.(*os.File) - if ok { - // Stdin, Stdout and Stderr all have *os.File type - // which happen to also be io.ReaderAt compatible - // we need to add special conditions for them to - // be ignored by this function. - for _, f := range []string{ - "/dev/stdin", - "/dev/stdout", - "/dev/stderr", - } { - if f == v.Name() { - ok = false - break - } - } - } else { - _, ok = reader.(io.ReaderAt) - } - return -} - -// OptimalPartInfo - calculate the optimal part info for a given -// object size. 
-// -// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible -// object storage it will have the following parameters as constants. -// -// maxPartsCount - 10000 -// minPartSize - 16MiB -// maxMultipartPutObjectSize - 5TiB -func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) { - // object size is '-1' set it to 5TiB. - var unknownSize bool - if objectSize == -1 { - unknownSize = true - objectSize = maxMultipartPutObjectSize - } - - // object size is larger than supported maximum. - if objectSize > maxMultipartPutObjectSize { - err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") - return - } - - var partSizeFlt float64 - if configuredPartSize > 0 { - if int64(configuredPartSize) > objectSize { - err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "") - return - } - - if !unknownSize { - if objectSize > (int64(configuredPartSize) * maxPartsCount) { - err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.") - return - } - } - - if configuredPartSize < absMinPartSize { - err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.") - return - } - - if configuredPartSize > maxPartSize { - err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.") - return - } - - partSizeFlt = float64(configuredPartSize) - if unknownSize { - // If input has unknown size and part size is configured - // keep it to maximum allowed as per 10000 parts. - objectSize = int64(configuredPartSize) * maxPartsCount - } - } else { - configuredPartSize = minPartSize - // Use floats for part size for all calculations to avoid - // overflows during float64 to int64 conversions. - partSizeFlt = float64(objectSize / maxPartsCount) - partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize) - } - - // Total parts count. - totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) - // Part size. - partSize = int64(partSizeFlt) - // Last part size. - lastPartSize = objectSize - int64(totalPartsCount-1)*partSize - return totalPartsCount, partSize, lastPartSize, nil -} - -// getUploadID - fetch upload id if already present for an object name -// or initiate a new request to fetch a new upload id. -func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return "", err - } - - // Initiate multipart upload for an object. - initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) - if err != nil { - return "", err - } - return initMultipartUploadResult.UploadID, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go deleted file mode 100644 index 0ae9142..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2023 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
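Editorial note: OptimalPartInfo is exported, so its sizing arithmetic can be exercised directly. For an unknown (-1) size it plans against the 5 TiB maximum, rounding the part size up to a multiple of the minimum part size so that the object fits in at most 10000 parts. A sketch:

    package main

    import (
    	"fmt"

    	"github.com/minio/minio-go/v7"
    )

    func main() {
    	// -1 means "size unknown": plan for the 5 TiB ceiling with the
    	// default (zero) configured part size.
    	totalParts, partSize, lastPartSize, err := minio.OptimalPartInfo(-1, 0)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(totalParts, partSize, lastPartSize)
    }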
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "encoding/json" - "errors" - "io" - "mime/multipart" - "net/http" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v7/pkg/encrypt" -) - -// PutObjectFanOutEntry is per object entry fan-out metadata -type PutObjectFanOutEntry struct { - Key string `json:"key"` - UserMetadata map[string]string `json:"metadata,omitempty"` - UserTags map[string]string `json:"tags,omitempty"` - ContentType string `json:"contentType,omitempty"` - ContentEncoding string `json:"contentEncoding,omitempty"` - ContentDisposition string `json:"contentDisposition,omitempty"` - ContentLanguage string `json:"contentLanguage,omitempty"` - CacheControl string `json:"cacheControl,omitempty"` - Retention RetentionMode `json:"retention,omitempty"` - RetainUntilDate *time.Time `json:"retainUntil,omitempty"` -} - -// PutObjectFanOutRequest this is the request structure sent -// to the server to fan-out the stream to multiple objects. -type PutObjectFanOutRequest struct { - Entries []PutObjectFanOutEntry - Checksum Checksum - SSE encrypt.ServerSide -} - -// PutObjectFanOutResponse this is the response structure sent -// by the server upon success or failure for each object -// fan-out keys. Additionally, this response carries ETag, -// VersionID and LastModified for each object fan-out. -type PutObjectFanOutResponse struct { - Key string `json:"key"` - ETag string `json:"etag,omitempty"` - VersionID string `json:"versionId,omitempty"` - LastModified *time.Time `json:"lastModified,omitempty"` - Error string `json:"error,omitempty"` -} - -// PutObjectFanOut - is a variant of PutObject instead of writing a single object from a single -// stream multiple objects are written, defined via a list of PutObjectFanOutRequests. Each entry -// in PutObjectFanOutRequest carries an object keyname and its relevant metadata if any. `Key` is -// mandatory, rest of the other options in PutObjectFanOutRequest are optional. -func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData io.Reader, fanOutReq PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) { - if len(fanOutReq.Entries) == 0 { - return nil, errInvalidArgument("fan out requests cannot be empty") - } - - policy := NewPostPolicy() - policy.SetBucket(bucket) - policy.SetKey(strconv.FormatInt(time.Now().UnixNano(), 16)) - - // Expires in 15 minutes. - policy.SetExpires(time.Now().UTC().Add(15 * time.Minute)) - - // Set encryption headers if any. - policy.SetEncryption(fanOutReq.SSE) - - // Set checksum headers if any. 
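// A hedged usage sketch for the fan-out types above: one stream is written to
// two keys in a single call. It assumes a configured *minio.Client named
// client; bucket and key names are placeholders.
// (Assumed imports: context, fmt, io, github.com/minio/minio-go/v7.)
func fanOutExample(ctx context.Context, client *minio.Client, data io.Reader) error {
	req := minio.PutObjectFanOutRequest{
		Entries: []minio.PutObjectFanOutEntry{
			{Key: "copies/a.bin", ContentType: "application/octet-stream"},
			{Key: "copies/b.bin", UserTags: map[string]string{"kind": "fanout"}},
		},
	}
	resps, err := client.PutObjectFanOut(ctx, "my-bucket", data, req)
	if err != nil {
		return err
	}
	for _, r := range resps {
		if r.Error != "" {
			return fmt.Errorf("fan-out of %q failed: %s", r.Key, r.Error)
		}
	}
	return nil
}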
- policy.SetChecksum(fanOutReq.Checksum) - - url, formData, err := c.PresignedPostPolicy(ctx, policy) - if err != nil { - return nil, err - } - - r, w := io.Pipe() - - req, err := http.NewRequest(http.MethodPost, url.String(), r) - if err != nil { - w.Close() - return nil, err - } - - var b strings.Builder - enc := json.NewEncoder(&b) - for _, req := range fanOutReq.Entries { - if req.Key == "" { - w.Close() - return nil, errors.New("PutObjectFanOutRequest.Key is mandatory and cannot be empty") - } - if err = enc.Encode(&req); err != nil { - w.Close() - return nil, err - } - } - - mwriter := multipart.NewWriter(w) - req.Header.Add("Content-Type", mwriter.FormDataContentType()) - - go func() { - defer w.Close() - defer mwriter.Close() - - for k, v := range formData { - if err := mwriter.WriteField(k, v); err != nil { - return - } - } - - if err := mwriter.WriteField("x-minio-fanout-list", b.String()); err != nil { - return - } - - mw, err := mwriter.CreateFormFile("file", "fanout-content") - if err != nil { - return - } - - if _, err = io.Copy(mw, fanOutData); err != nil { - return - } - }() - - resp, err := c.do(req) - if err != nil { - return nil, err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucket, "fanout-content") - } - - dec := json.NewDecoder(resp.Body) - fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq.Entries)) - for dec.More() { - var m PutObjectFanOutResponse - if err = dec.Decode(&m); err != nil { - return nil, err - } - fanOutResp = append(fanOutResp, m) - } - - return fanOutResp, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go deleted file mode 100644 index 4d29dfc..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "mime" - "os" - "path/filepath" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. -func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Open the referenced file. - fileReader, err := os.Open(filePath) - // If any error fail quickly here. - if err != nil { - return UploadInfo{}, err - } - defer fileReader.Close() - - // Save the file stat. - fileStat, err := fileReader.Stat() - if err != nil { - return UploadInfo{}, err - } - - // Save the file size. 
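// A minimal FPutObject sketch matching the function above; the client, bucket
// and paths are placeholders. ContentType is left unset so the extension-based
// detection below applies.
// (Assumed imports: context, log, github.com/minio/minio-go/v7.)
func uploadFileExample(ctx context.Context, client *minio.Client) error {
	info, err := client.FPutObject(ctx, "my-bucket", "backups/db.tar.gz",
		"/tmp/db.tar.gz", minio.PutObjectOptions{})
	if err != nil {
		return err
	}
	log.Printf("uploaded %s: %d bytes, etag %s", info.Key, info.Size, info.ETag)
	return nil
}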
- fileSize := fileStat.Size() - - // Set contentType based on filepath extension if not given or default - // value of "application/octet-stream" if the extension has no associated type. - if opts.ContentType == "" { - if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { - opts.ContentType = "application/octet-stream" - } - } - return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts) -} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go deleted file mode 100644 index 5f117af..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ /dev/null @@ -1,465 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "fmt" - "hash/crc32" - "io" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - - "github.com/google/uuid" - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, - opts PutObjectOptions, -) (info UploadInfo, err error) { - info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - } - return info, err -} - -func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Total data read and written to server. should be equal to - // 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) - if err != nil { - return UploadInfo{}, err - } - - // Choose hash algorithms to be calculated by hashCopyN, - // avoid sha256 with non-v4 signature request or - // HTTPS connection. 
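// The AccessDenied fallback above relies on the exported ToErrorResponse;
// callers can use the same pattern to branch on S3 error codes. A sketch:
// (Assumed import: github.com/minio/minio-go/v7.)
func isAccessDenied(err error) bool {
	// Code mirrors the <Code> field of the S3 XML error body.
	return minio.ToErrorResponse(err).Code == "AccessDenied"
}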
- hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) - if len(hashSums) == 0 { - if opts.UserMetadata == nil { - opts.UserMetadata = make(map[string]string, 1) - } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" - } - - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return UploadInfo{}, err - } - delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") - - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Part number always starts with '1'. - partNumber := 1 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - - // Create checksums - // CRC32C is ~50% faster on AMD64 @ 30GB/s - var crcBytes []byte - customHeader := make(http.Header) - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - for partNumber <= totalPartsCount { - length, rErr := readFull(reader, buf) - if rErr == io.EOF && partNumber > 1 { - break - } - - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return UploadInfo{}, rErr - } - - // Calculates hash sums while copying partSize bytes into cw. - for k, v := range hashAlgos { - v.Write(buf[:length]) - hashSums[k] = v.Sum(nil) - v.Close() - } - - // Update progress reader appropriately to the latest offset - // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - // Checksums.. - var ( - md5Base64 string - sha256Hex string - ) - - if hashSums["md5"] != nil { - md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) - } - if hashSums["sha256"] != nil { - sha256Hex = hex.EncodeToString(hashSums["sha256"]) - } - if len(hashSums) == 0 { - crc.Reset() - crc.Write(buf[:length]) - cSum := crc.Sum(nil) - customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) - crcBytes = append(crcBytes, cSum...) - } - - p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} - // Proceed to upload the part. - objPart, uerr := c.uploadPart(ctx, p) - if uerr != nil { - return UploadInfo{}, uerr - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += int64(length) - - // Increment part number. - partNumber++ - - // For unknown size, Read EOF we break away. - // We do not have to upload till totalPartsCount. - if rErr == io.EOF { - break - } - } - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - ChecksumCRC32: part.ChecksumCRC32, - ChecksumCRC32C: part.ChecksumCRC32C, - ChecksumSHA1: part.ChecksumSHA1, - ChecksumSHA256: part.ChecksumSHA256, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - opts = PutObjectOptions{ - ServerSideEncryption: opts.ServerSideEncryption, - } - if len(crcBytes) > 0 { - // Add hash of hashes. 
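// A self-contained sketch of the checksum arithmetic used in the loop above:
// each part carries its CRC32C in x-amz-checksum-crc32c, and the object-level
// checksum is a CRC32C over the concatenated part checksums (the "hash of
// hashes" computed in the branch that follows).
// (Assumed imports: encoding/base64, hash/crc32.)
func crc32cOfParts(parts [][]byte) (partSums []string, object string) {
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	var crcBytes []byte
	for _, p := range parts {
		crc.Reset()
		crc.Write(p)
		sum := crc.Sum(nil)
		crcBytes = append(crcBytes, sum...)
		partSums = append(partSums, base64.StdEncoding.EncodeToString(sum))
	}
	crc.Reset()
	crc.Write(crcBytes)
	return partSums, base64.StdEncoding.EncodeToString(crc.Sum(nil))
}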
- crc.Reset() - crc.Write(crcBytes) - opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} - } - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) - if err != nil { - return UploadInfo{}, err - } - - uploadInfo.Size = totalUploadedSize - return uploadInfo, nil -} - -// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return initiateMultipartUploadResult{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return initiateMultipartUploadResult{}, err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploads", "") - - if opts.Internal.SourceVersionID != "" { - if opts.Internal.SourceVersionID != nullVersionID { - if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { - return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) - } - } - urlValues.Set("versionId", opts.Internal.SourceVersionID) - } - - // Set ContentType header. - customHeader := opts.Header() - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: customHeader, - } - - // Execute POST on an objectName to initiate multipart upload. - resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) - defer closeResponse(resp) - if err != nil { - return initiateMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Decode xml for new multipart upload. - initiateMultipartUploadResult := initiateMultipartUploadResult{} - err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) - if err != nil { - return initiateMultipartUploadResult, err - } - return initiateMultipartUploadResult, nil -} - -type uploadPartParams struct { - bucketName string - objectName string - uploadID string - reader io.Reader - partNumber int - md5Base64 string - sha256Hex string - size int64 - sse encrypt.ServerSide - streamSha256 bool - customHeader http.Header - trailer http.Header -} - -// uploadPart - Uploads a part in a multipart upload. -func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { - return ObjectPart{}, err - } - if err := s3utils.CheckValidObjectName(p.objectName); err != nil { - return ObjectPart{}, err - } - if p.size > maxPartSize { - return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) - } - if p.size <= -1 { - return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) - } - if p.partNumber <= 0 { - return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") - } - if p.uploadID == "" { - return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") - } - - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set part number. - urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) - // Set upload id. 
- urlValues.Set("uploadId", p.uploadID) - - // Set encryption headers, if any. - if p.customHeader == nil { - p.customHeader = make(http.Header) - } - // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html - // Server-side encryption is supported by the S3 Multipart Upload actions. - // Unless you are using a customer-provided encryption key, you don't need - // to specify the encryption parameters in each UploadPart request. - if p.sse != nil && p.sse.Type() == encrypt.SSEC { - p.sse.Marshal(p.customHeader) - } - - reqMetadata := requestMetadata{ - bucketName: p.bucketName, - objectName: p.objectName, - queryValues: urlValues, - customHeader: p.customHeader, - contentBody: p.reader, - contentLength: p.size, - contentMD5Base64: p.md5Base64, - contentSHA256Hex: p.sha256Hex, - streamSha256: p.streamSha256, - trailer: p.trailer, - } - - // Execute PUT on each part. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return ObjectPart{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) - } - } - // Once successfully uploaded, return completed part. - h := resp.Header - objPart := ObjectPart{ - ChecksumCRC32: h.Get("x-amz-checksum-crc32"), - ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), - ChecksumSHA1: h.Get("x-amz-checksum-sha1"), - ChecksumSHA256: h.Get("x-amz-checksum-sha256"), - } - objPart.Size = p.size - objPart.PartNumber = p.partNumber - // Trim off the odd double quotes from ETag in the beginning and end. - objPart.ETag = trimEtag(h.Get("ETag")) - return objPart, nil -} - -// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. -func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, - complete completeMultipartUpload, opts PutObjectOptions, -) (UploadInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploadId", uploadID) - // Marshal complete multipart body. - completeMultipartUploadBytes, err := xml.Marshal(complete) - if err != nil { - return UploadInfo{}, err - } - - headers := opts.Header() - if s3utils.IsAmazonEndpoint(*c.endpointURL) { - headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload - headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload - headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload - } - - // Instantiate all the complete multipart buffer. - completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: completeMultipartUploadBuffer, - contentLength: int64(len(completeMultipartUploadBytes)), - contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), - customHeader: headers, - } - - // Execute POST to complete multipart upload for an objectName. 
- resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) - defer closeResponse(resp) - if err != nil { - return UploadInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Read resp.Body into a []bytes to parse for Error response inside the body - var b []byte - b, err = io.ReadAll(resp.Body) - if err != nil { - return UploadInfo{}, err - } - // Decode completed multipart upload response on success. - completeMultipartUploadResult := completeMultipartUploadResult{} - err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) - if err != nil { - // xml parsing failure due to presence an ill-formed xml fragment - return UploadInfo{}, err - } else if completeMultipartUploadResult.Bucket == "" { - // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. - // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values - // of the members. - - // Decode completed multipart upload response on failure - completeMultipartUploadErr := ErrorResponse{} - err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) - if err != nil { - // xml parsing failure due to presence an ill-formed xml fragment - return UploadInfo{}, err - } - return UploadInfo{}, completeMultipartUploadErr - } - - // extract lifecycle expiry date and rule ID - expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) - - return UploadInfo{ - Bucket: completeMultipartUploadResult.Bucket, - Key: completeMultipartUploadResult.Key, - ETag: trimEtag(completeMultipartUploadResult.ETag), - VersionID: resp.Header.Get(amzVersionID), - Location: completeMultipartUploadResult.Location, - Expiration: expTime, - ExpirationRuleID: ruleID, - - ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, - ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, - ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, - ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go deleted file mode 100644 index 9182d4e..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ /dev/null @@ -1,809 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "hash/crc32" - "io" - "net/http" - "net/url" - "sort" - "strings" - "sync" - - "github.com/google/uuid" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// putObjectMultipartStream - upload a large object using -// multipart upload and streaming signature for signing payload. -// Comprehensive put object operation involving multipart uploads. -// -// Following code handles these types of readers. 
-// -// - *minio.Object -// - Any reader which has a method 'ReadAt()' -func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions, -) (info UploadInfo, err error) { - if opts.ConcurrentStreamParts && opts.NumThreads > 1 { - info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) - } else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { - // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. - info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) - } else { - info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts) - } - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - } - return info, err -} - -// uploadedPartRes - the response received from a part upload. -type uploadedPartRes struct { - Error error // Any error encountered while uploading the part. - PartNum int // Number of the part uploaded. - Size int64 // Size of the part uploaded. - Part ObjectPart -} - -type uploadPartReq struct { - PartNum int // Number of the part uploaded. - Part ObjectPart // Size of the part uploaded. -} - -// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB. -// Supports all readers which implements io.ReaderAt interface -// (ReadAt method). -// -// NOTE: This function is meant to be used for all readers which -// implement io.ReaderAt which allows us for resuming multipart -// uploads but reading at an offset, which would avoid re-read the -// data which was already uploaded. Internally this function uses -// temporary files for staging all the data, these temporary files are -// cleaned automatically when the caller i.e http client closes the -// stream after uploading all the contents successfully. -func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, - reader io.ReaderAt, size int64, opts PutObjectOptions, -) (info UploadInfo, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) - if err != nil { - return UploadInfo{}, err - } - - withChecksum := c.trailingHeaderSupport - if withChecksum { - if opts.UserMetadata == nil { - opts.UserMetadata = make(map[string]string, 1) - } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" - } - // Initiate a new multipart upload. 
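// The ReadAt-based uploader above is what PutObject dispatches to when the
// reader is an io.ReaderAt (for example *os.File) and MD5 is not requested.
// A sketch of steering a large file through it; names are placeholders.
// (Assumed imports: context, os, github.com/minio/minio-go/v7.)
func putLargeFileExample(ctx context.Context, client *minio.Client, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		return err
	}
	// *os.File implements io.ReaderAt, so parts are read at offsets and
	// uploaded by NumThreads workers in parallel, as the code below shows.
	_, err = client.PutObject(ctx, "my-bucket", "big.bin", f, st.Size(),
		minio.PutObjectOptions{NumThreads: 8})
	return err
}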
- uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return UploadInfo{}, err - } - delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") - - // Aborts the multipart upload in progress, if the - // function returns any error, since we do not resume - // we should purge the parts which have been uploaded - // to relinquish storage space. - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Declare a channel that sends the next part number to be uploaded. - uploadPartsCh := make(chan uploadPartReq) - - // Declare a channel that sends back the response of a part upload. - uploadedPartsCh := make(chan uploadedPartRes) - - // Used for readability, lastPartNumber is always totalPartsCount. - lastPartNumber := totalPartsCount - - partitionCtx, partitionCancel := context.WithCancel(ctx) - defer partitionCancel() - // Send each part number to the channel to be processed. - go func() { - defer close(uploadPartsCh) - - for p := 1; p <= totalPartsCount; p++ { - select { - case <-partitionCtx.Done(): - return - case uploadPartsCh <- uploadPartReq{PartNum: p}: - } - } - }() - - // Receive each part number from the channel allowing three parallel uploads. - for w := 1; w <= opts.getNumThreads(); w++ { - go func(partSize int64) { - for { - var uploadReq uploadPartReq - var ok bool - select { - case <-ctx.Done(): - return - case uploadReq, ok = <-uploadPartsCh: - if !ok { - return - } - // Each worker will draw from the part channel and upload in parallel. - } - - // If partNumber was not uploaded we calculate the missing - // part offset and size. For all other part numbers we - // calculate offset based on multiples of partSize. - readOffset := int64(uploadReq.PartNum-1) * partSize - - // As a special case if partNumber is lastPartNumber, we - // calculate the offset based on the last part size. - if uploadReq.PartNum == lastPartNumber { - readOffset = size - lastPartSize - partSize = lastPartSize - } - - sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) - trailer := make(http.Header, 1) - if withChecksum { - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) - sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) { - trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) - }) - } - - // Proceed to upload the part. - p := uploadPartParams{ - bucketName: bucketName, - objectName: objectName, - uploadID: uploadID, - reader: sectionReader, - partNumber: uploadReq.PartNum, - size: partSize, - sse: opts.ServerSideEncryption, - streamSha256: !opts.DisableContentSha256, - sha256Hex: "", - trailer: trailer, - } - objPart, err := c.uploadPart(ctx, p) - if err != nil { - uploadedPartsCh <- uploadedPartRes{ - Error: err, - } - // Exit the goroutine. - return - } - - // Save successfully uploaded part metadata. - uploadReq.Part = objPart - - // Send successful part info through the channel. - uploadedPartsCh <- uploadedPartRes{ - Size: objPart.Size, - PartNum: uploadReq.PartNum, - Part: uploadReq.Part, - } - } - }(partSize) - } - - // Gather the responses as they occur and update any - // progress bar. 
- for u := 1; u <= totalPartsCount; u++ { - select { - case <-ctx.Done(): - return UploadInfo{}, ctx.Err() - case uploadRes := <-uploadedPartsCh: - if uploadRes.Error != nil { - return UploadInfo{}, uploadRes.Error - } - - // Update the totalUploadedSize. - totalUploadedSize += uploadRes.Size - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: uploadRes.Part.ETag, - PartNumber: uploadRes.Part.PartNumber, - ChecksumCRC32: uploadRes.Part.ChecksumCRC32, - ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, - ChecksumSHA1: uploadRes.Part.ChecksumSHA1, - ChecksumSHA256: uploadRes.Part.ChecksumSHA256, - }) - } - } - - // Verify if we uploaded all the data. - if totalUploadedSize != size { - return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - - opts = PutObjectOptions{ - ServerSideEncryption: opts.ServerSideEncryption, - } - if withChecksum { - // Add hash of hashes. - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - for _, part := range complMultipartUpload.Parts { - cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C) - if err == nil { - crc.Write(cs) - } - } - opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} - } - - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) - if err != nil { - return UploadInfo{}, err - } - - uploadInfo.Size = totalUploadedSize - return uploadInfo, nil -} - -func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions, -) (info UploadInfo, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - if !opts.SendContentMd5 { - if opts.UserMetadata == nil { - opts.UserMetadata = make(map[string]string, 1) - } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" - } - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) - if err != nil { - return UploadInfo{}, err - } - // Initiates a new multipart request - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return UploadInfo{}, err - } - delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") - - // Aborts the multipart upload if the function returns - // any error, since we do not resume we should purge - // the parts which have been uploaded to relinquish - // storage space. - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Create checksums - // CRC32C is ~50% faster on AMD64 @ 30GB/s - var crcBytes []byte - customHeader := make(http.Header) - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - md5Hash := c.md5Hasher() - defer md5Hash.Close() - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - - // Avoid declaring variables in the for loop - var md5Base64 string - - // Part number always starts with '1'. 
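// The collector loop in the ReadAt uploader above drains exactly one result
// per part and fails fast on the first error. The same shape in miniature,
// with a buffered result channel so workers never block after an early
// return; upload here is a stand-in for c.uploadPart, not the library API.
// (Assumed import: context.)
func uploadAllParts(ctx context.Context, totalParts, workers int, upload func(part int) error) error {
	partsCh := make(chan int)
	resCh := make(chan error, totalParts) // buffered: senders never block
	go func() {
		defer close(partsCh)
		for p := 1; p <= totalParts; p++ {
			select {
			case <-ctx.Done():
				return
			case partsCh <- p:
			}
		}
	}()
	for w := 0; w < workers; w++ {
		go func() {
			for p := range partsCh {
				resCh <- upload(p)
			}
		}()
	}
	for i := 0; i < totalParts; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case err := <-resCh:
			if err != nil {
				return err
			}
		}
	}
	return nil
}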
-	var partNumber int
-	for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
-
-		// Proceed to upload the part.
-		if partNumber == totalPartsCount {
-			partSize = lastPartSize
-		}
-
-		length, rerr := readFull(reader, buf)
-		if rerr == io.EOF && partNumber > 1 {
-			break
-		}
-
-		if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
-			return UploadInfo{}, rerr
-		}
-
-		// Calculate md5sum.
-		if opts.SendContentMd5 {
-			md5Hash.Reset()
-			md5Hash.Write(buf[:length])
-			md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
-		} else {
-			// Add CRC32C instead.
-			crc.Reset()
-			crc.Write(buf[:length])
-			cSum := crc.Sum(nil)
-			customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
-			crcBytes = append(crcBytes, cSum...)
-		}
-
-		// Update progress reader appropriately to the latest offset
-		// as we read from the source.
-		hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress)
-		p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
-		objPart, uerr := c.uploadPart(ctx, p)
-		if uerr != nil {
-			return UploadInfo{}, uerr
-		}
-
-		// Save successfully uploaded part metadata.
-		partsInfo[partNumber] = objPart
-
-		// Save successfully uploaded size.
-		totalUploadedSize += partSize
-	}
-
-	// Verify if we uploaded all the data.
-	if size > 0 {
-		if totalUploadedSize != size {
-			return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
-		}
-	}
-
-	// Complete multipart upload.
-	var complMultipartUpload completeMultipartUpload
-
-	// Loop over total uploaded parts to save them in
-	// Parts array before completing the multipart request.
-	for i := 1; i < partNumber; i++ {
-		part, ok := partsInfo[i]
-		if !ok {
-			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
-		}
-		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
-			ETag:           part.ETag,
-			PartNumber:     part.PartNumber,
-			ChecksumCRC32:  part.ChecksumCRC32,
-			ChecksumCRC32C: part.ChecksumCRC32C,
-			ChecksumSHA1:   part.ChecksumSHA1,
-			ChecksumSHA256: part.ChecksumSHA256,
-		})
-	}
-
-	// Sort all completed parts.
-	sort.Sort(completedParts(complMultipartUpload.Parts))
-
-	opts = PutObjectOptions{
-		ServerSideEncryption: opts.ServerSideEncryption,
-	}
-	if len(crcBytes) > 0 {
-		// Add hash of hashes.
-		crc.Reset()
-		crc.Write(crcBytes)
-		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
-	}
-	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
-	if err != nil {
-		return UploadInfo{}, err
-	}
-
-	uploadInfo.Size = totalUploadedSize
-	return uploadInfo, nil
-}
-
-// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
-// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
-func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
-	reader io.Reader, opts PutObjectOptions,
-) (info UploadInfo, err error) {
-	// Input validation.
-	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
-		return UploadInfo{}, err
-	}
-
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return UploadInfo{}, err
-	}
-
-	if !opts.SendContentMd5 {
-		if opts.UserMetadata == nil {
-			opts.UserMetadata = make(map[string]string, 1)
-		}
-		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
-	}
-
-	// Cancel all when an error occurs.
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	// Calculate the optimal parts info for a given size.
-	totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
-	if err != nil {
-		return UploadInfo{}, err
-	}
-
-	// Initiates a new multipart request
-	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
-	if err != nil {
-		return UploadInfo{}, err
-	}
-	delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
-
-	// Aborts the multipart upload if the function returns
-	// any error, since we do not resume we should purge
-	// the parts which have been uploaded to relinquish
-	// storage space.
-	defer func() {
-		if err != nil {
-			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
-		}
-	}()
-
-	// Create checksums
-	// CRC32C is ~50% faster on AMD64 @ 30GB/s
-	var crcBytes []byte
-	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
-
-	// Total data read and written to server; should be equal to 'size' at the end of the call.
-	var totalUploadedSize int64
-
-	// Initialize parts uploaded map.
-	partsInfo := make(map[int]ObjectPart)
-
-	// Create a buffer.
-	nBuffers := int64(opts.NumThreads)
-	bufs := make(chan []byte, nBuffers)
-	all := make([]byte, nBuffers*partSize)
-	for i := int64(0); i < nBuffers; i++ {
-		bufs <- all[i*partSize : i*partSize+partSize]
-	}
-
-	var wg sync.WaitGroup
-	var mu sync.Mutex
-	errCh := make(chan error, opts.NumThreads)
-
-	reader = newHook(reader, opts.Progress)
-
-	// Part number always starts with '1'.
-	var partNumber int
-	for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
-		// Proceed to upload the part.
-		var buf []byte
-		select {
-		case buf = <-bufs:
-		case err = <-errCh:
-			cancel()
-			wg.Wait()
-			return UploadInfo{}, err
-		}
-
-		if int64(len(buf)) != partSize {
-			return UploadInfo{}, fmt.Errorf("read buffer of size %d is smaller than the expected partSize %d", len(buf), partSize)
-		}
-
-		length, rerr := readFull(reader, buf)
-		if rerr == io.EOF && partNumber > 1 {
-			// Done
-			break
-		}
-
-		if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
-			cancel()
-			wg.Wait()
-			return UploadInfo{}, rerr
-		}
-
-		// Add the CRC32C checksum header unless MD5 was requested
-		// (MD5 is computed in the upload goroutine below).
-		customHeader := make(http.Header)
-		if !opts.SendContentMd5 {
-			// Add CRC32C instead.
-			crc.Reset()
-			crc.Write(buf[:length])
-			cSum := crc.Sum(nil)
-			customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
-			crcBytes = append(crcBytes, cSum...)
- } - - wg.Add(1) - go func(partNumber int) { - // Avoid declaring variables in the for loop - var md5Base64 string - - if opts.SendContentMd5 { - md5Hash := c.md5Hasher() - md5Hash.Write(buf[:length]) - md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)) - md5Hash.Close() - } - - defer wg.Done() - p := uploadPartParams{ - bucketName: bucketName, - objectName: objectName, - uploadID: uploadID, - reader: bytes.NewReader(buf[:length]), - partNumber: partNumber, - md5Base64: md5Base64, - size: int64(length), - sse: opts.ServerSideEncryption, - streamSha256: !opts.DisableContentSha256, - customHeader: customHeader, - } - objPart, uerr := c.uploadPart(ctx, p) - if uerr != nil { - errCh <- uerr - return - } - - // Save successfully uploaded part metadata. - mu.Lock() - partsInfo[partNumber] = objPart - mu.Unlock() - - // Send buffer back so it can be reused. - bufs <- buf - }(partNumber) - - // Save successfully uploaded size. - totalUploadedSize += int64(length) - } - wg.Wait() - - // Collect any error - select { - case err = <-errCh: - return UploadInfo{}, err - default: - } - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - ChecksumCRC32: part.ChecksumCRC32, - ChecksumCRC32C: part.ChecksumCRC32C, - ChecksumSHA1: part.ChecksumSHA1, - ChecksumSHA256: part.ChecksumSHA256, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - - opts = PutObjectOptions{} - if len(crcBytes) > 0 { - // Add hash of hashes. - crc.Reset() - crc.Write(crcBytes) - opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} - } - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) - if err != nil { - return UploadInfo{}, err - } - - uploadInfo.Size = totalUploadedSize - return uploadInfo, nil -} - -// putObject special function used Google Cloud Storage. This special function -// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Size -1 is only supported on Google Cloud Storage, we error - // out in all other situations. 
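// The parallel uploader above cycles a fixed set of part buffers through a
// channel rather than allocating per part, bounding memory at roughly
// NumThreads * PartSize. The same idea in isolation:
func newBufferPool(nBuffers, partSize int64) chan []byte {
	bufs := make(chan []byte, nBuffers)
	all := make([]byte, nBuffers*partSize) // one backing allocation
	for i := int64(0); i < nBuffers; i++ {
		bufs <- all[i*partSize : (i+1)*partSize]
	}
	return bufs
}
// A worker takes a slice with buf := <-bufs, fills and uploads it, and sends
// it back with bufs <- buf, so at most nBuffers parts are ever in flight.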
- if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { - return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName) - } - - if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 { - return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") - } - - var readSeeker io.Seeker - if size > 0 { - if isReadAt(reader) && !isObject(reader) { - seeker, ok := reader.(io.Seeker) - if ok { - offset, err := seeker.Seek(0, io.SeekCurrent) - if err != nil { - return UploadInfo{}, errInvalidArgument(err.Error()) - } - reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) - readSeeker = reader.(io.Seeker) - } - } - } - - var md5Base64 string - if opts.SendContentMd5 { - // Calculate md5sum. - hash := c.md5Hasher() - - if readSeeker != nil { - if _, err := io.Copy(hash, reader); err != nil { - return UploadInfo{}, err - } - // Seek back to beginning of io.NewSectionReader's offset. - _, err = readSeeker.Seek(0, io.SeekStart) - if err != nil { - return UploadInfo{}, errInvalidArgument(err.Error()) - } - } else { - // Create a buffer. - buf := make([]byte, size) - - length, err := readFull(reader, buf) - if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { - return UploadInfo{}, err - } - - hash.Write(buf[:length]) - reader = bytes.NewReader(buf[:length]) - } - - md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - hash.Close() - } - - // Update progress reader appropriately to the latest offset as we - // read from the source. - progressReader := newHook(reader, opts.Progress) - - // This function does not calculate sha256 and md5sum for payload. - // Execute put object. - return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts) -} - -// putObjectDo - executes the put object http operation. -// NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - // Set headers. - customHeader := opts.Header() - - // Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks. - addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure) - - if addCrc { - // If user has added checksums, don't add them ourselves. - for k := range opts.UserMetadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { - addCrc = false - } - } - } - // Populate request metadata. - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - streamSha256: !opts.DisableContentSha256, - addCrc: addCrc, - } - if opts.Internal.SourceVersionID != "" { - if opts.Internal.SourceVersionID != nullVersionID { - if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { - return UploadInfo{}, errInvalidArgument(err.Error()) - } - } - urlValues := make(url.Values) - urlValues.Set("versionId", opts.Internal.SourceVersionID) - reqMetadata.queryValues = urlValues - } - - // Execute PUT an objectName. 
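// The MD5 pre-pass in putObject above reads the payload once and seeks back
// before the actual PUT. The pattern in isolation for any io.ReadSeeker; the
// library uses its own md5Hasher (FIPS-aware), so plain crypto/md5 here is an
// assumption. (Assumed imports: crypto/md5, encoding/base64, io.)
func md5Base64Of(rs io.ReadSeeker) (string, error) {
	h := md5.New()
	if _, err := io.Copy(h, rs); err != nil {
		return "", err
	}
	// Rewind so the caller can re-read the same bytes for the upload.
	if _, err := rs.Seek(0, io.SeekStart); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}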
- resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return UploadInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // extract lifecycle expiry date and rule ID - expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) - h := resp.Header - return UploadInfo{ - Bucket: bucketName, - Key: objectName, - ETag: trimEtag(h.Get("ETag")), - VersionID: h.Get(amzVersionID), - Size: size, - Expiration: expTime, - ExpirationRuleID: ruleID, - - // Checksum values - ChecksumCRC32: h.Get("x-amz-checksum-crc32"), - ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), - ChecksumSHA1: h.Get("x-amz-checksum-sha1"), - ChecksumSHA256: h.Get("x-amz-checksum-sha256"), - }, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go deleted file mode 100644 index bbd8924..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ /dev/null @@ -1,473 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "fmt" - "hash/crc32" - "io" - "net/http" - "sort" - "time" - - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/s3utils" - "golang.org/x/net/http/httpguts" -) - -// ReplicationStatus represents replication status of object -type ReplicationStatus string - -const ( - // ReplicationStatusPending indicates replication is pending - ReplicationStatusPending ReplicationStatus = "PENDING" - // ReplicationStatusComplete indicates replication completed ok - ReplicationStatusComplete ReplicationStatus = "COMPLETED" - // ReplicationStatusFailed indicates replication failed - ReplicationStatusFailed ReplicationStatus = "FAILED" - // ReplicationStatusReplica indicates object is a replica of a source - ReplicationStatusReplica ReplicationStatus = "REPLICA" -) - -// Empty returns true if no replication status set. 
-func (r ReplicationStatus) Empty() bool { - return r == "" -} - -// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition -// implementation on MinIO server -type AdvancedPutOptions struct { - SourceVersionID string - SourceETag string - ReplicationStatus ReplicationStatus - SourceMTime time.Time - ReplicationRequest bool - RetentionTimestamp time.Time - TaggingTimestamp time.Time - LegalholdTimestamp time.Time - ReplicationValidityCheck bool -} - -// PutObjectOptions represents options specified by user for PutObject call -type PutObjectOptions struct { - UserMetadata map[string]string - UserTags map[string]string - Progress io.Reader - ContentType string - ContentEncoding string - ContentDisposition string - ContentLanguage string - CacheControl string - Expires time.Time - Mode RetentionMode - RetainUntilDate time.Time - ServerSideEncryption encrypt.ServerSide - NumThreads uint - StorageClass string - WebsiteRedirectLocation string - PartSize uint64 - LegalHold LegalHoldStatus - SendContentMd5 bool - DisableContentSha256 bool - DisableMultipart bool - - // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes, - // fill them serially and upload them in parallel. - // This can be used for faster uploads on non-seekable or slow-to-seek input. - ConcurrentStreamParts bool - Internal AdvancedPutOptions - - customHeaders http.Header -} - -// SetMatchETag if etag matches while PUT MinIO returns an error -// this is a MinIO specific extension to support optimistic locking -// semantics. -func (opts *PutObjectOptions) SetMatchETag(etag string) { - if opts.customHeaders == nil { - opts.customHeaders = http.Header{} - } - opts.customHeaders.Set("If-Match", "\""+etag+"\"") -} - -// SetMatchETagExcept if etag does not match while PUT MinIO returns an -// error this is a MinIO specific extension to support optimistic locking -// semantics. 
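// Ahead of the matching SetMatchETagExcept definition below, a caller-side
// sketch of these optimistic-locking helpers: write back only if the object's
// ETag is unchanged since it was read. etag is assumed to come from an
// earlier StatObject or GetObject call; bucket and key are placeholders.
// (Assumed imports: context, io, github.com/minio/minio-go/v7.)
func writeIfUnchanged(ctx context.Context, client *minio.Client, etag string, body io.Reader, size int64) error {
	opts := minio.PutObjectOptions{}
	opts.SetMatchETag(etag) // the PUT fails if the ETag moved in the meantime
	_, err := client.PutObject(ctx, "my-bucket", "state.json", body, size, opts)
	return err
}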
-func (opts *PutObjectOptions) SetMatchETagExcept(etag string) {
-	if opts.customHeaders == nil {
-		opts.customHeaders = http.Header{}
-	}
-	opts.customHeaders.Set("If-None-Match", "\""+etag+"\"")
-}
-
-// getNumThreads - gets the number of threads to be used in the multipart
-// put object operation
-func (opts PutObjectOptions) getNumThreads() (numThreads int) {
-	if opts.NumThreads > 0 {
-		numThreads = int(opts.NumThreads)
-	} else {
-		numThreads = totalWorkers
-	}
-	return
-}
-
-// Header - constructs the headers from metadata entered by the user in the
-// PutObjectOptions struct
-func (opts PutObjectOptions) Header() (header http.Header) {
-	header = make(http.Header)
-
-	contentType := opts.ContentType
-	if contentType == "" {
-		contentType = "application/octet-stream"
-	}
-	header.Set("Content-Type", contentType)
-
-	if opts.ContentEncoding != "" {
-		header.Set("Content-Encoding", opts.ContentEncoding)
-	}
-	if opts.ContentDisposition != "" {
-		header.Set("Content-Disposition", opts.ContentDisposition)
-	}
-	if opts.ContentLanguage != "" {
-		header.Set("Content-Language", opts.ContentLanguage)
-	}
-	if opts.CacheControl != "" {
-		header.Set("Cache-Control", opts.CacheControl)
-	}
-
-	if !opts.Expires.IsZero() {
-		header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat))
-	}
-
-	if opts.Mode != "" {
-		header.Set(amzLockMode, opts.Mode.String())
-	}
-
-	if !opts.RetainUntilDate.IsZero() {
-		header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339))
-	}
-
-	if opts.LegalHold != "" {
-		header.Set(amzLegalHoldHeader, opts.LegalHold.String())
-	}
-
-	if opts.ServerSideEncryption != nil {
-		opts.ServerSideEncryption.Marshal(header)
-	}
-
-	if opts.StorageClass != "" {
-		header.Set(amzStorageClass, opts.StorageClass)
-	}
-
-	if opts.WebsiteRedirectLocation != "" {
-		header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation)
-	}
-
-	if !opts.Internal.ReplicationStatus.Empty() {
-		header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
-	}
-	if !opts.Internal.SourceMTime.IsZero() {
-		header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano))
-	}
-	if opts.Internal.SourceETag != "" {
-		header.Set(minIOBucketSourceETag, opts.Internal.SourceETag)
-	}
-	if opts.Internal.ReplicationRequest {
-		header.Set(minIOBucketReplicationRequest, "true")
-	}
-	if opts.Internal.ReplicationValidityCheck {
-		header.Set(minIOBucketReplicationCheck, "true")
-	}
-	if !opts.Internal.LegalholdTimestamp.IsZero() {
-		header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
-	}
-	if !opts.Internal.RetentionTimestamp.IsZero() {
-		header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
-	}
-	if !opts.Internal.TaggingTimestamp.IsZero() {
-		header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
-	}
-
-	if len(opts.UserTags) != 0 {
-		header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
-	}
-
-	for k, v := range opts.UserMetadata {
-		if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
-			header.Set(k, v)
-		} else {
-			header.Set("x-amz-meta-"+k, v)
-		}
-	}
-
-	// set any other additional custom headers.
-	for k, v := range opts.customHeaders {
-		header[k] = v
-	}
-
-	return
-}
-
-// validate() checks if the UserMetadata map has standard headers and raises an error if so.
-func (opts PutObjectOptions) validate() (err error) { - for k, v := range opts.UserMetadata { - if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { - return errInvalidArgument(k + " unsupported user defined metadata name") - } - if !httpguts.ValidHeaderFieldValue(v) { - return errInvalidArgument(v + " unsupported user defined metadata value") - } - } - if opts.Mode != "" && !opts.Mode.IsValid() { - return errInvalidArgument(opts.Mode.String() + " unsupported retention mode") - } - if opts.LegalHold != "" && !opts.LegalHold.IsValid() { - return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status") - } - return nil -} - -// completedParts is a collection of parts sortable by their part numbers. -// used for sorting the uploaded parts before completing the multipart request. -type completedParts []CompletePart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } - -// PutObject creates an object in a bucket. -// -// You must have WRITE permissions on a bucket to create an object. -// -// - For size smaller than 16MiB PutObject automatically does a -// single atomic PUT operation. -// -// - For size larger than 16MiB PutObject automatically does a -// multipart upload operation. -// -// - For size input as -1 PutObject does a multipart Put operation -// until input stream reaches EOF. Maximum object size that can -// be uploaded through this operation will be 5TiB. -// -// WARNING: Passing down '-1' will use memory and these cannot -// be reused for best outcomes for PutObject(), pass the size always. -// -// NOTE: Upon errors during upload multipart operation is entirely aborted. -func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, - opts PutObjectOptions, -) (info UploadInfo, err error) { - if objectSize < 0 && opts.DisableMultipart { - return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") - } - - err = opts.validate() - if err != nil { - return UploadInfo{}, err - } - - return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) -} - -func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { - // Check for largest object size allowed. - if size > int64(maxMultipartPutObjectSize) { - return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) - } - - // NOTE: Streaming signature is not supported by GCS. 
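// How the dispatch above looks from the caller's side with default options:
// a known small size becomes one atomic PUT, while -1 streams multipart until
// EOF, buffering PartSize bytes at a time. Names are placeholders.
// (Assumed imports: context, io, github.com/minio/minio-go/v7.)
func putRoutingExample(ctx context.Context, client *minio.Client, small, unknown io.Reader) error {
	// Known size below the part size: single PUT.
	if _, err := client.PutObject(ctx, "my-bucket", "small.bin", small, 1<<20,
		minio.PutObjectOptions{}); err != nil {
		return err
	}
	// Unknown length: pass -1 and let the stream run to EOF.
	_, err := client.PutObject(ctx, "my-bucket", "stream.bin", unknown, -1,
		minio.PutObjectOptions{})
	return err
}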
- if s3utils.IsGoogleEndpoint(*c.endpointURL) { - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - - partSize := opts.PartSize - if opts.PartSize == 0 { - partSize = minPartSize - } - - if c.overrideSignerType.IsV2() { - if size >= 0 && size < int64(partSize) || opts.DisableMultipart { - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) - } - - if size < 0 { - if opts.DisableMultipart { - return UploadInfo{}, errors.New("no length provided and multipart disabled") - } - if opts.ConcurrentStreamParts && opts.NumThreads > 1 { - return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) - } - return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) - } - - if size < int64(partSize) || opts.DisableMultipart { - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - - return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) -} - -func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Total data read and written to server. should be equal to - // 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) - if err != nil { - return UploadInfo{}, err - } - - if !opts.SendContentMd5 { - if opts.UserMetadata == nil { - opts.UserMetadata = make(map[string]string, 1) - } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" - } - - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return UploadInfo{}, err - } - delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") - - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Part number always starts with '1'. - partNumber := 1 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - - // Create checksums - // CRC32C is ~50% faster on AMD64 @ 30GB/s - var crcBytes []byte - customHeader := make(http.Header) - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - - for partNumber <= totalPartsCount { - length, rerr := readFull(reader, buf) - if rerr == io.EOF && partNumber > 1 { - break - } - - if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { - return UploadInfo{}, rerr - } - - var md5Base64 string - if opts.SendContentMd5 { - // Calculate md5sum. - hash := c.md5Hasher() - hash.Write(buf[:length]) - md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - hash.Close() - } else { - crc.Reset() - crc.Write(buf[:length]) - cSum := crc.Sum(nil) - customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) - crcBytes = append(crcBytes, cSum...) - } - - // Update progress reader appropriately to the latest offset - // as we read from the source. 
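Which branch of putObjectCommon runs is determined entirely by the options: DisableMultipart pins the single-PUT path, PartSize moves the multipart threshold away from the minimum part size, and ConcurrentStreamParts together with NumThreads > 1 selects the parallel path for unknown-length input. A sketch of steering an unknown-length upload onto the parallel path (client, ctx, and the reader r are assumed):

    opts := minio.PutObjectOptions{
        PartSize:              64 << 20, // 64 MiB parts
        ConcurrentStreamParts: true,     // only honored together with NumThreads > 1
        NumThreads:            8,
    }
    _, err := client.PutObject(ctx, "mybucket", "big.bin", r, -1, opts)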
- rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - // Proceed to upload the part. - p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} - objPart, uerr := c.uploadPart(ctx, p) - if uerr != nil { - return UploadInfo{}, uerr - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += int64(length) - - // Increment part number. - partNumber++ - - // For unknown size, Read EOF we break away. - // We do not have to upload till totalPartsCount. - if rerr == io.EOF { - break - } - } - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - ChecksumCRC32: part.ChecksumCRC32, - ChecksumCRC32C: part.ChecksumCRC32C, - ChecksumSHA1: part.ChecksumSHA1, - ChecksumSHA256: part.ChecksumSHA256, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - - opts = PutObjectOptions{} - if len(crcBytes) > 0 { - // Add hash of hashes. - crc.Reset() - crc.Write(crcBytes) - opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} - } - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) - if err != nil { - return UploadInfo{}, err - } - - uploadInfo.Size = totalUploadedSize - return uploadInfo, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go deleted file mode 100644 index eb4da41..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go +++ /dev/null @@ -1,246 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "archive/tar" - "bufio" - "bytes" - "context" - "fmt" - "io" - "net/http" - "os" - "strings" - "sync" - "time" - - "github.com/klauspost/compress/s2" -) - -// SnowballOptions contains options for PutObjectsSnowball calls. -type SnowballOptions struct { - // Opts is options applied to all objects. - Opts PutObjectOptions - - // Processing options: - - // InMemory specifies that all objects should be collected in memory - // before they are uploaded. - // If false a temporary file will be created. - InMemory bool - - // Compress enabled content compression before upload. 
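The checksum bookkeeping above is easy to miss: each part is checksummed with CRC32C (Castagnoli), the raw 4-byte sums are concatenated, and the value sent with the complete-multipart request is the CRC32C of that concatenation, a hash of hashes. The same computation in isolation, as a self-contained sketch:

    package main

    import (
        "encoding/base64"
        "fmt"
        "hash/crc32"
    )

    func main() {
        parts := [][]byte{[]byte("part-1 data"), []byte("part-2 data")}
        crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
        var crcBytes []byte
        for _, p := range parts {
            crc.Reset()
            crc.Write(p)
            crcBytes = append(crcBytes, crc.Sum(nil)...) // 4 bytes per part
        }
        crc.Reset()
        crc.Write(crcBytes) // CRC32C over the concatenated per-part CRCs
        fmt.Println(base64.StdEncoding.EncodeToString(crc.Sum(nil)))
    }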
- // Compression will typically reduce memory and network usage, - // Compression can safely be enabled with MinIO hosts. - Compress bool - - // SkipErrs if enabled will skip any errors while reading the - // object content while creating the snowball archive - SkipErrs bool -} - -// SnowballObject contains information about a single object to be added to the snowball. -type SnowballObject struct { - // Key is the destination key, including prefix. - Key string - - // Size is the content size of this object. - Size int64 - - // Modtime to apply to the object. - // If Modtime is the zero value current time will be used. - ModTime time.Time - - // Content of the object. - // Exactly 'Size' number of bytes must be provided. - Content io.Reader - - // VersionID of the object; if empty, a new versionID will be generated - VersionID string - - // Headers contains more options for this object upload, the same as you - // would include in a regular PutObject operation, such as user metadata - // and content-disposition, expires, .. - Headers http.Header - - // Close will be called when an object has finished processing. - // Note that if PutObjectsSnowball returns because of an error, - // objects not consumed from the input will NOT have been closed. - // Leave as nil for no callback. - Close func() -} - -type nopReadSeekCloser struct { - io.ReadSeeker -} - -func (n nopReadSeekCloser) Close() error { - return nil -} - -// This is available as io.ReadSeekCloser from go1.16 -type readSeekCloser interface { - io.Reader - io.Closer - io.Seeker -} - -// PutObjectsSnowball will put multiple objects with a single put call. -// A (compressed) TAR file will be created which will contain multiple objects. -// The key for each object will be used for the destination in the specified bucket. -// Total size should be < 5TB. -// This function blocks until 'objs' is closed and the content has been uploaded. -func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) { - err = opts.Opts.validate() - if err != nil { - return err - } - var tmpWriter io.Writer - var getTmpReader func() (rc readSeekCloser, sz int64, err error) - if opts.InMemory { - b := bytes.NewBuffer(nil) - tmpWriter = b - getTmpReader = func() (readSeekCloser, int64, error) { - return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil - } - } else { - f, err := os.CreateTemp("", "s3-putsnowballobjects-*") - if err != nil { - return err - } - name := f.Name() - tmpWriter = f - var once sync.Once - defer once.Do(func() { - f.Close() - }) - defer os.Remove(name) - getTmpReader = func() (readSeekCloser, int64, error) { - once.Do(func() { - f.Close() - }) - f, err := os.Open(name) - if err != nil { - return nil, 0, err - } - st, err := f.Stat() - if err != nil { - return nil, 0, err - } - return f, st.Size(), nil - } - } - flush := func() error { return nil } - if !opts.Compress { - if !opts.InMemory { - // Insert buffer for writes. - buf := bufio.NewWriterSize(tmpWriter, 1<<20) - flush = buf.Flush - tmpWriter = buf - } - } else { - s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression()) - flush = s2c.Close - defer s2c.Close() - tmpWriter = s2c - } - t := tar.NewWriter(tmpWriter) - -objectLoop: - for { - select { - case <-ctx.Done(): - return ctx.Err() - case obj, ok := <-objs: - if !ok { - break objectLoop - } - - closeObj := func() {} - if obj.Close != nil { - closeObj = obj.Close - } - - // Trim accidental slash prefix. 
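From the caller's side, the contract of PutObjectsSnowball described above is a channel of SnowballObject values, each carrying exactly Size bytes of Content. A hedged sketch (client and ctx assumed; a MinIO endpoint is assumed, since the server must understand the snowball auto-extract upload):

    objs := make(chan minio.SnowballObject)
    go func() {
        defer close(objs)
        for _, name := range []string{"docs/a.txt", "docs/b.txt"} {
            body := []byte("payload for " + name)
            objs <- minio.SnowballObject{
                Key:     name,
                Size:    int64(len(body)),
                Content: bytes.NewReader(body),
            }
        }
    }()
    if err := client.PutObjectsSnowball(ctx, "mybucket", minio.SnowballOptions{Compress: true}, objs); err != nil {
        log.Fatalln(err)
    }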
- obj.Key = strings.TrimPrefix(obj.Key, "/") - header := tar.Header{ - Typeflag: tar.TypeReg, - Name: obj.Key, - Size: obj.Size, - ModTime: obj.ModTime, - Format: tar.FormatPAX, - } - if header.ModTime.IsZero() { - header.ModTime = time.Now().UTC() - } - - header.PAXRecords = make(map[string]string) - if obj.VersionID != "" { - header.PAXRecords["minio.versionId"] = obj.VersionID - } - for k, vals := range obj.Headers { - header.PAXRecords["minio.metadata."+k] = strings.Join(vals, ",") - } - - if err := t.WriteHeader(&header); err != nil { - closeObj() - return err - } - n, err := io.Copy(t, obj.Content) - if err != nil { - closeObj() - if opts.SkipErrs { - continue - } - return err - } - if n != obj.Size { - closeObj() - if opts.SkipErrs { - continue - } - return io.ErrUnexpectedEOF - } - closeObj() - } - } - // Flush tar - err = t.Flush() - if err != nil { - return err - } - // Flush compression - err = flush() - if err != nil { - return err - } - if opts.Opts.UserMetadata == nil { - opts.Opts.UserMetadata = map[string]string{} - } - opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true" - opts.Opts.DisableMultipart = true - rc, sz, err := getTmpReader() - if err != nil { - return err - } - defer rc.Close() - rand := c.random.Uint64() - _, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts) - return err -} diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go deleted file mode 100644 index 9c0ac44..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ /dev/null @@ -1,548 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "io" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -//revive:disable - -// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions. -type BucketOptions = RemoveBucketOptions - -//revive:enable - -// RemoveBucketOptions special headers to purge buckets, only -// useful when endpoint is MinIO -type RemoveBucketOptions struct { - ForceDelete bool -} - -// RemoveBucketWithOptions deletes the bucket name. -// -// All objects (including all object versions and delete markers) -// in the bucket will be deleted forcibly if bucket options set -// ForceDelete to 'true'. -func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Build headers. - headers := make(http.Header) - if opts.ForceDelete { - headers.Set(minIOForceDelete, "true") - } - - // Execute DELETE on bucket. 
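RemoveBucketOptions above exists for exactly one MinIO extension: with ForceDelete set, the server is asked to purge every object, object version, and delete marker before dropping the bucket, instead of rejecting a non-empty bucket. A sketch (client and ctx assumed):

    err := client.RemoveBucketWithOptions(ctx, "mybucket", minio.RemoveBucketOptions{
        ForceDelete: true, // MinIO-only; the plain RemoveBucket below requires an empty bucket
    })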
- resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - contentSHA256Hex: emptySHA256Hex, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Remove the location from cache on a successful delete. - c.bucketLocCache.Delete(bucketName) - return nil -} - -// RemoveBucket deletes the bucket name. -// -// All objects (including all object versions and delete markers). -// in the bucket must be deleted before successfully attempting this request. -func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - // Execute DELETE on bucket. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Remove the location from cache on a successful delete. - c.bucketLocCache.Delete(bucketName) - - return nil -} - -// AdvancedRemoveOptions intended for internal use by replication -type AdvancedRemoveOptions struct { - ReplicationDeleteMarker bool - ReplicationStatus ReplicationStatus - ReplicationMTime time.Time - ReplicationRequest bool - ReplicationValidityCheck bool // check permissions -} - -// RemoveObjectOptions represents options specified by user for RemoveObject call -type RemoveObjectOptions struct { - ForceDelete bool - GovernanceBypass bool - VersionID string - Internal AdvancedRemoveOptions -} - -// RemoveObject removes an object from a bucket. -func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - res := c.removeObject(ctx, bucketName, objectName, opts) - return res.Err -} - -func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - // Build headers. - headers := make(http.Header) - - if opts.GovernanceBypass { - // Set the bypass goverenance retention header - headers.Set(amzBypassGovernance, "true") - } - if opts.Internal.ReplicationDeleteMarker { - headers.Set(minIOBucketReplicationDeleteMarker, "true") - } - if !opts.Internal.ReplicationMTime.IsZero() { - headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano)) - } - if !opts.Internal.ReplicationStatus.Empty() { - headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) - } - if opts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "true") - } - if opts.Internal.ReplicationValidityCheck { - headers.Set(minIOBucketReplicationCheck, "true") - } - if opts.ForceDelete { - headers.Set(minIOForceDelete, "true") - } - // Execute DELETE on objectName. 
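RemoveObjectOptions covers the two usual refinements on top of a plain delete: addressing one specific version, and bypassing governance-mode retention. A sketch (client and ctx assumed; the version ID is a placeholder):

    err := client.RemoveObject(ctx, "mybucket", "report.csv", minio.RemoveObjectOptions{
        VersionID:        "some-version-id",
        GovernanceBypass: true, // sets the x-amz-bypass-governance-retention header
    })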
- resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - contentSHA256Hex: emptySHA256Hex, - queryValues: urlValues, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return RemoveObjectResult{Err: err} - } - if resp != nil { - // if some unexpected error happened and max retry is reached, we want to let client know - if resp.StatusCode != http.StatusNoContent { - err := httpRespToErrorResponse(resp, bucketName, objectName) - return RemoveObjectResult{Err: err} - } - } - - // DeleteObject always responds with http '204' even for - // objects which do not exist. So no need to handle them - // specifically. - return RemoveObjectResult{ - ObjectName: objectName, - ObjectVersionID: opts.VersionID, - DeleteMarker: resp.Header.Get("x-amz-delete-marker") == "true", - DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"), - } -} - -// RemoveObjectError - container of Multi Delete S3 API error -type RemoveObjectError struct { - ObjectName string - VersionID string - Err error -} - -// RemoveObjectResult - container of Multi Delete S3 API result -type RemoveObjectResult struct { - ObjectName string - ObjectVersionID string - - DeleteMarker bool - DeleteMarkerVersionID string - - Err error -} - -// generateRemoveMultiObjects - generate the XML request for remove multi objects request -func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte { - delObjects := []deleteObject{} - for _, obj := range objects { - delObjects = append(delObjects, deleteObject{ - Key: obj.Key, - VersionID: obj.VersionID, - }) - } - xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false}) - return xmlBytes -} - -// processRemoveMultiObjectsResponse - parse the remove multi objects web service -// and return the success/failure result status for each object -func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObjectResult) { - // Parse multi delete XML response - rmResult := &deleteMultiObjectsResult{} - err := xmlDecoder(body, rmResult) - if err != nil { - resultCh <- RemoveObjectResult{ObjectName: "", Err: err} - return - } - - // Fill deletion that returned success - for _, obj := range rmResult.DeletedObjects { - resultCh <- RemoveObjectResult{ - ObjectName: obj.Key, - // Only filled with versioned buckets - ObjectVersionID: obj.VersionID, - DeleteMarker: obj.DeleteMarker, - DeleteMarkerVersionID: obj.DeleteMarkerVersionID, - } - } - - // Fill deletion that returned an error. - for _, obj := range rmResult.UnDeletedObjects { - // Version does not exist is not an error ignore and continue. - switch obj.Code { - case "InvalidArgument", "NoSuchVersion": - continue - } - resultCh <- RemoveObjectResult{ - ObjectName: obj.Key, - ObjectVersionID: obj.VersionID, - Err: ErrorResponse{ - Code: obj.Code, - Message: obj.Message, - }, - } - } -} - -// RemoveObjectsOptions represents options specified by user for RemoveObjects call -type RemoveObjectsOptions struct { - GovernanceBypass bool -} - -// RemoveObjects removes multiple objects from a bucket while -// it is possible to specify objects versions which are received from -// objectsCh. Remove failures are sent back via error channel. -func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { - errorCh := make(chan RemoveObjectError, 1) - - // Validate if bucket name is valid. 
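The RemoveObjects flow above is channel to channel: feed ObjectInfo values (only Key, and optionally VersionID, matter here) and drain the returned error channel, which reports failures only. A sketch (client and ctx assumed):

    objectsCh := make(chan minio.ObjectInfo)
    go func() {
        defer close(objectsCh)
        for _, key := range []string{"logs/1.log", "logs/2.log"} {
            objectsCh <- minio.ObjectInfo{Key: key}
        }
    }()
    for rErr := range client.RemoveObjects(ctx, "mybucket", objectsCh, minio.RemoveObjectsOptions{}) {
        log.Printf("failed to remove %q: %v", rErr.ObjectName, rErr.Err)
    }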
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(errorCh) - errorCh <- RemoveObjectError{ - Err: err, - } - return errorCh - } - // Validate objects channel to be properly allocated. - if objectsCh == nil { - defer close(errorCh) - errorCh <- RemoveObjectError{ - Err: errInvalidArgument("Objects channel cannot be nil"), - } - return errorCh - } - - resultCh := make(chan RemoveObjectResult, 1) - go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) - go func() { - defer close(errorCh) - for res := range resultCh { - // Send only errors to the error channel - if res.Err == nil { - continue - } - errorCh <- RemoveObjectError{ - ObjectName: res.ObjectName, - VersionID: res.ObjectVersionID, - Err: res.Err, - } - } - }() - - return errorCh -} - -// RemoveObjectsWithResult removes multiple objects from a bucket while -// it is possible to specify objects versions which are received from -// objectsCh. Remove results, successes and failures are sent back via -// RemoveObjectResult channel -func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult { - resultCh := make(chan RemoveObjectResult, 1) - - // Validate if bucket name is valid. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(resultCh) - resultCh <- RemoveObjectResult{ - Err: err, - } - return resultCh - } - // Validate objects channel to be properly allocated. - if objectsCh == nil { - defer close(resultCh) - resultCh <- RemoveObjectResult{ - Err: errInvalidArgument("Objects channel cannot be nil"), - } - return resultCh - } - - go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) - return resultCh -} - -// Return true if the character is within the allowed characters in an XML 1.0 document -// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets -func validXMLChar(r rune) (ok bool) { - return r == 0x09 || - r == 0x0A || - r == 0x0D || - r >= 0x20 && r <= 0xD7FF || - r >= 0xE000 && r <= 0xFFFD || - r >= 0x10000 && r <= 0x10FFFF -} - -func hasInvalidXMLChar(str string) bool { - for _, s := range str { - if !validXMLChar(s) { - return true - } - } - return false -} - -// Generate and call MultiDelete S3 requests based on entries received from objectsCh -func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) { - maxEntries := 1000 - finish := false - urlValues := make(url.Values) - urlValues.Set("delete", "") - - // Close result channel when Multi delete finishes. - defer close(resultCh) - - // Loop over entries by 1000 and call MultiDelete requests - for { - if finish { - break - } - count := 0 - var batch []ObjectInfo - - // Try to gather 1000 entries - for object := range objectsCh { - if hasInvalidXMLChar(object.Key) { - // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document. - removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ - VersionID: object.VersionID, - GovernanceBypass: opts.GovernanceBypass, - }) - if err := removeResult.Err; err != nil { - // Version does not exist is not an error ignore and continue. 
-                switch ToErrorResponse(err).Code {
-                case "InvalidArgument", "NoSuchVersion":
-                    continue
-                }
-            }
-
-            resultCh <- removeResult
-            continue
-        }
-
-        batch = append(batch, object)
-        if count++; count >= maxEntries {
-            break
-        }
-    }
-    if count == 0 {
-        // Multi Objects Delete API doesn't accept an empty object list, quit immediately.
-        break
-    }
-    if count < maxEntries {
-        // We didn't have 1000 entries, so this is the last batch.
-        finish = true
-    }
-
-    // Build headers.
-    headers := make(http.Header)
-    if opts.GovernanceBypass {
-        // Set the bypass governance retention header.
-        headers.Set(amzBypassGovernance, "true")
-    }
-
-    // Generate remove multi objects XML request.
-    removeBytes := generateRemoveMultiObjectsRequest(batch)
-    // Execute POST on bucket to delete the batch of objects.
-    resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
-        bucketName:       bucketName,
-        queryValues:      urlValues,
-        contentBody:      bytes.NewReader(removeBytes),
-        contentLength:    int64(len(removeBytes)),
-        contentMD5Base64: sumMD5Base64(removeBytes),
-        contentSHA256Hex: sum256Hex(removeBytes),
-        customHeader:     headers,
-    })
-    if resp != nil {
-        if resp.StatusCode != http.StatusOK {
-            e := httpRespToErrorResponse(resp, bucketName, "")
-            resultCh <- RemoveObjectResult{ObjectName: "", Err: e}
-        }
-    }
-    if err != nil {
-        for _, b := range batch {
-            resultCh <- RemoveObjectResult{
-                ObjectName:      b.Key,
-                ObjectVersionID: b.VersionID,
-                Err:             err,
-            }
-        }
-        continue
-    }
-
-    // Process multiobjects remove xml response.
-    processRemoveMultiObjectsResponse(resp.Body, resultCh)
-
-    closeResponse(resp)
-    }
-}
-
-// RemoveIncompleteUpload aborts a partially uploaded object.
-func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
-    // Input validation.
-    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-        return err
-    }
-    if err := s3utils.CheckValidObjectName(objectName); err != nil {
-        return err
-    }
-    // Find multipart upload ids of the object to be aborted.
-    uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName)
-    if err != nil {
-        return err
-    }
-
-    for _, uploadID := range uploadIDs {
-        // Abort the incomplete multipart upload, based on the upload id passed.
-        err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
-        if err != nil {
-            return err
-        }
-    }
-
-    return nil
-}
-
-// abortMultipartUpload aborts a multipart upload for the given
-// uploadID; all previously uploaded parts are deleted.
-func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
-    // Input validation.
-    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-        return err
-    }
-    if err := s3utils.CheckValidObjectName(objectName); err != nil {
-        return err
-    }
-
-    // Initialize url queries.
-    urlValues := make(url.Values)
-    urlValues.Set("uploadId", uploadID)
-
-    // Execute DELETE on multipart upload.
-    resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
-        bucketName:       bucketName,
-        objectName:       objectName,
-        queryValues:      urlValues,
-        contentSHA256Hex: emptySHA256Hex,
-    })
-    defer closeResponse(resp)
-    if err != nil {
-        return err
-    }
-    if resp != nil {
-        if resp.StatusCode != http.StatusNoContent {
-            // Abort has no response body, handle it for any errors.
-            var errorResponse ErrorResponse
-            switch resp.StatusCode {
-            case http.StatusNotFound:
-                // This is needed specifically for abort and it cannot
-                // be converged into the default case.
- errorResponse = ErrorResponse{ - Code: "NoSuchUpload", - Message: "The specified multipart upload does not exist.", - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), - } - default: - return httpRespToErrorResponse(resp, bucketName, objectName) - } - return errorResponse - } - } - return nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go deleted file mode 100644 index 9ec8f4f..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-restore.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2018-2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/tags" -) - -// RestoreType represents the restore request type -type RestoreType string - -const ( - // RestoreSelect represents the restore SELECT operation - RestoreSelect = RestoreType("SELECT") -) - -// TierType represents a retrieval tier -type TierType string - -const ( - // TierStandard is the standard retrieval tier - TierStandard = TierType("Standard") - // TierBulk is the bulk retrieval tier - TierBulk = TierType("Bulk") - // TierExpedited is the expedited retrieval tier - TierExpedited = TierType("Expedited") -) - -// GlacierJobParameters represents the retrieval tier parameter -type GlacierJobParameters struct { - Tier TierType -} - -// Encryption contains the type of server-side encryption used during object retrieval -type Encryption struct { - EncryptionType string - KMSContext string - KMSKeyID string `xml:"KMSKeyId"` -} - -// MetadataEntry represents a metadata information of the restored object. 
-type MetadataEntry struct { - Name string - Value string -} - -// S3 holds properties of the copy of the archived object -type S3 struct { - AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"` - BucketName string - Prefix string - CannedACL *string `xml:"CannedACL,omitempty"` - Encryption *Encryption `xml:"Encryption,omitempty"` - StorageClass *string `xml:"StorageClass,omitempty"` - Tagging *tags.Tags `xml:"Tagging,omitempty"` - UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"` -} - -// SelectParameters holds the select request parameters -type SelectParameters struct { - XMLName xml.Name `xml:"SelectParameters"` - ExpressionType QueryExpressionType - Expression string - InputSerialization SelectObjectInputSerialization - OutputSerialization SelectObjectOutputSerialization -} - -// OutputLocation holds properties of the copy of the archived object -type OutputLocation struct { - XMLName xml.Name `xml:"OutputLocation"` - S3 S3 `xml:"S3"` -} - -// RestoreRequest holds properties of the restore object request -type RestoreRequest struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"` - Type *RestoreType `xml:"Type,omitempty"` - Tier *TierType `xml:"Tier,omitempty"` - Days *int `xml:"Days,omitempty"` - GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"` - Description *string `xml:"Description,omitempty"` - SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"` - OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"` -} - -// SetDays sets the days parameter of the restore request -func (r *RestoreRequest) SetDays(v int) { - r.Days = &v -} - -// SetGlacierJobParameters sets the GlacierJobParameters of the restore request -func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) { - r.GlacierJobParameters = &v -} - -// SetType sets the type of the restore request -func (r *RestoreRequest) SetType(v RestoreType) { - r.Type = &v -} - -// SetTier sets the retrieval tier of the restore request -func (r *RestoreRequest) SetTier(v TierType) { - r.Tier = &v -} - -// SetDescription sets the description of the restore request -func (r *RestoreRequest) SetDescription(v string) { - r.Description = &v -} - -// SetSelectParameters sets SelectParameters of the restore select request -func (r *RestoreRequest) SetSelectParameters(v SelectParameters) { - r.SelectParameters = &v -} - -// SetOutputLocation sets the properties of the copy of the archived object -func (r *RestoreRequest) SetOutputLocation(v OutputLocation) { - r.OutputLocation = &v -} - -// RestoreObject is a implementation of https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API -func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - restoreRequestBytes, err := xml.Marshal(req) - if err != nil { - return err - } - - urlValues := make(url.Values) - urlValues.Set("restore", "") - if versionID != "" { - urlValues.Set("versionId", versionID) - } - - // Execute POST on bucket/object. 
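Tying the builder methods above together: a hedged sketch that requests a 7-day Standard-tier restore of the current version of an archived object (client and ctx assumed; bucket and object names are illustrative):

    req := minio.RestoreRequest{}
    req.SetDays(7)
    req.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierStandard})
    // An empty versionID targets the current version.
    if err := client.RestoreObject(ctx, "mybucket", "archive/2023.tar", "", req); err != nil {
        log.Fatalln(err)
    }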
- resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentMD5Base64: sumMD5Base64(restoreRequestBytes), - contentSHA256Hex: sum256Hex(restoreRequestBytes), - contentBody: bytes.NewReader(restoreRequestBytes), - contentLength: int64(len(restoreRequestBytes)), - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - return nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go deleted file mode 100644 index 1527b74..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ /dev/null @@ -1,390 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/xml" - "errors" - "io" - "reflect" - "time" -) - -// listAllMyBucketsResult container for listBuckets response. -type listAllMyBucketsResult struct { - // Container for one or more buckets. - Buckets struct { - Bucket []BucketInfo - } - Owner owner -} - -// owner container for bucket owner information. -type owner struct { - DisplayName string - ID string -} - -// CommonPrefix container for prefix response. -type CommonPrefix struct { - Prefix string -} - -// ListBucketV2Result container for listObjects response version 2. -type ListBucketV2Result struct { - // A response can contain CommonPrefixes only if you have - // specified a delimiter. - CommonPrefixes []CommonPrefix - // Metadata about each object returned. - Contents []ObjectInfo - Delimiter string - - // Encoding type used to encode object keys in the response. - EncodingType string - - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - MaxKeys int64 - Name string - - // Hold the token that will be sent in the next request to fetch the next group of keys - NextContinuationToken string - - ContinuationToken string - Prefix string - - // FetchOwner and StartAfter are currently not used - FetchOwner string - StartAfter string -} - -// Version is an element in the list object versions response -type Version struct { - ETag string - IsLatest bool - Key string - LastModified time.Time - Owner Owner - Size int64 - StorageClass string - VersionID string `xml:"VersionId"` - - // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. - // Only returned by MinIO servers. - UserMetadata StringMap `json:"userMetadata,omitempty"` - - // x-amz-tagging values in their k/v values. - // Only returned by MinIO servers. 
- UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"` - - Internal *struct { - K int // Data blocks - M int // Parity blocks - } `xml:"Internal"` - - isDeleteMarker bool -} - -// ListVersionsResult is an element in the list object versions response -// and has a special Unmarshaler because we need to preserver the order -// of and in ListVersionsResult.Versions slice -type ListVersionsResult struct { - Versions []Version - - CommonPrefixes []CommonPrefix - Name string - Prefix string - Delimiter string - MaxKeys int64 - EncodingType string - IsTruncated bool - KeyMarker string - VersionIDMarker string - NextKeyMarker string - NextVersionIDMarker string -} - -// UnmarshalXML is a custom unmarshal code for the response of ListObjectVersions, the custom -// code will unmarshal and tags and save them in Versions field to -// preserve the lexical order of the listing. -func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (err error) { - for { - // Read tokens from the XML document in a stream. - t, err := d.Token() - if err != nil { - if err == io.EOF { - break - } - return err - } - - se, ok := t.(xml.StartElement) - if ok { - tagName := se.Name.Local - switch tagName { - case "Name", "Prefix", - "Delimiter", "EncodingType", - "KeyMarker", "NextKeyMarker": - var s string - if err = d.DecodeElement(&s, &se); err != nil { - return err - } - v := reflect.ValueOf(l).Elem().FieldByName(tagName) - if v.IsValid() { - v.SetString(s) - } - case "VersionIdMarker": - // VersionIdMarker is a special case because of 'Id' instead of 'ID' in field name - var s string - if err = d.DecodeElement(&s, &se); err != nil { - return err - } - l.VersionIDMarker = s - case "NextVersionIdMarker": - // NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in field name - var s string - if err = d.DecodeElement(&s, &se); err != nil { - return err - } - l.NextVersionIDMarker = s - case "IsTruncated": // bool - var b bool - if err = d.DecodeElement(&b, &se); err != nil { - return err - } - l.IsTruncated = b - case "MaxKeys": // int64 - var i int64 - if err = d.DecodeElement(&i, &se); err != nil { - return err - } - l.MaxKeys = i - case "CommonPrefixes": - var cp CommonPrefix - if err = d.DecodeElement(&cp, &se); err != nil { - return err - } - l.CommonPrefixes = append(l.CommonPrefixes, cp) - case "DeleteMarker", "Version": - var v Version - if err = d.DecodeElement(&v, &se); err != nil { - return err - } - if tagName == "DeleteMarker" { - v.isDeleteMarker = true - } - l.Versions = append(l.Versions, v) - default: - return errors.New("unrecognized option:" + tagName) - } - - } - } - return nil -} - -// ListBucketResult container for listObjects response. -type ListBucketResult struct { - // A response can contain CommonPrefixes only if you have - // specified a delimiter. - CommonPrefixes []CommonPrefix - // Metadata about each object returned. - Contents []ObjectInfo - Delimiter string - - // Encoding type used to encode object keys in the response. - EncodingType string - - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - Marker string - MaxKeys int64 - Name string - - // When response is truncated (the IsTruncated element value in - // the response is true), you can use the key name in this field - // as marker in the subsequent request to get next set of objects. 
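The point of the custom decoder above is that <Version> and <DeleteMarker> elements land in the single Versions slice in document order, which a plain struct unmarshal would not preserve. A small self-contained check against the definitions above (the XML body is synthetic):

    package main

    import (
        "encoding/xml"
        "fmt"

        "github.com/minio/minio-go/v7"
    )

    func main() {
        const body = `<ListVersionsResult>
            <Version><Key>a.txt</Key><VersionId>v2</VersionId><IsLatest>true</IsLatest></Version>
            <DeleteMarker><Key>a.txt</Key><VersionId>v1</VersionId></DeleteMarker>
        </ListVersionsResult>`
        var res minio.ListVersionsResult
        if err := xml.Unmarshal([]byte(body), &res); err != nil {
            panic(err)
        }
        for _, v := range res.Versions {
            fmt.Println(v.Key, v.VersionID, v.IsLatest) // printed in listing order
        }
    }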
- // Object storage lists objects in alphabetical order Note: This - // element is returned only if you have delimiter request - // parameter specified. If response does not include the NextMaker - // and it is truncated, you can use the value of the last Key in - // the response as the marker in the subsequent request to get the - // next set of object keys. - NextMarker string - Prefix string -} - -// ListMultipartUploadsResult container for ListMultipartUploads response -type ListMultipartUploadsResult struct { - Bucket string - KeyMarker string - UploadIDMarker string `xml:"UploadIdMarker"` - NextKeyMarker string - NextUploadIDMarker string `xml:"NextUploadIdMarker"` - EncodingType string - MaxUploads int64 - IsTruncated bool - Uploads []ObjectMultipartInfo `xml:"Upload"` - Prefix string - Delimiter string - // A response can contain CommonPrefixes only if you specify a delimiter. - CommonPrefixes []CommonPrefix -} - -// initiator container for who initiated multipart upload. -type initiator struct { - ID string - DisplayName string -} - -// copyObjectResult container for copy object response. -type copyObjectResult struct { - ETag string - LastModified time.Time // time string format "2006-01-02T15:04:05.000Z" -} - -// ObjectPart container for particular part of an object. -type ObjectPart struct { - // Part number identifies the part. - PartNumber int - - // Date and time the part was uploaded. - LastModified time.Time - - // Entity tag returned when the part was uploaded, usually md5sum - // of the part. - ETag string - - // Size of the uploaded part data. - Size int64 - - // Checksum values of each part. - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string -} - -// ListObjectPartsResult container for ListObjectParts response. -type ListObjectPartsResult struct { - Bucket string - Key string - UploadID string `xml:"UploadId"` - - Initiator initiator - Owner owner - - StorageClass string - PartNumberMarker int - NextPartNumberMarker int - MaxParts int - - // Indicates whether the returned list of parts is truncated. - IsTruncated bool - ObjectParts []ObjectPart `xml:"Part"` - - EncodingType string -} - -// initiateMultipartUploadResult container for InitiateMultiPartUpload -// response. -type initiateMultipartUploadResult struct { - Bucket string - Key string - UploadID string `xml:"UploadId"` -} - -// completeMultipartUploadResult container for completed multipart -// upload response. -type completeMultipartUploadResult struct { - Location string - Bucket string - Key string - ETag string - - // Checksum values, hash of hashes of parts. - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string -} - -// CompletePart sub container lists individual part numbers and their -// md5sum, part of completeMultipartUpload. -type CompletePart struct { - // Part number identifies the part. - PartNumber int - ETag string - - // Checksum values - ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` - ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` - ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` - ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` -} - -// completeMultipartUpload container for completing multipart upload. -type completeMultipartUpload struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` - Parts []CompletePart `xml:"Part"` -} - -// createBucketConfiguration container for bucket configuration. 
-type createBucketConfiguration struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` - Location string `xml:"LocationConstraint"` -} - -// deleteObject container for Delete element in MultiObjects Delete XML request -type deleteObject struct { - Key string - VersionID string `xml:"VersionId,omitempty"` -} - -// deletedObject container for Deleted element in MultiObjects Delete XML response -type deletedObject struct { - Key string - VersionID string `xml:"VersionId,omitempty"` - // These fields are ignored. - DeleteMarker bool - DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"` -} - -// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response -type nonDeletedObject struct { - Key string - Code string - Message string - VersionID string `xml:"VersionId"` -} - -// deletedMultiObjects container for MultiObjects Delete XML request -type deleteMultiObjects struct { - XMLName xml.Name `xml:"Delete"` - Quiet bool - Objects []deleteObject `xml:"Object"` -} - -// deletedMultiObjectsResult container for MultiObjects Delete XML response -type deleteMultiObjectsResult struct { - XMLName xml.Name `xml:"DeleteResult"` - DeletedObjects []deletedObject `xml:"Deleted"` - UnDeletedObjects []nonDeletedObject `xml:"Error"` -} diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go deleted file mode 100644 index 628d967..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-select.go +++ /dev/null @@ -1,757 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2018-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/xml" - "errors" - "fmt" - "hash" - "hash/crc32" - "io" - "net/http" - "net/url" - "strings" - - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// CSVFileHeaderInfo - is the parameter for whether to utilize headers. -type CSVFileHeaderInfo string - -// Constants for file header info. -const ( - CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" - CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE" - CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE" -) - -// SelectCompressionType - is the parameter for what type of compression is -// present -type SelectCompressionType string - -// Constants for compression types under select API. -const ( - SelectCompressionNONE SelectCompressionType = "NONE" - SelectCompressionGZIP SelectCompressionType = "GZIP" - SelectCompressionBZIP SelectCompressionType = "BZIP2" - - // Non-standard compression schemes, supported by MinIO hosts: - - SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression. 
- SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream - SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream - SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream -) - -// CSVQuoteFields - is the parameter for how CSV fields are quoted. -type CSVQuoteFields string - -// Constants for csv quote styles. -const ( - CSVQuoteFieldsAlways CSVQuoteFields = "Always" - CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded" -) - -// QueryExpressionType - is of what syntax the expression is, this should only -// be SQL -type QueryExpressionType string - -// Constants for expression type. -const ( - QueryExpressionTypeSQL QueryExpressionType = "SQL" -) - -// JSONType determines json input serialization type. -type JSONType string - -// Constants for JSONTypes. -const ( - JSONDocumentType JSONType = "DOCUMENT" - JSONLinesType JSONType = "LINES" -) - -// ParquetInputOptions parquet input specific options -type ParquetInputOptions struct{} - -// CSVInputOptions csv input specific options -type CSVInputOptions struct { - FileHeaderInfo CSVFileHeaderInfo - fileHeaderInfoSet bool - - RecordDelimiter string - recordDelimiterSet bool - - FieldDelimiter string - fieldDelimiterSet bool - - QuoteCharacter string - quoteCharacterSet bool - - QuoteEscapeCharacter string - quoteEscapeCharacterSet bool - - Comments string - commentsSet bool -} - -// SetFileHeaderInfo sets the file header info in the CSV input options -func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) { - c.FileHeaderInfo = val - c.fileHeaderInfoSet = true -} - -// SetRecordDelimiter sets the record delimiter in the CSV input options -func (c *CSVInputOptions) SetRecordDelimiter(val string) { - c.RecordDelimiter = val - c.recordDelimiterSet = true -} - -// SetFieldDelimiter sets the field delimiter in the CSV input options -func (c *CSVInputOptions) SetFieldDelimiter(val string) { - c.FieldDelimiter = val - c.fieldDelimiterSet = true -} - -// SetQuoteCharacter sets the quote character in the CSV input options -func (c *CSVInputOptions) SetQuoteCharacter(val string) { - c.QuoteCharacter = val - c.quoteCharacterSet = true -} - -// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options -func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) { - c.QuoteEscapeCharacter = val - c.quoteEscapeCharacterSet = true -} - -// SetComments sets the comments character in the CSV input options -func (c *CSVInputOptions) SetComments(val string) { - c.Comments = val - c.commentsSet = true -} - -// MarshalXML - produces the xml representation of the CSV input options struct -func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - if c.FileHeaderInfo != "" || c.fileHeaderInfoSet { - if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil { - return err - } - } - - if c.RecordDelimiter != "" || c.recordDelimiterSet { - if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { - return err - } - } - - if c.FieldDelimiter != "" || c.fieldDelimiterSet { - if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { - return err - } - } - - if c.QuoteCharacter != "" || c.quoteCharacterSet { - if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { - return err - 
} - } - - if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { - if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { - return err - } - } - - if c.Comments != "" || c.commentsSet { - if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// CSVOutputOptions csv output specific options -type CSVOutputOptions struct { - QuoteFields CSVQuoteFields - quoteFieldsSet bool - - RecordDelimiter string - recordDelimiterSet bool - - FieldDelimiter string - fieldDelimiterSet bool - - QuoteCharacter string - quoteCharacterSet bool - - QuoteEscapeCharacter string - quoteEscapeCharacterSet bool -} - -// SetQuoteFields sets the quote field parameter in the CSV output options -func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) { - c.QuoteFields = val - c.quoteFieldsSet = true -} - -// SetRecordDelimiter sets the record delimiter character in the CSV output options -func (c *CSVOutputOptions) SetRecordDelimiter(val string) { - c.RecordDelimiter = val - c.recordDelimiterSet = true -} - -// SetFieldDelimiter sets the field delimiter character in the CSV output options -func (c *CSVOutputOptions) SetFieldDelimiter(val string) { - c.FieldDelimiter = val - c.fieldDelimiterSet = true -} - -// SetQuoteCharacter sets the quote character in the CSV output options -func (c *CSVOutputOptions) SetQuoteCharacter(val string) { - c.QuoteCharacter = val - c.quoteCharacterSet = true -} - -// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options -func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) { - c.QuoteEscapeCharacter = val - c.quoteEscapeCharacterSet = true -} - -// MarshalXML - produces the xml representation of the CSVOutputOptions struct -func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - - if c.QuoteFields != "" || c.quoteFieldsSet { - if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil { - return err - } - } - - if c.RecordDelimiter != "" || c.recordDelimiterSet { - if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { - return err - } - } - - if c.FieldDelimiter != "" || c.fieldDelimiterSet { - if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { - return err - } - } - - if c.QuoteCharacter != "" || c.quoteCharacterSet { - if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { - return err - } - } - - if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { - if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// JSONInputOptions json input specific options -type JSONInputOptions struct { - Type JSONType - typeSet bool -} - -// SetType sets the JSON type in the JSON input options -func (j *JSONInputOptions) SetType(typ JSONType) { - j.Type = typ - j.typeSet = true -} - -// MarshalXML - produces the xml representation of the JSONInputOptions struct -func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if 
err := e.EncodeToken(start); err != nil { - return err - } - - if j.Type != "" || j.typeSet { - if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// JSONOutputOptions - json output specific options -type JSONOutputOptions struct { - RecordDelimiter string - recordDelimiterSet bool -} - -// SetRecordDelimiter sets the record delimiter in the JSON output options -func (j *JSONOutputOptions) SetRecordDelimiter(val string) { - j.RecordDelimiter = val - j.recordDelimiterSet = true -} - -// MarshalXML - produces the xml representation of the JSONOutputOptions struct -func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - - if j.RecordDelimiter != "" || j.recordDelimiterSet { - if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// SelectObjectInputSerialization - input serialization parameters -type SelectObjectInputSerialization struct { - CompressionType SelectCompressionType `xml:"CompressionType,omitempty"` - Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` - CSV *CSVInputOptions `xml:"CSV,omitempty"` - JSON *JSONInputOptions `xml:"JSON,omitempty"` -} - -// SelectObjectOutputSerialization - output serialization parameters. -type SelectObjectOutputSerialization struct { - CSV *CSVOutputOptions `xml:"CSV,omitempty"` - JSON *JSONOutputOptions `xml:"JSON,omitempty"` -} - -// SelectObjectOptions - represents the input select body -type SelectObjectOptions struct { - XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` - ServerSideEncryption encrypt.ServerSide `xml:"-"` - Expression string - ExpressionType QueryExpressionType - InputSerialization SelectObjectInputSerialization - OutputSerialization SelectObjectOutputSerialization - RequestProgress struct { - Enabled bool - } -} - -// Header returns the http.Header representation of the SelectObject options. -func (o SelectObjectOptions) Header() http.Header { - headers := make(http.Header) - if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { - o.ServerSideEncryption.Marshal(headers) - } - return headers -} - -// SelectObjectType - is the parameter which defines what type of object the -// operation is being performed on. -type SelectObjectType string - -// Constants for input data types. -const ( - SelectObjectTypeCSV SelectObjectType = "CSV" - SelectObjectTypeJSON SelectObjectType = "JSON" - SelectObjectTypeParquet SelectObjectType = "Parquet" -) - -// preludeInfo is used for keeping track of necessary information from the -// prelude. -type preludeInfo struct { - totalLen uint32 - headerLen uint32 -} - -// SelectResults is used for the streaming responses from the server. -type SelectResults struct { - pipeReader *io.PipeReader - resp *http.Response - stats *StatsMessage - progress *ProgressMessage -} - -// ProgressMessage is a struct for progress xml message. -type ProgressMessage struct { - XMLName xml.Name `xml:"Progress" json:"-"` - StatsMessage -} - -// StatsMessage is a struct for stat xml message. -type StatsMessage struct { - XMLName xml.Name `xml:"Stats" json:"-"` - BytesScanned int64 - BytesProcessed int64 - BytesReturned int64 -} - -// messageType represents the type of message. 
-type messageType string - -const ( - errorMsg messageType = "error" - commonMsg messageType = "event" -) - -// eventType represents the type of event. -type eventType string - -// list of event-types returned by Select API. -const ( - endEvent eventType = "End" - recordsEvent eventType = "Records" - progressEvent eventType = "Progress" - statsEvent eventType = "Stats" -) - -// contentType represents content type of event. -type contentType string - -const ( - xmlContent contentType = "text/xml" -) - -// SelectObjectContent is an implementation of the AWS S3 API documented at http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html. -func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - - selectReqBytes, err := xml.Marshal(opts) - if err != nil { - return nil, err - } - - urlValues := make(url.Values) - urlValues.Set("select", "") - urlValues.Set("select-type", "2") - - // Execute POST on bucket/object. - resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: opts.Header(), - contentMD5Base64: sumMD5Base64(selectReqBytes), - contentSHA256Hex: sum256Hex(selectReqBytes), - contentBody: bytes.NewReader(selectReqBytes), - contentLength: int64(len(selectReqBytes)), - }) - if err != nil { - return nil, err - } - - return NewSelectResults(resp, bucketName) -} - -// NewSelectResults creates a Select Result parser that parses the response -// and returns a Reader that will return parsed and assembled select output. -func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, "") - } - - pipeReader, pipeWriter := io.Pipe() - streamer := &SelectResults{ - resp: resp, - stats: &StatsMessage{}, - progress: &ProgressMessage{}, - pipeReader: pipeReader, - } - streamer.start(pipeWriter) - return streamer, nil -} - -// Close - closes the underlying response body and the stream reader. -func (s *SelectResults) Close() error { - defer closeResponse(s.resp) - return s.pipeReader.Close() -} - -// Read - implements io.Reader for the parsed and assembled SelectObjectContent records. -func (s *SelectResults) Read(b []byte) (n int, err error) { - return s.pipeReader.Read(b) -} - -// Stats - information about a request's stats when processing is complete. -func (s *SelectResults) Stats() *StatsMessage { - return s.stats -} - -// Progress - information about the progress of a request. -func (s *SelectResults) Progress() *ProgressMessage { - return s.progress -} - -// start decodes the event-stream response body into individual events and -// forwards the record payloads through the pipe. -func (s *SelectResults) start(pipeWriter *io.PipeWriter) { - go func() { - for { - var prelude preludeInfo - headers := make(http.Header) - var err error - - // Create a CRC32 hasher to verify each message. - crc := crc32.New(crc32.IEEETable) - crcReader := io.TeeReader(s.resp.Body, crc) - - // Extract the prelude (12 bytes) into a struct to extract relevant information.
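Before the prelude parsing continues, a usage sketch may help tie SelectObjectContent and SelectResults together. This is a minimal, hypothetical example: the bucket, object, and query are placeholders, and a configured *minio.Client named client is assumed:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
)

// selectCSV streams the records of a hypothetical S3 Select query to stdout.
func selectCSV(ctx context.Context, client *minio.Client) {
	opts := minio.SelectObjectOptions{
		Expression:     "SELECT s.name FROM s3object s", // placeholder query
		ExpressionType: minio.QueryExpressionTypeSQL,
		InputSerialization: minio.SelectObjectInputSerialization{
			CompressionType: minio.SelectCompressionNONE,
			CSV:             &minio.CSVInputOptions{FileHeaderInfo: minio.CSVFileHeaderInfoUse},
		},
		OutputSerialization: minio.SelectObjectOutputSerialization{
			CSV: &minio.CSVOutputOptions{},
		},
	}
	res, err := client.SelectObjectContent(ctx, "my-bucket", "people.csv", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Close()
	// Read returns only the Records payloads; Stats and Progress are decoded
	// into their own structs as the stream is consumed.
	if _, err := io.Copy(os.Stdout, res); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("bytes scanned: %d\n", res.Stats().BytesScanned)
}
```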
- prelude, err = processPrelude(crcReader, crc) - if err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - - // Extract the headers(variable bytes) into a struct to extract relevant information - if prelude.headerLen > 0 { - if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - } - - // Get the actual payload length so that the appropriate amount of - // bytes can be read or parsed. - payloadLen := prelude.PayloadLen() - - m := messageType(headers.Get("message-type")) - - switch m { - case errorMsg: - pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) - closeResponse(s.resp) - return - case commonMsg: - // Get content-type of the payload. - c := contentType(headers.Get("content-type")) - - // Get event type of the payload. - e := eventType(headers.Get("event-type")) - - // Handle all supported events. - switch e { - case endEvent: - pipeWriter.Close() - closeResponse(s.resp) - return - case recordsEvent: - if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - case progressEvent: - switch c { - case xmlContent: - if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - default: - pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) - closeResponse(s.resp) - return - } - case statsEvent: - switch c { - case xmlContent: - if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - default: - pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) - closeResponse(s.resp) - return - } - } - } - - // Ensures that the full message's CRC is correct and - // that the message is not corrupted - if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - - } - }() -} - -// PayloadLen is a function that calculates the length of the payload. -func (p preludeInfo) PayloadLen() int64 { - return int64(p.totalLen - p.headerLen - 16) -} - -// processPrelude is the function that reads the 12 bytes of the prelude and -// ensures the CRC is correct while also extracting relevant information into -// the struct, -func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { - var err error - pInfo := preludeInfo{} - - // reads total length of the message (first 4 bytes) - pInfo.totalLen, err = extractUint32(prelude) - if err != nil { - return pInfo, err - } - - // reads total header length of the message (2nd 4 bytes) - pInfo.headerLen, err = extractUint32(prelude) - if err != nil { - return pInfo, err - } - - // checks that the CRC is correct (3rd 4 bytes) - preCRC := crc.Sum32() - if err := checkCRC(prelude, preCRC); err != nil { - return pInfo, err - } - - return pInfo, nil -} - -// extracts the relevant information from the Headers. 
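For orientation before the header-extraction helpers: each event-stream message is framed as a 4-byte big-endian total length, a 4-byte header-section length, and a 4-byte CRC of those first 8 bytes (together the 12-byte prelude read above), followed by the headers, the payload, and a trailing 4-byte CRC of the whole message. That is where the 16 in PayloadLen comes from: 12 prelude bytes plus the 4-byte message CRC. A tiny in-package arithmetic check:

```go
// 256 total - 60 header - (12 prelude + 4 message CRC) = 180 payload bytes.
p := preludeInfo{totalLen: 256, headerLen: 60}
fmt.Println(p.PayloadLen()) // 180
```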
-func extractHeader(body io.Reader, myHeaders http.Header) error { - for { - // extracts the header name (the first part of the header). - headerTypeName, err := extractHeaderType(body) - if err != nil { - // io.EOF means we have read all of the headers. - if err == io.EOF { - break - } - return err - } - - // reads the header-value type byte (always 7, i.e. string) and ignores it. - extractUint8(body) - - headerValueName, err := extractHeaderValue(body) - if err != nil { - return err - } - - myHeaders.Set(headerTypeName, headerValueName) - - } - return nil -} - -// extractHeaderType extracts the first half of the header message, the header type. -func extractHeaderType(body io.Reader) (string, error) { - // extracts the 1-byte header-name length - headerNameLen, err := extractUint8(body) - if err != nil { - return "", err - } - // extracts the string with the appropriate number of bytes - headerName, err := extractString(body, int(headerNameLen)) - if err != nil { - return "", err - } - return strings.TrimPrefix(headerName, ":"), nil -} - -// extractHeaderValue extracts the second half of the header message, the -// header value. -func extractHeaderValue(body io.Reader) (string, error) { - bodyLen, err := extractUint16(body) - if err != nil { - return "", err - } - bodyName, err := extractString(body, int(bodyLen)) - if err != nil { - return "", err - } - return bodyName, nil -} - -// extractString reads exactly lenBytes bytes from the reader and returns them as a string. -func extractString(source io.Reader, lenBytes int) (string, error) { - myVal := make([]byte, lenBytes) - // Use readFull so a short read cannot silently truncate the string. - _, err := readFull(source, myVal) - if err != nil { - return "", err - } - return string(myVal), nil -} - -// extractUint32 extracts a 4 byte integer from the reader. -func extractUint32(r io.Reader) (uint32, error) { - buf := make([]byte, 4) - _, err := readFull(r, buf) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint32(buf), nil -} - -// extractUint16 extracts a 2 byte integer from the reader. -func extractUint16(r io.Reader) (uint16, error) { - buf := make([]byte, 2) - _, err := readFull(r, buf) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint16(buf), nil -} - -// extractUint8 extracts a 1 byte integer from the reader. -func extractUint8(r io.Reader) (uint8, error) { - buf := make([]byte, 1) - _, err := readFull(r, buf) - if err != nil { - return 0, err - } - return buf[0], nil -} - -// checkCRC ensures that the CRC matches with the one from the reader. -func checkCRC(r io.Reader, expect uint32) error { - msgCRC, err := extractUint32(r) - if err != nil { - return err - } - - if msgCRC != expect { - return fmt.Errorf("checksum mismatch: message CRC 0x%X does not equal expected CRC 0x%X", msgCRC, expect) - } - return nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go deleted file mode 100644 index b043dc4..0000000 --- a/vendor/github.com/minio/minio-go/v7/api-stat.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "net/http" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to -// control cancellations and timeouts. -func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return false, err - } - - // Execute HEAD on bucketName. - resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ - bucketName: bucketName, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - if ToErrorResponse(err).Code == "NoSuchBucket" { - return false, nil - } - return false, err - } - if resp != nil { - resperr := httpRespToErrorResponse(resp, bucketName, "") - if ToErrorResponse(resperr).Code == "NoSuchBucket" { - return false, nil - } - if resp.StatusCode != http.StatusOK { - return false, httpRespToErrorResponse(resp, bucketName, "") - } - } - return true, nil -} - -// StatObject verifies if object exists, you have permission to access it -// and returns information about the object. -func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - headers := opts.Header() - if opts.Internal.ReplicationDeleteMarker { - headers.Set(minIOBucketReplicationDeleteMarker, "true") - } - if opts.Internal.IsReplicationReadyForDeleteMarker { - headers.Set(isMinioTgtReplicationReady, "true") - } - - // Execute HEAD on objectName. - resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: opts.toQueryValues(), - contentSHA256Hex: emptySHA256Hex, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - - if resp != nil { - deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" - replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true" - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { - errResp := ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "MethodNotAllowed", - Message: "The specified method is not allowed against this resource.", - BucketName: bucketName, - Key: objectName, - } - return ObjectInfo{ - VersionID: resp.Header.Get(amzVersionID), - IsDeleteMarker: deleteMarker, - }, errResp - } - return ObjectInfo{ - VersionID: resp.Header.Get(amzVersionID), - IsDeleteMarker: deleteMarker, - ReplicationReady: replicationReady, // whether delete marker can be replicated - }, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - return ToObjectInfo(bucketName, objectName, resp.Header) -} diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go deleted file mode 100644 index f8a9b34..0000000 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ /dev/null @@ -1,995 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2023 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "fmt" - "hash/crc32" - "io" - "math/rand" - "net" - "net/http" - "net/http/cookiejar" - "net/http/httptrace" - "net/http/httputil" - "net/url" - "os" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - md5simd "github.com/minio/md5-simd" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/signer" - "golang.org/x/net/publicsuffix" -) - -// Client implements Amazon S3 compatible methods. -type Client struct { - // Standard options. - - // Parsed endpoint url provided by the user. - endpointURL *url.URL - - // Holds various credential providers. - credsProvider *credentials.Credentials - - // Custom signerType value overrides all credentials. - overrideSignerType credentials.SignatureType - - // User supplied. - appInfo struct { - appName string - appVersion string - } - - // Indicate whether we are using https or not - secure bool - - // Needs allocation. - httpClient *http.Client - httpTrace *httptrace.ClientTrace - bucketLocCache *bucketLocationCache - - // Advanced functionality. - isTraceEnabled bool - traceErrorsOnly bool - traceOutput io.Writer - - // S3 specific accelerated endpoint. - s3AccelerateEndpoint string - - // Region endpoint - region string - - // Random seed. - random *rand.Rand - - // lookup indicates type of url lookup supported by server. If not specified, - // default to Auto. - lookup BucketLookupType - - // Factory for MD5 hash functions. - md5Hasher func() md5simd.Hasher - sha256Hasher func() md5simd.Hasher - - healthStatus int32 - - trailingHeaderSupport bool -} - -// Options for New method -type Options struct { - Creds *credentials.Credentials - Secure bool - Transport http.RoundTripper - Trace *httptrace.ClientTrace - Region string - BucketLookup BucketLookupType - - // Allows setting a custom region lookup based on URL pattern - // not all URL patterns are covered by this library so if you - // have a custom endpoints with many regions you can use this - // function to perform region lookups appropriately. - CustomRegionViaURL func(u url.URL) string - - // TrailingHeaders indicates server support of trailing headers. - // Only supported for v4 signatures. - TrailingHeaders bool - - // Custom hash routines. Leave nil to use standard. - CustomMD5 func() md5simd.Hasher - CustomSHA256 func() md5simd.Hasher -} - -// Global constants. -const ( - libraryName = "minio-go" - libraryVersion = "v7.0.66" -) - -// User Agent should always following the below style. -// Please open an issue to discuss any new changes here. -// -// MinIO (OS; ARCH) LIB/VER APP/VER -const ( - libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " - libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion -) - -// BucketLookupType is type of url lookup supported by server. 
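Options is consumed by New below; a minimal construction sketch (endpoint and keys are placeholders):

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials; Secure selects https.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("endpoint:", client.EndpointURL())
}
```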
-type BucketLookupType int - -// Different types of url lookup supported by the server.Initialized to BucketLookupAuto -const ( - BucketLookupAuto BucketLookupType = iota - BucketLookupDNS - BucketLookupPath -) - -// New - instantiate minio client with options -func New(endpoint string, opts *Options) (*Client, error) { - if opts == nil { - return nil, errors.New("no options provided") - } - clnt, err := privateNew(endpoint, opts) - if err != nil { - return nil, err - } - // If Amazon S3 set to signature v4. - if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { - clnt.overrideSignerType = credentials.SignatureV4 - } - - return clnt, nil -} - -// EndpointURL returns the URL of the S3 endpoint. -func (c *Client) EndpointURL() *url.URL { - endpoint := *c.endpointURL // copy to prevent callers from modifying internal state - return &endpoint -} - -// lockedRandSource provides protected rand source, implements rand.Source interface. -type lockedRandSource struct { - lk sync.Mutex - src rand.Source -} - -// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. -func (r *lockedRandSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -// Seed uses the provided seed value to initialize the generator to a -// deterministic state. -func (r *lockedRandSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -func privateNew(endpoint string, opts *Options) (*Client, error) { - // construct endpoint. - endpointURL, err := getEndpointURL(endpoint, opts.Secure) - if err != nil { - return nil, err - } - - // Initialize cookies to preserve server sent cookies if any and replay - // them upon each request. - jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) - if err != nil { - return nil, err - } - - // instantiate new Client. - clnt := new(Client) - - // Save the credentials. - clnt.credsProvider = opts.Creds - - // Remember whether we are using https or not - clnt.secure = opts.Secure - - // Save endpoint URL, user agent for future uses. - clnt.endpointURL = endpointURL - - transport := opts.Transport - if transport == nil { - transport, err = DefaultTransport(opts.Secure) - if err != nil { - return nil, err - } - } - - clnt.httpTrace = opts.Trace - - // Instantiate http client and bucket location cache. - clnt.httpClient = &http.Client{ - Jar: jar, - Transport: transport, - CheckRedirect: func(req *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - }, - } - - // Sets custom region, if region is empty bucket location cache is used automatically. - if opts.Region == "" { - if opts.CustomRegionViaURL != nil { - opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL) - } else { - opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL) - } - } - clnt.region = opts.Region - - // Instantiate bucket location cache. - clnt.bucketLocCache = newBucketLocationCache() - - // Introduce a new locked random seed. - clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) - - // Add default md5 hasher. - clnt.md5Hasher = opts.CustomMD5 - clnt.sha256Hasher = opts.CustomSHA256 - if clnt.md5Hasher == nil { - clnt.md5Hasher = newMd5Hasher - } - if clnt.sha256Hasher == nil { - clnt.sha256Hasher = newSHA256Hasher - } - - clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4() - - // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined - // by the SDK. 
When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. - clnt.lookup = opts.BucketLookup - - // healthcheck is not initialized - clnt.healthStatus = unknown - - // Return. - return clnt, nil -} - -// SetAppInfo - add application details to user agent. -func (c *Client) SetAppInfo(appName, appVersion string) { - // if app name and version are not set, we do not set a new user agent. - if appName != "" && appVersion != "" { - c.appInfo.appName = appName - c.appInfo.appVersion = appVersion - } -} - -// TraceOn - enable HTTP tracing. -func (c *Client) TraceOn(outputStream io.Writer) { - // if outputStream is nil then default to os.Stdout. - if outputStream == nil { - outputStream = os.Stdout - } - // Sets a new output stream. - c.traceOutput = outputStream - - // Enable tracing. - c.isTraceEnabled = true -} - -// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced. -func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) { - c.TraceOn(outputStream) - c.traceErrorsOnly = true -} - -// TraceErrorsOnlyOff - turns off errors-only tracing; everything will be traced after this call. -// If all tracing needs to be turned off, call TraceOff(). -func (c *Client) TraceErrorsOnlyOff() { - c.traceErrorsOnly = false -} - -// TraceOff - disable HTTP tracing. -func (c *Client) TraceOff() { - // Disable tracing. - c.isTraceEnabled = false - c.traceErrorsOnly = false -} - -// SetS3TransferAccelerate - turns the s3 accelerated endpoint on or off for all your -// requests. This feature is specific to S3; for all other endpoints this -// function does nothing. To read further details on s3 transfer acceleration -// please visit - -// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { - if s3utils.IsAmazonEndpoint(*c.endpointURL) { - c.s3AccelerateEndpoint = accelerateEndpoint - } -} - -// hashMaterials provides the relevant initialized hash algorithm writers -// based on the expected signature type. -// -// - For a signature v4 request, if the connection is insecure compute only sha256. -// - For a signature v4 request, if the connection is secure compute only md5. -// - For an anonymous request compute md5. -func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { - hashSums = make(map[string][]byte) - hashAlgos = make(map[string]md5simd.Hasher) - if c.overrideSignerType.IsV4() { - if c.secure { - hashAlgos["md5"] = c.md5Hasher() - } else { - if isSha256Requested { - hashAlgos["sha256"] = c.sha256Hasher() - } - } - } else { - if c.overrideSignerType.IsAnonymous() { - hashAlgos["md5"] = c.md5Hasher() - } - } - if isMd5Requested { - hashAlgos["md5"] = c.md5Hasher() - } - return hashAlgos, hashSums -} - -const ( - unknown = -1 - offline = 0 - online = 1 -) - -// IsOnline returns true if the healthcheck is enabled and the client is online. -// If the HealthCheck function has not been called this will always return true. -func (c *Client) IsOnline() bool { - return !c.IsOffline() -} - -// markOffline flips healthStatus from online to offline. -func (c *Client) markOffline() { - atomic.CompareAndSwapInt32(&c.healthStatus, online, offline) -} - -// IsOffline returns true if the healthcheck is enabled and the client is offline. -// If the HealthCheck function has not been called this will always return false.
-func (c *Client) IsOffline() bool { - return atomic.LoadInt32(&c.healthStatus) == offline -} - -// HealthCheck starts a healthcheck to see if endpoint is up. -// Returns a context cancellation function, to stop the health check, -// and an error if health check is already started. -func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) { - if atomic.LoadInt32(&c.healthStatus) != unknown { - return nil, fmt.Errorf("health check is running") - } - if hcDuration < 1*time.Second { - return nil, fmt.Errorf("health check duration should be at least 1 second") - } - probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-") - ctx, cancelFn := context.WithCancel(context.Background()) - atomic.StoreInt32(&c.healthStatus, offline) - { - // Change to online, if we can connect. - gctx, gcancel := context.WithTimeout(ctx, 3*time.Second) - _, err := c.getBucketLocation(gctx, probeBucketName) - gcancel() - if !IsNetworkOrHostDown(err, false) { - switch ToErrorResponse(err).Code { - case "NoSuchBucket", "AccessDenied", "": - atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) - } - } - } - - go func(duration time.Duration) { - timer := time.NewTimer(duration) - defer timer.Stop() - for { - select { - case <-ctx.Done(): - atomic.StoreInt32(&c.healthStatus, unknown) - return - case <-timer.C: - // Do health check the first time and ONLY if the connection is marked offline - if c.IsOffline() { - gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second) - _, err := c.getBucketLocation(gctx, probeBucketName) - gcancel() - if !IsNetworkOrHostDown(err, false) { - switch ToErrorResponse(err).Code { - case "NoSuchBucket", "AccessDenied", "": - atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) - } - } - } - - timer.Reset(duration) - } - } - }(hcDuration) - return cancelFn, nil -} - -// requestMetadata - is container for all the values to make a request. -type requestMetadata struct { - // If set newRequest presigns the URL. - presignURL bool - - // User supplied. - bucketName string - objectName string - queryValues url.Values - customHeader http.Header - extraPresignHeader http.Header - expires int64 - - // Generated by our internal code. - bucketLocation string - contentBody io.Reader - contentLength int64 - contentMD5Base64 string // carries base64 encoded md5sum - contentSHA256Hex string // carries hex encoded sha256sum - streamSha256 bool - addCrc bool - trailer http.Header // (http.Request).Trailer. Requires v4 signature. -} - -// dumpHTTP - dump HTTP request and response. -func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error { - // Starts http dump. - _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") - if err != nil { - return err - } - - // Filter out Signature field from Authorization header. - origAuth := req.Header.Get("Authorization") - if origAuth != "" { - req.Header.Set("Authorization", redactSignature(origAuth)) - } - - // Only display request header. - reqTrace, err := httputil.DumpRequestOut(req, false) - if err != nil { - return err - } - - // Write request to trace output. - _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) - if err != nil { - return err - } - - // Only display response header. - var respTrace []byte - - // For errors we make sure to dump response body as well. 
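HealthCheck is opt-in; nothing probes the endpoint until it is started. A short sketch of wiring it up, before dumpHTTP continues below (the interval is arbitrary; assumes the client from earlier plus the log and time imports):

```go
// Start background probing; the duration must be at least 1 second.
cancel, err := client.HealthCheck(2 * time.Second)
if err != nil {
	log.Fatal(err) // a health check is already running
}
defer cancel() // stops the probe goroutine

if client.IsOnline() {
	log.Println("endpoint reachable")
}
```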
- if resp.StatusCode != http.StatusOK && - resp.StatusCode != http.StatusPartialContent && - resp.StatusCode != http.StatusNoContent { - respTrace, err = httputil.DumpResponse(resp, true) - if err != nil { - return err - } - } else { - respTrace, err = httputil.DumpResponse(resp, false) - if err != nil { - return err - } - } - - // Write response to trace output. - _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) - if err != nil { - return err - } - - // Ends the http dump. - _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") - if err != nil { - return err - } - - // Returns success. - return nil -} - -// do - execute http request. -func (c *Client) do(req *http.Request) (resp *http.Response, err error) { - defer func() { - if IsNetworkOrHostDown(err, false) { - c.markOffline() - } - }() - - resp, err = c.httpClient.Do(req) - if err != nil { - // Handle this specifically for now until future Golang versions fix this issue properly. - if urlErr, ok := err.(*url.Error); ok { - if strings.Contains(urlErr.Err.Error(), "EOF") { - return nil, &url.Error{ - Op: urlErr.Op, - URL: urlErr.URL, - Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), - } - } - } - return nil, err - } - - // Response should never be nil; report an error if that is the case. - if resp == nil { - msg := "Response is empty. " + reportIssue - return nil, errInvalidArgument(msg) - } - - // If trace is enabled, dump http request and response, - // except when traceErrorsOnly is enabled and the response's status code is OK. - if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) { - err = c.dumpHTTP(req, resp) - if err != nil { - return nil, err - } - } - - return resp, nil -} - -// List of success status. -var successStatus = []int{ - http.StatusOK, - http.StatusNoContent, - http.StatusPartialContent, -} - -// executeMethod - instantiates a request for the given method and retries it -// upon failure, up to maxRetries attempts, using a standard backoff algorithm -// with jitter. -func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { - if c.IsOffline() { - return nil, errors.New(c.endpointURL.String() + " is offline.") - } - - var retryable bool // Indicates if request can be retried. - var bodySeeker io.Seeker // Extracted seeker from io.Reader. - reqRetry := MaxRetry // Indicates how many times we can retry the request - - if metadata.contentBody != nil { - // Check if body is seekable then it is retryable. - bodySeeker, retryable = metadata.contentBody.(io.Seeker) - switch bodySeeker { - case os.Stdin, os.Stdout, os.Stderr: - retryable = false - } - // Retry only when reader is seekable - if !retryable { - reqRetry = 1 - } - - // Figure out if the body can be closed - if yes - // we will definitely close it upon the function - // return. - bodyCloser, ok := metadata.contentBody.(io.Closer) - if ok { - defer bodyCloser.Close() - } - } - - // Create cancel context to control 'newRetryTimer' go routine. - retryCtx, cancel := context.WithCancel(ctx) - - // Indicate to our routine to exit cleanly upon return. - defer cancel() - - for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { - // Retry the following body while the request keeps failing, until - // maxRetries is exhausted; each attempt first waits out a backoff - // period (a sketch of that backoff follows).
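newRetryTimer itself lives in retry.go (not part of this hunk), but the shape of a capped, jittered exponential backoff of the kind described above is roughly the following. This is a sketch, not the SDK's exact implementation:

```go
import (
	"math/rand"
	"time"
)

// backoff grows 2^attempt from unit, caps at maxWait, and subtracts a
// random jitter slice so concurrent clients do not retry in lockstep.
func backoff(attempt uint, unit, maxWait time.Duration, jitter float64, rng *rand.Rand) time.Duration {
	d := unit << attempt
	if d > maxWait {
		d = maxWait
	}
	return d - time.Duration(jitter*float64(d)*rng.Float64())
}
```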
- if retryable { - // Seek back to beginning for each attempt. - if _, err = bodySeeker.Seek(0, 0); err != nil { - // If seek failed, no need to retry. - return nil, err - } - } - - if metadata.addCrc { - if metadata.trailer == nil { - metadata.trailer = make(http.Header, 1) - } - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { - // Update trailer when done. - metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) - }) - metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) - } - // Instantiate a new request. - var req *http.Request - req, err = c.newRequest(ctx, method, metadata) - if err != nil { - errResponse := ToErrorResponse(err) - if isS3CodeRetryable(errResponse.Code) { - continue // Retry. - } - - return nil, err - } - - // Initiate the request. - res, err = c.do(req) - if err != nil { - if isRequestErrorRetryable(err) { - // Retry the request - continue - } - return nil, err - } - - // For any known successful http status, return quickly. - for _, httpStatus := range successStatus { - if httpStatus == res.StatusCode { - return res, nil - } - } - - // Read the body to be saved later. - errBodyBytes, err := io.ReadAll(res.Body) - // res.Body should be closed - closeResponse(res) - if err != nil { - return nil, err - } - - // Save the body. - errBodySeeker := bytes.NewReader(errBodyBytes) - res.Body = io.NopCloser(errBodySeeker) - - // For errors verify if its retryable otherwise fail quickly. - errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) - - // Save the body back again. - errBodySeeker.Seek(0, 0) // Seek back to starting point. - res.Body = io.NopCloser(errBodySeeker) - - // Bucket region if set in error response and the error - // code dictates invalid region, we can retry the request - // with the new region. - // - // Additionally, we should only retry if bucketLocation and custom - // region is empty. - if c.region == "" { - switch errResponse.Code { - case "AuthorizationHeaderMalformed": - fallthrough - case "InvalidRegion": - fallthrough - case "AccessDenied": - if errResponse.Region == "" { - // Region is empty we simply return the error. - return res, err - } - // Region is not empty figure out a way to - // handle this appropriately. - if metadata.bucketName != "" { - // Gather Cached location only if bucketName is present. - if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region { - c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) - continue // Retry. - } - } else { - // This is for ListBuckets() fallback. - if errResponse.Region != metadata.bucketLocation { - // Retry if the error response has a different region - // than the request we just made. - metadata.bucketLocation = errResponse.Region - continue // Retry - } - } - } - } - - // Verify if error response code is retryable. - if isS3CodeRetryable(errResponse.Code) { - continue // Retry. - } - - // Verify if http status code is retryable. - if isHTTPStatusRetryable(res.StatusCode) { - continue // Retry. - } - - // For all other cases break out of the retry loop. - break - } - - // Return an error when retry is canceled or deadlined - if e := retryCtx.Err(); e != nil { - return nil, e - } - - return res, err -} - -// newRequest - instantiate a new HTTP request for a given method. 
-func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { - // If no method is supplied default to 'POST'. - if method == "" { - method = http.MethodPost - } - - location := metadata.bucketLocation - if location == "" { - if metadata.bucketName != "" { - // Gather location only if bucketName is present. - location, err = c.getBucketLocation(ctx, metadata.bucketName) - if err != nil { - return nil, err - } - } - if location == "" { - location = getDefaultLocation(*c.endpointURL, c.region) - } - } - - // Look if target url supports virtual host. - // We explicitly disallow MakeBucket calls to not use virtual DNS style, - // since the resolution may fail. - isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0) - isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket - - // Construct a new target URL. - targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, - isVirtualHost, metadata.queryValues) - if err != nil { - return nil, err - } - - if c.httpTrace != nil { - ctx = httptrace.WithClientTrace(ctx, c.httpTrace) - } - - // Initialize a new HTTP request for the method. - req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil) - if err != nil { - return nil, err - } - - // Get credentials from the configured credentials provider. - value, err := c.credsProvider.Get() - if err != nil { - return nil, err - } - - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - // Generate presign url if needed, return right here. - if metadata.expires != 0 && metadata.presignURL { - if signerType.IsAnonymous() { - return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") - } - if metadata.extraPresignHeader != nil { - if signerType.IsV2() { - return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.") - } - for k, v := range metadata.extraPresignHeader { - req.Header.Set(k, v[0]) - } - } - if signerType.IsV2() { - // Presign URL with signature v2. - req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) - } else if signerType.IsV4() { - // Presign URL with signature v4. - req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) - } - return req, nil - } - - // Set 'User-Agent' header for the request. - c.setUserAgent(req) - - // Set all headers. - for k, v := range metadata.customHeader { - req.Header.Set(k, v[0]) - } - - // Go net/http notoriously closes the request body. - // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. - // This can cause underlying *os.File seekers to fail, avoid that - // by making sure to wrap the closer as a nop. - if metadata.contentLength == 0 { - req.Body = nil - } else { - req.Body = io.NopCloser(metadata.contentBody) - } - - // Set incoming content-length. 
- req.ContentLength = metadata.contentLength - if req.ContentLength <= -1 { - // For unknown content length, we upload using transfer-encoding: chunked. - req.TransferEncoding = []string{"chunked"} - } - - // set md5Sum for content protection. - if len(metadata.contentMD5Base64) > 0 { - req.Header.Set("Content-Md5", metadata.contentMD5Base64) - } - - // For anonymous requests just return. - if signerType.IsAnonymous() { - return req, nil - } - - switch { - case signerType.IsV2(): - // Add signature version '2' authorization header. - req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) - case metadata.streamSha256 && !c.secure: - if len(metadata.trailer) > 0 { - req.Trailer = metadata.trailer - } - // Streaming signature is used by default for a PUT object request. - // Additionally, we also look if the initialized client is secure, - // if yes then we don't need to perform streaming signature. - req = signer.StreamingSignV4(req, accessKeyID, - secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher()) - default: - // Set sha256 sum for signature calculation only with signature version '4'. - shaHeader := unsignedPayload - if metadata.contentSHA256Hex != "" { - shaHeader = metadata.contentSHA256Hex - if len(metadata.trailer) > 0 { - // Sanity check, we should not end up here if upstream is sane. - return nil, errors.New("internal error: contentSHA256Hex with trailer not supported") - } - } else if len(metadata.trailer) > 0 { - shaHeader = unsignedPayloadTrailer - } - req.Header.Set("X-Amz-Content-Sha256", shaHeader) - - // Add signature version '4' authorization header. - req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) - } - - // Return request. - return req, nil -} - -// set User agent. -func (c *Client) setUserAgent(req *http.Request) { - req.Header.Set("User-Agent", libraryUserAgent) - if c.appInfo.appName != "" && c.appInfo.appVersion != "" { - req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) - } -} - -// makeTargetURL make a new target url. -func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { - host := c.endpointURL.Host - // For Amazon S3 endpoint, try to fetch location based endpoint. - if s3utils.IsAmazonEndpoint(*c.endpointURL) { - if c.s3AccelerateEndpoint != "" && bucketName != "" { - // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html - // Disable transfer acceleration for non-compliant bucket names. - if strings.Contains(bucketName, ".") { - return nil, errTransferAccelerationBucket(bucketName) - } - // If transfer acceleration is requested set new host. - // For more details about enabling transfer acceleration read here. - // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html - host = c.s3AccelerateEndpoint - } else { - // Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint - if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) { - // Fetch new host based on the bucket location. - host = getS3Endpoint(bucketLocation) - } - } - } - - // Save scheme. - scheme := c.endpointURL.Scheme - - // Strip port 80 and 443 so we won't send these ports in Host header. 
- // Browsers and curl automatically strip :80 and :443 from generated - // presigned URLs, which would otherwise lead to a signature mismatch error. - if h, p, err := net.SplitHostPort(host); err == nil { - if scheme == "http" && p == "80" || scheme == "https" && p == "443" { - host = h - if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { - host = "[" + h + "]" - } - } - } - - urlStr := scheme + "://" + host + "/" - - // Make URL only if bucketName is available, otherwise use the - // endpoint URL. - if bucketName != "" { - // If endpoint supports virtual host style use that always. - // Currently only S3 and Google Cloud Storage would support - // virtual host style. - if isVirtualHostStyle { - urlStr = scheme + "://" + bucketName + "." + host + "/" - if objectName != "" { - urlStr += s3utils.EncodePath(objectName) - } - } else { - // If not, fall back to using path style. - urlStr = urlStr + bucketName + "/" - if objectName != "" { - urlStr += s3utils.EncodePath(objectName) - } - } - } - - // If there are any query values, add them to the end. - if len(queryValues) > 0 { - urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) - } - - return url.Parse(urlStr) -} - -// returns true if virtual hosted style requests are to be used. -func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { - if bucketName == "" { - return false - } - - if c.lookup == BucketLookupDNS { - return true - } - if c.lookup == BucketLookupPath { - return false - } - - // Default to virtual-host style only for Amazon/Google storage; in all - // other cases use path-style requests. - return s3utils.IsVirtualHostSupported(url, bucketName) -} diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go deleted file mode 100644 index b1d3b38..0000000 --- a/vendor/github.com/minio/minio-go/v7/bucket-cache.go +++ /dev/null @@ -1,256 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "net" - "net/http" - "net/url" - "path" - "sync" - - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/signer" -) - -// bucketLocationCache - Provides simple mechanism to hold bucket -// locations in memory. -type bucketLocationCache struct { - // mutex is used for handling the concurrent - // read/write requests for cache. - sync.RWMutex - - // items holds the cached bucket locations. - items map[string]string -} - -// newBucketLocationCache - Provides a new bucket location cache to be -// used internally with the client object. -func newBucketLocationCache() *bucketLocationCache { - return &bucketLocationCache{ - items: make(map[string]string), - } -} - -// Get - Returns a value of a given key if it exists.
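Concretely, the two addressing styles makeTargetURL can produce differ only in where the bucket lands; a toy illustration with placeholder names:

```go
host, bucket, object := "s3.amazonaws.com", "my-bucket", "a/b.txt"
fmt.Println("virtual-host style: https://" + bucket + "." + host + "/" + object)
fmt.Println("path style:         https://" + host + "/" + bucket + "/" + object)
```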
-func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { - r.RLock() - defer r.RUnlock() - location, ok = r.items[bucketName] - return -} - -// Set - Will persist a value into cache. -func (r *bucketLocationCache) Set(bucketName, location string) { - r.Lock() - defer r.Unlock() - r.items[bucketName] = location -} - -// Delete - Deletes a bucket name from cache. -func (r *bucketLocationCache) Delete(bucketName string) { - r.Lock() - defer r.Unlock() - delete(r.items, bucketName) -} - -// GetBucketLocation - get location for the bucket name from location cache, if not -// fetch freshly by making a new request. -func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - return c.getBucketLocation(ctx, bucketName) -} - -// getBucketLocation - Get location for the bucketName from location map cache, if not -// fetch freshly by making a new request. -func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - - // Region set then no need to fetch bucket location. - if c.region != "" { - return c.region, nil - } - - if location, ok := c.bucketLocCache.Get(bucketName); ok { - return location, nil - } - - // Initialize a new request. - req, err := c.getBucketLocationRequest(ctx, bucketName) - if err != nil { - return "", err - } - - // Initiate the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return "", err - } - location, err := processBucketLocationResponse(resp, bucketName) - if err != nil { - return "", err - } - c.bucketLocCache.Set(bucketName, location) - return location, nil -} - -// processes the getBucketLocation http response from the server. -func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { - if resp != nil { - if resp.StatusCode != http.StatusOK { - err = httpRespToErrorResponse(resp, bucketName, "") - errResp := ToErrorResponse(err) - // For access denied error, it could be an anonymous - // request. Move forward and let the top level callers - // succeed if possible based on their policy. - switch errResp.Code { - case "NotImplemented": - switch errResp.Server { - case "AmazonSnowball": - return "snowball", nil - case "cloudflare": - return "us-east-1", nil - } - case "AuthorizationHeaderMalformed": - fallthrough - case "InvalidRegion": - fallthrough - case "AccessDenied": - if errResp.Region == "" { - return "us-east-1", nil - } - return errResp.Region, nil - } - return "", err - } - } - - // Extract location. - var locationConstraint string - err = xmlDecoder(resp.Body, &locationConstraint) - if err != nil { - return "", err - } - - location := locationConstraint - // Location is empty will be 'us-east-1'. - if location == "" { - location = "us-east-1" - } - - // Location can be 'EU' convert it to meaningful 'eu-west-1'. - if location == "EU" { - location = "eu-west-1" - } - - // Save the location into cache. - - // Return. - return location, nil -} - -// getBucketLocationRequest - Wrapper creates a new getBucketLocation request. -func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) { - // Set location query. - urlValues := make(url.Values) - urlValues.Set("location", "") - - // Set get bucket location always as path style. 
- targetURL := *c.endpointURL - - // as it works in makeTargetURL method from api.go file - if h, p, err := net.SplitHostPort(targetURL.Host); err == nil { - if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { - targetURL.Host = h - if ip := net.ParseIP(h); ip != nil && ip.To16() != nil { - targetURL.Host = "[" + h + "]" - } - } - } - - isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName) - - var urlStr string - - if isVirtualStyle { - urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location" - } else { - targetURL.Path = path.Join(bucketName, "") + "/" - targetURL.RawQuery = urlValues.Encode() - urlStr = targetURL.String() - } - - // Get a new HTTP request for the method. - req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil) - if err != nil { - return nil, err - } - - // Set UserAgent for the request. - c.setUserAgent(req) - - // Get credentials from the configured credentials provider. - value, err := c.credsProvider.Get() - if err != nil { - return nil, err - } - - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - if signerType.IsAnonymous() { - return req, nil - } - - if signerType.IsV2() { - req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualStyle) - return req, nil - } - - // Set sha256 sum for signature calculation only with signature version '4'. - contentSha256 := emptySHA256Hex - if c.secure { - contentSha256 = unsignedPayload - } - - req.Header.Set("X-Amz-Content-Sha256", contentSha256) - req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") - return req, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go deleted file mode 100644 index a1f6f43..0000000 --- a/vendor/github.com/minio/minio-go/v7/checksum.go +++ /dev/null @@ -1,210 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2023 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "crypto/sha1" - "crypto/sha256" - "encoding/base64" - "hash" - "hash/crc32" - "io" - "math/bits" -) - -// ChecksumType contains information about the checksum type. -type ChecksumType uint32 - -const ( - - // ChecksumSHA256 indicates a SHA256 checksum. - ChecksumSHA256 ChecksumType = 1 << iota - // ChecksumSHA1 indicates a SHA-1 checksum. - ChecksumSHA1 - // ChecksumCRC32 indicates a CRC32 checksum with IEEE table. 
- ChecksumCRC32 - // ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table. - ChecksumCRC32C - - // Keep after all valid checksums - checksumLast - - // checksumMask is a mask for valid checksum types. - checksumMask = checksumLast - 1 - - // ChecksumNone indicates no checksum. - ChecksumNone ChecksumType = 0 - - amzChecksumAlgo = "x-amz-checksum-algorithm" - amzChecksumCRC32 = "x-amz-checksum-crc32" - amzChecksumCRC32C = "x-amz-checksum-crc32c" - amzChecksumSHA1 = "x-amz-checksum-sha1" - amzChecksumSHA256 = "x-amz-checksum-sha256" -) - -// Is returns if c is all of t. -func (c ChecksumType) Is(t ChecksumType) bool { - return c&t == t -} - -// Key returns the header key. -// returns empty string if invalid or none. -func (c ChecksumType) Key() string { - switch c & checksumMask { - case ChecksumCRC32: - return amzChecksumCRC32 - case ChecksumCRC32C: - return amzChecksumCRC32C - case ChecksumSHA1: - return amzChecksumSHA1 - case ChecksumSHA256: - return amzChecksumSHA256 - } - return "" -} - -// RawByteLen returns the size of the un-encoded checksum. -func (c ChecksumType) RawByteLen() int { - switch c & checksumMask { - case ChecksumCRC32, ChecksumCRC32C: - return 4 - case ChecksumSHA1: - return sha1.Size - case ChecksumSHA256: - return sha256.Size - } - return 0 -} - -// Hasher returns a hasher corresponding to the checksum type. -// Returns nil if no checksum. -func (c ChecksumType) Hasher() hash.Hash { - switch c & checksumMask { - case ChecksumCRC32: - return crc32.NewIEEE() - case ChecksumCRC32C: - return crc32.New(crc32.MakeTable(crc32.Castagnoli)) - case ChecksumSHA1: - return sha1.New() - case ChecksumSHA256: - return sha256.New() - } - return nil -} - -// IsSet returns whether the type is valid and known. -func (c ChecksumType) IsSet() bool { - return bits.OnesCount32(uint32(c)) == 1 -} - -// String returns the type as a string. -// CRC32, CRC32C, SHA1, and SHA256 for valid values. -// Empty string for unset and "" if not valid. -func (c ChecksumType) String() string { - switch c & checksumMask { - case ChecksumCRC32: - return "CRC32" - case ChecksumCRC32C: - return "CRC32C" - case ChecksumSHA1: - return "SHA1" - case ChecksumSHA256: - return "SHA256" - case ChecksumNone: - return "" - } - return "" -} - -// ChecksumReader reads all of r and returns a checksum of type c. -// Returns any error that may have occurred while reading. -func (c ChecksumType) ChecksumReader(r io.Reader) (Checksum, error) { - h := c.Hasher() - if h == nil { - return Checksum{}, nil - } - _, err := io.Copy(h, r) - if err != nil { - return Checksum{}, err - } - return NewChecksum(c, h.Sum(nil)), nil -} - -// ChecksumBytes returns a checksum of the content b with type c. -func (c ChecksumType) ChecksumBytes(b []byte) Checksum { - h := c.Hasher() - if h == nil { - return Checksum{} - } - n, err := h.Write(b) - if err != nil || n != len(b) { - // Shouldn't happen with these checksummers. - return Checksum{} - } - return NewChecksum(c, h.Sum(nil)) -} - -// Checksum is a type and encoded value. -type Checksum struct { - Type ChecksumType - r []byte -} - -// NewChecksum sets the checksum to the value of b, -// which is the raw hash output. -// If the length of c does not match t.RawByteLen, -// a checksum with ChecksumNone is returned. -func NewChecksum(t ChecksumType, b []byte) Checksum { - if t.IsSet() && len(b) == t.RawByteLen() { - return Checksum{Type: t, r: b} - } - return Checksum{} -} - -// NewChecksumString sets the checksum to the value of s, -// which is the base 64 encoded raw hash output. 
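A quick sketch of the checksum API defined above (the input bytes are arbitrary):

```go
sum := minio.ChecksumCRC32C.ChecksumBytes([]byte("hello, world"))
fmt.Println(sum.Type.String()) // "CRC32C"
fmt.Println(sum.Type.Key())    // "x-amz-checksum-crc32c"
fmt.Println(sum.Encoded())     // base64 of the 4 raw CRC bytes
```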
-// If the length of c does not match t.RawByteLen, it is not added. -func NewChecksumString(t ChecksumType, s string) Checksum { - b, _ := base64.StdEncoding.DecodeString(s) - if t.IsSet() && len(b) == t.RawByteLen() { - return Checksum{Type: t, r: b} - } - return Checksum{} -} - -// IsSet returns whether the checksum is valid and known. -func (c Checksum) IsSet() bool { - return c.Type.IsSet() && len(c.r) == c.Type.RawByteLen() -} - -// Encoded returns the encoded value. -// Returns the empty string if not set or valid. -func (c Checksum) Encoded() string { - if !c.IsSet() { - return "" - } - return base64.StdEncoding.EncodeToString(c.r) -} - -// Raw returns the raw checksum value if set. -func (c Checksum) Raw() []byte { - if !c.IsSet() { - return nil - } - return c.r -} diff --git a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md deleted file mode 100644 index cb232c3..0000000 --- a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md +++ /dev/null @@ -1,80 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior, in compliance with the -licensing terms applying to the Project developments. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. However, these actions shall respect the -licensing terms of the Project Developments that will always supersede such -Code of Conduct. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. 
Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at dev@min.io. The project team -will review and investigate all complaints, and will respond in a way that it deems -appropriate to the circumstances. The project team is obligated to maintain -confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -This version includes a clarification to ensure that the code of conduct is in -compliance with the free software licensing terms of the project. - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go deleted file mode 100644 index 401d2a7..0000000 --- a/vendor/github.com/minio/minio-go/v7/constants.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// Multipart upload defaults. - -// absMinPartSize - absolute minimum part size (5 MiB) below which -// a part in a multipart upload may not be uploaded. -const absMinPartSize = 1024 * 1024 * 5 - -// minPartSize - minimum part size (16 MiB) per object, above which -// putObject switches internally to multipart. -const minPartSize = 1024 * 1024 * 16 - -// maxPartsCount - maximum number of parts for a single multipart session. -const maxPartsCount = 10000 - -// maxPartSize - maximum part size (5 GiB) for a single multipart upload -// operation. -const maxPartSize = 1024 * 1024 * 1024 * 5 - -// maxSinglePutObjectSize - maximum object size (5 GiB) per PUT -// operation. -const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 - -// maxMultipartPutObjectSize - maximum object size (5 TiB) for a -// multipart operation. -const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 - -// unsignedPayload - value to be set to the X-Amz-Content-Sha256 header when -// we don't want to sign the request payload. -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// unsignedPayloadTrailer - value to be set to the X-Amz-Content-Sha256 header when -// we don't want to sign the request payload, but have a trailer. -const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" - -// Total number of parallel workers used for multipart operations. -const totalWorkers = 4
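// The multipart limits above compose into the advertised 5 TiB object
// ceiling: 10000 parts of at most 5 GiB each. A hypothetical sanity check
// (sketch only; assumes fmt is imported, and integer division truncates):
//
//	minNeeded := int64(maxMultipartPutObjectSize) / maxPartsCount // 549755813 bytes, ~524 MiB per part
//	fmt.Println(minNeeded >= absMinPartSize && minNeeded <= maxPartSize) // true
//
// So any object up to 5 TiB can be covered with parts that stay inside both
// the minimum and maximum per-part bounds.

-// Signature related constants.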
-const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601DateFormat = "20060102T150405Z" -) - -const ( - // Storage class header. - amzStorageClass = "X-Amz-Storage-Class" - - // Website redirect location header - amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" - - // Object Tagging headers - amzTaggingHeader = "X-Amz-Tagging" - amzTaggingHeaderDirective = "X-Amz-Tagging-Directive" - - amzVersionID = "X-Amz-Version-Id" - amzTaggingCount = "X-Amz-Tagging-Count" - amzExpiration = "X-Amz-Expiration" - amzRestore = "X-Amz-Restore" - amzReplicationStatus = "X-Amz-Replication-Status" - amzDeleteMarker = "X-Amz-Delete-Marker" - - // Object legal hold header - amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold" - - // Object retention header - amzLockMode = "X-Amz-Object-Lock-Mode" - amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date" - amzBypassGovernance = "X-Amz-Bypass-Governance-Retention" - - // Replication status - amzBucketReplicationStatus = "X-Amz-Replication-Status" - // Minio specific Replication/lifecycle transition extension - minIOBucketSourceMTime = "X-Minio-Source-Mtime" - - minIOBucketSourceETag = "X-Minio-Source-Etag" - minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker" - minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request" - minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request" - minIOBucketReplicationCheck = "X-Minio-Source-Replication-Check" - - // Header indicates last tag update time on source - minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp" - // Header indicates last retention update time on source - minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" - // Header indicates last legalhold update time on source - minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" - minIOForceDelete = "x-minio-force-delete" - // Header indicates delete marker replication request can be sent by source now. - minioTgtReplicationReady = "X-Minio-Replication-Ready" - // Header asks if delete marker replication request can be sent by source now. - isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready" -) diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go deleted file mode 100644 index 132ea70..0000000 --- a/vendor/github.com/minio/minio-go/v7/core.go +++ /dev/null @@ -1,150 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "net/http" - - "github.com/minio/minio-go/v7/pkg/encrypt" -) - -// Core - Inherits Client and adds new methods to expose the low level S3 APIs. 
-type Core struct { - *Client -} - -// NewCore - Returns a newly initialized Core client. This Core client should -// only be used under special conditions, such as needing to access lower-level -// primitives and to write your own wrappers around them. -func NewCore(endpoint string, opts *Options) (*Core, error) { - var s3Client Core - client, err := New(endpoint, opts) - if err != nil { - return nil, err - } - s3Client.Client = client - return &s3Client, nil -} - -// ListObjects - List all the objects at a prefix, optionally with marker and delimiter; -// you can further filter the results. -func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { - return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil) -} - -// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses -// continuationToken instead of marker to support iteration over the results. -func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) { - return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil) -} - -// CopyObject - copies an object from a source object to a destination object on the server side. -func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) { - return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts) -} - -// CopyObjectPart - creates a part in a multipart upload by copying (a -// part of) an existing object. -func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, - partID int, startOffset, length int64, metadata map[string]string, -) (p CompletePart, err error) { - return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID, - partID, startOffset, length, metadata) -} - -// PutObject - Upload an object. Uploads using a single PUT call. -func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) { - hookReader := newHook(data, opts.Progress) - return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts) -} - -// NewMultipartUpload - Initiates a new multipart upload and returns the new uploadID. -func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) { - result, err := c.initiateMultipartUpload(ctx, bucket, object, opts) - return result.UploadID, err -} - -// ListMultipartUploads - List incomplete uploads. -func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { - return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) -} - -// PutObjectPartOptions contains options for the PutObjectPart API. -type PutObjectPartOptions struct { - Md5Base64, Sha256Hex string - SSE encrypt.ServerSide - CustomHeader, Trailer http.Header -} - -// PutObjectPart - Upload an object part.
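// A hypothetical end-to-end sketch of the low-level multipart flow exposed
// by Core (error handling elided; ctx, bucket, object, data and size are
// caller-supplied):
//
//	uploadID, _ := c.NewMultipartUpload(ctx, bucket, object, PutObjectOptions{})
//	part, _ := c.PutObjectPart(ctx, bucket, object, uploadID, 1, data, size, PutObjectPartOptions{})
//	parts := []CompletePart{{PartNumber: part.PartNumber, ETag: part.ETag}}
//	_, err := c.CompleteMultipartUpload(ctx, bucket, object, uploadID, parts, PutObjectOptions{})
//	// on any failure, clean up with c.AbortMultipartUpload(ctx, bucket, object, uploadID)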
-func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, - data io.Reader, size int64, opts PutObjectPartOptions, -) (ObjectPart, error) { - p := uploadPartParams{ - bucketName: bucket, - objectName: object, - uploadID: uploadID, - reader: data, - partNumber: partID, - md5Base64: opts.Md5Base64, - sha256Hex: opts.Sha256Hex, - size: size, - sse: opts.SSE, - streamSha256: true, - customHeader: opts.CustomHeader, - trailer: opts.Trailer, - } - return c.uploadPart(ctx, p) -} - -// ListObjectParts - List uploaded parts of an incomplete upload. -func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListObjectPartsResult, err error) { - return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts) -} - -// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. -func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (UploadInfo, error) { - res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{ - Parts: parts, - }, opts) - return res, err -} - -// AbortMultipartUpload - Abort an incomplete upload. -func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { - return c.abortMultipartUpload(ctx, bucket, object, uploadID) -} - -// GetBucketPolicy - fetches the bucket access policy for a given bucket. -func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) { - return c.getBucketPolicy(ctx, bucket) -} - -// PutBucketPolicy - applies a new bucket access policy for a given bucket. -func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error { - return c.putBucketPolicy(ctx, bucket, bucketPolicy) -} - -// GetObject is a lower-level API implemented to support reading -// partial objects and also downloading objects with special conditions, -// matching ETag, modtime, etc. -func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { - return c.getObject(ctx, bucketName, objectName, opts) -} diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go deleted file mode 100644 index f951cd0..0000000 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ /dev/null @@ -1,13004 +0,0 @@ -//go:build mint -// +build mint - -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package main - -import ( - "archive/zip" - "bytes" - "context" - "crypto/sha1" - "encoding/base64" - "errors" - "fmt" - "hash" - "hash/crc32" - "io" - "math/rand" - "mime/multipart" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/dustin/go-humanize" - jsoniter "github.com/json-iterator/go" - "github.com/minio/sha256-simd" - log "github.com/sirupsen/logrus" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/notification" - "github.com/minio/minio-go/v7/pkg/tags" -) - -const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" -const ( - letterIdxBits = 6 // 6 bits to represent a letter index - letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits - letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits -) - -const ( - serverEndpoint = "SERVER_ENDPOINT" - accessKey = "ACCESS_KEY" - secretKey = "SECRET_KEY" - enableHTTPS = "ENABLE_HTTPS" - enableKMS = "ENABLE_KMS" -) - -type mintJSONFormatter struct{} - -func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) { - data := make(log.Fields, len(entry.Data)) - for k, v := range entry.Data { - data[k] = v - } - json := jsoniter.ConfigCompatibleWithStandardLibrary - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) - } - return append(serialized, '\n'), nil -} - -// readFull reads exactly len(buf) bytes from r into buf, tolerating readers -// that return io.ErrUnexpectedEOF with a zero byte count. -func readFull(r io.Reader, buf []byte) (n int, err error) { - for n < len(buf) && err == nil { - var nn int - nn, err = r.Read(buf[n:]) - if err == io.ErrUnexpectedEOF && nn == 0 { - err = io.EOF - } - n += nn - } - if n >= len(buf) { - err = nil - } else if n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -func cleanEmptyEntries(fields log.Fields) log.Fields { - cleanFields := log.Fields{} - for k, v := range fields { - if v != "" { - cleanFields[k] = v - } - } - return cleanFields -} - -// log successful test runs -func successLogger(testName, function string, args map[string]interface{}, startTime time.Time) *log.Entry { - // calculate the test case duration - duration := time.Since(startTime) - // log with the fields as per mint - fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"} - return log.WithFields(cleanEmptyEntries(fields)) -} - -// As a few of the features are currently not available in Gateway(s), check if the err value is NotImplemented; -// log as NA in that case and continue execution. Otherwise log as a failure and return. -func logError(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) { - // If server returns NotImplemented we assume it is gateway mode and hence log it as info and move on to next tests - // Special case for ComposeObject API as it is implemented on client side and adds specific error details like `Error in upload-part-copy` in - // addition to NotImplemented error returned from server - if isErrNotImplemented(err) { - ignoredLog(testName, function, args, startTime, message).Info() - } else if isRunOnFail() { - failureLog(testName, function, args, startTime, alert, message, err).Error() - } else { - failureLog(testName, function, args, startTime, alert, message, err).Fatal() - } -} - -// log failed test runs -func failureLog(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) *log.Entry { - // calculate the test case duration - duration := time.Since(startTime) - var fields log.Fields - // log with the fields as per mint - if err != nil { - fields = log.Fields{ - "name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err, - } - } else { - fields = log.Fields{ - "name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, - } - } - return log.WithFields(cleanEmptyEntries(fields)) -} - -// log not applicable test runs -func ignoredLog(testName, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry { - // calculate the test case duration - duration
:= time.Since(startTime) - // log with the fields as per mint - fields := log.Fields{ - "name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented", - } - return log.WithFields(cleanEmptyEntries(fields)) -} - -// Delete objects in given bucket, recursively -func cleanupBucket(bucketName string, c *minio.Client) error { - // Create a done channel to control 'ListObjectsV2' go routine. - doneCh := make(chan struct{}) - // Exit cleanly upon return. - defer close(doneCh) - // Iterate over all objects in the bucket via listObjectsV2 and delete - for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) { - if objCh.Err != nil { - return objCh.Err - } - if objCh.Key != "" { - err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{}) - if err != nil { - return err - } - } - } - for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { - if objPartInfo.Err != nil { - return objPartInfo.Err - } - if objPartInfo.Key != "" { - err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) - if err != nil { - return err - } - } - } - // objects are already deleted, clear the buckets now - err := c.RemoveBucket(context.Background(), bucketName) - if err != nil { - return err - } - return err -} - -func cleanupVersionedBucket(bucketName string, c *minio.Client) error { - doneCh := make(chan struct{}) - defer close(doneCh) - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { - if obj.Err != nil { - return obj.Err - } - if obj.Key != "" { - err := c.RemoveObject(context.Background(), bucketName, obj.Key, - minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true}) - if err != nil { - return err - } - } - } - for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { - if objPartInfo.Err != nil { - return objPartInfo.Err - } - if objPartInfo.Key != "" { - err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) - if err != nil { - return err - } - } - } - // objects are already deleted, clear the buckets now - err := c.RemoveBucket(context.Background(), bucketName) - if err != nil { - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { - log.Println("found", obj.Key, obj.VersionID) - } - return err - } - return err -} - -func isErrNotImplemented(err error) bool { - return minio.ToErrorResponse(err).Code == "NotImplemented" -} - -func isRunOnFail() bool { - return os.Getenv("RUN_ON_FAIL") == "1" -} - -func init() { - // If server endpoint is not set, all tests default to - // using https://play.min.io - if os.Getenv(serverEndpoint) == "" { - os.Setenv(serverEndpoint, "play.min.io") - os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F") - os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG") - os.Setenv(enableHTTPS, "1") - } -} - -var mintDataDir = os.Getenv("MINT_DATA_DIR") - -func getMintDataDirFilePath(filename string) (fp string) { - if mintDataDir == "" { - return - } - return filepath.Join(mintDataDir, filename) -} - -func newRandomReader(seed, size int64) io.Reader { - return io.LimitReader(rand.New(rand.NewSource(seed)), size) -} - -func mustCrcReader(r io.Reader) uint32 { - crc := 
crc32.NewIEEE() - _, err := io.Copy(crc, r) - if err != nil { - panic(err) - } - return crc.Sum32() -} - -func crcMatches(r io.Reader, want uint32) error { - crc := crc32.NewIEEE() - _, err := io.Copy(crc, r) - if err != nil { - panic(err) - } - got := crc.Sum32() - if got != want { - return fmt.Errorf("crc mismatch, want %x, got %x", want, got) - } - return nil -} - -func crcMatchesName(r io.Reader, name string) error { - want := dataFileCRC32[name] - crc := crc32.NewIEEE() - _, err := io.Copy(crc, r) - if err != nil { - panic(err) - } - got := crc.Sum32() - if got != want { - return fmt.Errorf("crc mismatch, want %x, got %x", want, got) - } - return nil -} - -// read data from file if it exists or optionally create a buffer of particular size -func getDataReader(fileName string) io.ReadCloser { - if mintDataDir == "" { - size := int64(dataFileMap[fileName]) - if _, ok := dataFileCRC32[fileName]; !ok { - dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) - } - return io.NopCloser(newRandomReader(size, size)) - } - reader, _ := os.Open(getMintDataDirFilePath(fileName)) - if _, ok := dataFileCRC32[fileName]; !ok { - dataFileCRC32[fileName] = mustCrcReader(reader) - reader.Close() - reader, _ = os.Open(getMintDataDirFilePath(fileName)) - } - return reader -} - -// randString generates random names and prepends them with a known prefix. -func randString(n int, src rand.Source, prefix string) string { - b := make([]byte, n) - // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! - for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { - if remain == 0 { - cache, remain = src.Int63(), letterIdxMax - } - if idx := int(cache & letterIdxMask); idx < len(letterBytes) { - b[i] = letterBytes[idx] - i-- - } - cache >>= letterIdxBits - remain-- - } - return prefix + string(b[0:30-len(prefix)]) -} - -var dataFileMap = map[string]int{ - "datafile-0-b": 0, - "datafile-1-b": 1, - "datafile-1-kB": 1 * humanize.KiByte, - "datafile-10-kB": 10 * humanize.KiByte, - "datafile-33-kB": 33 * humanize.KiByte, - "datafile-100-kB": 100 * humanize.KiByte, - "datafile-1.03-MB": 1056 * humanize.KiByte, - "datafile-1-MB": 1 * humanize.MiByte, - "datafile-5-MB": 5 * humanize.MiByte, - "datafile-6-MB": 6 * humanize.MiByte, - "datafile-11-MB": 11 * humanize.MiByte, - "datafile-65-MB": 65 * humanize.MiByte, - "datafile-129-MB": 129 * humanize.MiByte, -} - -var dataFileCRC32 = map[string]uint32{} - -func isFullMode() bool { - return os.Getenv("MINT_MODE") == "full" -} - -func getFuncName() string { - return getFuncNameLoc(2) -} - -func getFuncNameLoc(caller int) string { - pc, _, _, _ := runtime.Caller(caller) - return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") -} - -// Tests bucket re-create errors. -func testMakeBucketError() { - region := "eu-central-1" - - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - // initialize logging params - args := map[string]interface{}{ - "bucketName": "", - "region": region, - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
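// The fixture helpers above keep the suite deterministic without checked-in
// data files: with MINT_DATA_DIR unset, getDataReader synthesizes each named
// file from a seeded random stream (seed == size) and memoizes its CRC32 in
// dataFileCRC32 on first use, so any later read of the same name can be
// verified, e.g. (hypothetical):
//
//	r := getDataReader("datafile-1-MB")
//	defer r.Close()
//	if err := crcMatchesName(r, "datafile-1-MB"); err != nil {
//		// payload was corrupted somewhere along the way
//	}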
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket Failed", err) - return - } - defer cleanupBucket(bucketName, c) - - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { - logError(testName, function, args, startTime, "", "Bucket already exists", err) - return - } - // Verify valid error response from server. - if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - logError(testName, function, args, startTime, "", "Invalid error returned by server", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testMetadataSizeLimit() { - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, objectSize, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts.UserMetadata": "", - } - rand.Seed(startTime.Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - const HeaderSizeLimit = 8 * 1024 - const UserMetadataLimit = 2 * 1024 - - // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail - metadata := make(map[string]string) - metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) - args["metadata"] = fmt.Sprint(metadata) - - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) - if err == nil { - logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) - return - } - - // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail - metadata = make(map[string]string) - metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) - args["metadata"] = fmt.Sprint(metadata) - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) - if err == nil { - logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests various bucket supported formats. -func testMakeBucketRegions() { - region := "eu-central-1" - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - // initialize logging params - args := map[string]interface{}{ - "bucketName": "", - "region": region, - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. 
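// testMetadataSizeLimit above leans on AWS S3's documented caps: 2 KB for
// user-defined metadata and 8 KB for the whole header section. Sizing one
// value to exactly one byte over the cap makes the expected rejection
// deterministic, e.g. (hypothetical):
//
//	key := "X-Amz-Meta-Mint-Test"
//	val := strings.Repeat("m", 1+2*1024-len(key)) // exceeds the 2 KB budget by one byte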
- if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - // Make a new bucket with '.' in its name, in 'us-west-2'. This - // request is internally staged into a path style instead of - // virtual host style. - region = "us-west-2" - args["region"] = region - if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName+".withperiod", c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject using a large data to trigger multipart readat -func testPutObjectReadAt() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "objectContentType", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-129-MB"] - reader := getDataReader("datafile-129-MB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Object content type - objectContentType := "binary/octet-stream" - args["objectContentType"] = objectContentType - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Get Object failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat Object failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) - return - } - if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content types don't match", err) - return - } - if err := crcMatchesName(r, "datafile-129-MB"); err != nil { - logError(testName, function, args, startTime, "", "data CRC check failed", err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testListObjectVersions() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ListObjectVersions(bucketName, prefix, recursive)" - args := map[string]interface{}{ - "bucketName": "", - "prefix": "", - "recursive": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
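// This test pins down the expected bookkeeping for a versioned bucket: two
// PutObjects to one key plus one RemoveObject must surface as exactly two
// Version entries and one delete marker, with the marker flagged IsLatest.
// The core walk reduces to (hypothetical sketch):
//
//	opts := minio.ListObjectsOptions{WithVersions: true, Recursive: true}
//	for v := range c.ListObjects(ctx, bucket, opts) {
//		if v.IsDeleteMarker {
//			markers++
//		} else {
//			versions++
//		}
//	}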
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - bufSize := dataFileMap["datafile-10-kB"] - reader := getDataReader("datafile-10-kB") - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - reader.Close() - - bufSize = dataFileMap["datafile-1-b"] - reader = getDataReader("datafile-1-b") - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - reader.Close() - - err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Unexpected object deletion", err) - return - } - - var deleteMarkers, versions int - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - if info.Key != objectName { - logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil) - return - } - if info.VersionID == "" { - logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil) - return - } - if info.IsDeleteMarker { - deleteMarkers++ - if !info.IsLatest { - logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil) - return - } - } else { - versions++ - } - } - - if deleteMarkers != 1 { - logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil) - return - } - - if versions != 2 { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testStatObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "StatObject" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. 
- // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - bufSize := dataFileMap["datafile-10-kB"] - reader := getDataReader("datafile-10-kB") - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - reader.Close() - - bufSize = dataFileMap["datafile-1-b"] - reader = getDataReader("datafile-1-b") - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - reader.Close() - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - - var results []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - results = append(results, info) - } - - if len(results) != 2 { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - for i := 0; i < len(results); i++ { - opts := minio.StatObjectOptions{VersionID: results[i].VersionID} - statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts) - if err != nil { - logError(testName, function, args, startTime, "", "error during HEAD object", err) - return - } - if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err) - return - } - if statInfo.ETag != results[i].ETag { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) - return - } - if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) - return - } - if statInfo.Size != results[i].Size { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) - return - } - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testGetObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject()" - args := map[string]interface{}{} - - // Seed random based 
on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Save the contents of datafiles to check with GetObject() reader output later - var buffers [][]byte - testFiles := []string{"datafile-1-b", "datafile-10-kB"} - - for _, testFile := range testFiles { - r := getDataReader(testFile) - buf, err := io.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected failure", err) - return - } - r.Close() - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - buffers = append(buffers, buf) - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - - var results []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - results = append(results, info) - } - - if len(results) != 2 { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - sort.SliceStable(results, func(i, j int) bool { - return results[i].Size < results[j].Size - }) - - sort.SliceStable(buffers, func(i, j int) bool { - return len(buffers[i]) < len(buffers[j]) - }) - - for i := 0; i < len(results); i++ { - opts := minio.GetObjectOptions{VersionID: results[i].VersionID} - reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) - if err != nil { - logError(testName, function, args, startTime, "", "error during GET object", err) - return - } - statInfo, err := reader.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) - return - } - if statInfo.ETag != results[i].ETag { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) - return - } - if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) - return - } - if statInfo.Size != results[i].Size { - 
logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) - return - } - - tmpBuffer := bytes.NewBuffer([]byte{}) - _, err = io.Copy(tmpBuffer, reader) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) - return - } - - if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { - logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) - return - } - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testPutObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - const n = 10 - // Read input... - - // Save the data concurrently. 
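// Each goroutine below writes a distinct payload (i bytes, for i = 0..n-1) to
// the same key, so on a versioned bucket the server must retain all n writes
// as separate versions rather than collapsing them last-writer-wins; the
// listing check afterwards matches versions back to payloads by their unique
// sizes.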
- var wg sync.WaitGroup - wg.Add(n) - buffers := make([][]byte, n) - var errs [n]error - for i := 0; i < n; i++ { - r := newRandomReader(int64((1<<20)*i+i), int64(i)) - buf, err := io.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected failure", err) - return - } - buffers[i] = buf - - go func(i int) { - defer wg.Done() - _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20}) - }(i) - } - wg.Wait() - for _, err := range errs { - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - var results []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - results = append(results, info) - } - - if len(results) != n { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - sort.Slice(results, func(i, j int) bool { - return results[i].Size < results[j].Size - }) - - sort.Slice(buffers, func(i, j int) bool { - return len(buffers[i]) < len(buffers[j]) - }) - - for i := 0; i < len(results); i++ { - opts := minio.GetObjectOptions{VersionID: results[i].VersionID} - reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) - if err != nil { - logError(testName, function, args, startTime, "", "error during GET object", err) - return - } - statInfo, err := reader.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) - return - } - if statInfo.ETag != results[i].ETag { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) - return - } - if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) - return - } - if statInfo.Size != results[i].Size { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) - return - } - - tmpBuffer := bytes.NewBuffer([]byte{}) - _, err = io.Copy(tmpBuffer, reader) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) - return - } - - if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { - logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) - return - } - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testCopyObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - testFiles := []string{"datafile-1-b", "datafile-10-kB"} - for _, testFile := range testFiles { - r := getDataReader(testFile) - buf, err := io.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected failure", err) - return - } - r.Close() - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - var infos []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - infos = append(infos, info) - } - - sort.Slice(infos, func(i, j int) bool { - return infos[i].Size < infos[j].Size - }) - - reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) - return - } - - oldestContent, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) - return - } - - // Copy Source - srcOpts := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - VersionID: infos[0].VersionID, - } - args["src"] = srcOpts - - dstOpts := minio.CopyDestOptions{ - Bucket: bucketName, - Object: objectName + "-copy", - } - args["dst"] = dstOpts - - // Perform the Copy - if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Destination object - readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer readerCopy.Close() - - newestContent, err := io.ReadAll(readerCopy) - if err != nil { - logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) - return - } - - if 
len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { - logError(testName, function, args, startTime, "", "Unexpected destination object content", err) - return - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testConcurrentCopyObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - testFiles := []string{"datafile-10-kB"} - for _, testFile := range testFiles { - r := getDataReader(testFile) - buf, err := io.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected failure", err) - return - } - r.Close() - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - var infos []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - infos = append(infos, info) - } - - sort.Slice(infos, func(i, j int) bool { - return infos[i].Size < infos[j].Size - }) - - reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) - return - } - - oldestContent, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) - return - } - - // Copy Source - srcOpts := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - VersionID: infos[0].VersionID, - } - args["src"] = srcOpts - - 
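// Pinning VersionID in CopySrcOptions makes each server-side copy below read
// that exact (oldest) version instead of the latest write, so every one of
// the n concurrent copies must reproduce oldestContent; a single-version copy
// reduces to (hypothetical sketch):
//
//	src := minio.CopySrcOptions{Bucket: bucket, Object: key, VersionID: vid}
//	dst := minio.CopyDestOptions{Bucket: bucket, Object: key + "-copy"}
//	_, err := c.CopyObject(ctx, dst, src)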
dstOpts := minio.CopyDestOptions{ - Bucket: bucketName, - Object: objectName + "-copy", - } - args["dst"] = dstOpts - - // Perform the Copy concurrently - const n = 10 - var wg sync.WaitGroup - wg.Add(n) - var errs [n]error - for i := 0; i < n; i++ { - go func(i int) { - defer wg.Done() - _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts) - }(i) - } - wg.Wait() - for _, err := range errs { - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - } - - objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object}) - infos = []minio.ObjectInfo{} - for info := range objectsInfo { - // Destination object - readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer readerCopy.Close() - - newestContent, err := io.ReadAll(readerCopy) - if err != nil { - logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) - return - } - - if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { - logError(testName, function, args, startTime, "", "Unexpected destination object content", err) - return - } - infos = append(infos, info) - } - - if len(infos) != n { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testComposeObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Make bucket failed", err)
-		return
-	}
-
-	err = c.EnableVersioning(context.Background(), bucketName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
-		return
-	}
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	testFiles := []string{"datafile-5-MB", "datafile-10-kB"}
-	var testFilesBytes [][]byte
-
-	for _, testFile := range testFiles {
-		r := getDataReader(testFile)
-		buf, err := io.ReadAll(r)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "unexpected failure", err)
-			return
-		}
-		r.Close()
-		_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
-		if err != nil {
-			logError(testName, function, args, startTime, "", "PutObject failed", err)
-			return
-		}
-		testFilesBytes = append(testFilesBytes, buf)
-	}
-
-	objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
-
-	var results []minio.ObjectInfo
-	for info := range objectsInfo {
-		if info.Err != nil {
-			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
-			return
-		}
-		results = append(results, info)
-	}
-
-	sort.SliceStable(results, func(i, j int) bool {
-		return results[i].Size > results[j].Size
-	})
-
-	// Source objects to concatenate: two specific versions of the
-	// same object, addressed by their version IDs.
-	src1 := minio.CopySrcOptions{
-		Bucket:    bucketName,
-		Object:    objectName,
-		VersionID: results[0].VersionID,
-	}
-
-	src2 := minio.CopySrcOptions{
-		Bucket:    bucketName,
-		Object:    objectName,
-		VersionID: results[1].VersionID,
-	}
-
-	dst := minio.CopyDestOptions{
-		Bucket: bucketName,
-		Object: objectName + "-copy",
-	}
-
-	_, err = c.ComposeObject(context.Background(), dst, src1, src2)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
-		return
-	}
-
-	// Destination object
-	readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err)
-		return
-	}
-	defer readerCopy.Close()
-
-	copyContentBytes, err := io.ReadAll(readerCopy)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err)
-		return
-	}
-
-	var expectedContent []byte
-	for _, fileBytes := range testFilesBytes {
-		expectedContent = append(expectedContent, fileBytes...)
-	}
-
-	if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) {
-		logError(testName, function, args, startTime, "", "Unexpected destination object content", nil)
-		return
-	}
-
-	// Delete all objects and their versions, as well as the bucket itself.
-	if err = cleanupVersionedBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-func testRemoveObjectWithVersioning() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "DeleteObject()"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Make bucket failed", err)
-		return
-	}
-
-	err = c.EnableVersioning(context.Background(), bucketName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
-		return
-	}
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
-	var version minio.ObjectInfo
-	for info := range objectsInfo {
-		if info.Err != nil {
-			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
-			return
-		}
-		version = info
-		break
-	}
-
-	err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DeleteObject failed", err)
-		return
-	}
-
-	objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
-	for range objectsInfo {
-		logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any", nil)
-		return
-	}
-	// test delete marker version id is non-null
-	_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-	// create delete marker
-	err = c.RemoveObject(context.Background(), bucketName,
objectName, minio.RemoveObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "DeleteObject failed", err) - return - } - objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - idx := 0 - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - if idx == 0 { - if !info.IsDeleteMarker { - logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err) - return - } - if info.VersionID == "" { - logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err) - return - } - } - idx++ - } - - defer cleanupBucket(bucketName, c) - - successLogger(testName, function, args, startTime).Info() -} - -func testRemoveObjectsWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "DeleteObjects()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
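-	// Object locking can only be enabled when the bucket is created, hence
-	// ObjectLocking is set in MakeBucketOptions below.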
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Make bucket failed", err)
-		return
-	}
-
-	err = c.EnableVersioning(context.Background(), bucketName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
-		return
-	}
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	objectsVersions := make(chan minio.ObjectInfo)
-	go func() {
-		// Close the channel on every exit path so RemoveObjects cannot block.
-		defer close(objectsVersions)
-		objectsVersionsInfo := c.ListObjects(context.Background(), bucketName,
-			minio.ListObjectsOptions{WithVersions: true, Recursive: true})
-		for info := range objectsVersionsInfo {
-			if info.Err != nil {
-				logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
-				return
-			}
-			objectsVersions <- info
-		}
-	}()
-
-	removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{})
-
-	for e := range removeErrors {
-		if e.Err != nil {
-			logError(testName, function, args, startTime, "", "Single delete operation failed", e.Err)
-			return
-		}
-	}
-
-	objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
-	for range objectsVersionsInfo {
-		logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any", nil)
-		return
-	}
-
-	err = c.RemoveBucket(context.Background(), bucketName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-func testObjectTaggingWithVersioning() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "{Get,Set,Remove}ObjectTagging()"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Make bucket failed", err)
-		return
-	}
-
-	err = c.EnableVersioning(context.Background(), bucketName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
-		return
-	}
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	for _, file := range []string{"datafile-1-b", "datafile-10-kB"} {
-		_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{})
-		if err != nil {
-			logError(testName, function, args, startTime, "", "PutObject failed", err)
-			return
-		}
-	}
-
-	versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
-
-	var versions []minio.ObjectInfo
-	for info := range versionsInfo {
-		if info.Err != nil {
-			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
-			return
-		}
-		versions = append(versions, info)
-	}
-
-	sort.SliceStable(versions, func(i, j int) bool {
-		return versions[i].Size < versions[j].Size
-	})
-
-	tagsV1 := map[string]string{"key1": "val1"}
-	t1, err := tags.MapToObjectTags(tagsV1)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MapToObjectTags (1) failed", err)
-		return
-	}
-
-	err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err)
-		return
-	}
-
-	tagsV2 := map[string]string{"key2": "val2"}
-	t2, err := tags.MapToObjectTags(tagsV2)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MapToObjectTags (2) failed", err)
-		return
-	}
-
-	err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err)
-		return
-	}
-
-	tagsEqual := func(tags1, tags2 map[string]string) bool {
-		if len(tags1) != len(tags2) {
-			return false
-		}
-		for k1, v1 := range tags1 {
-			v2, found := tags2[k1]
-			if !found || v1 != v2 {
-				return false
-			}
-		}
-		return true
-	}
-
-	gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
-		return
-	}
-
-	if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) {
-		logError(testName, function, args, startTime, "", "Unexpected tags content (1)", nil)
-		return
-	}
-
-	gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
-		return
-	}
-
-	if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) {
-		logError(testName, function, args, startTime, "", "Unexpected tags content (2)", nil)
-		return
-	}
-
-	err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveObjectTagging failed", err)
-		return
-	}
-
-	emptyTags,
err := c.GetObjectTagging(context.Background(), bucketName, objectName, - minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) - return - } - - if len(emptyTags.ToMap()) != 0 { - logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) - return - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject with custom checksums. -func testPutObjectWithChecksums() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", - } - - if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - tests := []struct { - header string - hasher hash.Hash - - // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string - }{ - {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()}, - {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, - {header: "x-amz-checksum-sha1", hasher: sha1.New()}, - {header: "x-amz-checksum-sha256", hasher: sha256.New()}, - } - - for i, test := range tests { - bufSize := dataFileMap["datafile-10-kB"] - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - cmpChecksum := func(got, want string) { - if want != got { - logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) - return - } - } - - meta := map[string]string{} - reader := getDataReader("datafile-10-kB") - b, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "Read failed", err) - return - } - h := test.hasher - h.Reset() - // Wrong CRC. 
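-		// Nothing has been written to the hasher yet, so this is the checksum
-		// of empty input -- deliberately wrong for the object body.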
-		meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
-		args["metadata"] = meta
-		args["range"] = "false"
-
-		resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
-			DisableMultipart: true,
-			UserMetadata:     meta,
-		})
-		if err == nil {
-			if i == 0 && resp.ChecksumCRC32 == "" {
-				ignoredLog(testName, function, args, startTime, "Checksums do not appear to be supported by backend").Info()
-				return
-			}
-			logError(testName, function, args, startTime, "", "PutObject with wrong checksum should fail", nil)
-			return
-		}
-
-		// Set correct CRC.
-		h.Write(b)
-		meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
-		reader.Close()
-
-		resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
-			DisableMultipart:     true,
-			DisableContentSha256: true,
-			UserMetadata:         meta,
-		})
-		if err != nil {
-			logError(testName, function, args, startTime, "", "PutObject failed", err)
-			return
-		}
-		cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
-		cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
-		cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
-		cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
-
-		// Read the data back
-		gopts := minio.GetObjectOptions{Checksum: true}
-
-		r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "GetObject failed", err)
-			return
-		}
-
-		st, err := r.Stat()
-		if err != nil {
-			logError(testName, function, args, startTime, "", "Stat failed", err)
-			return
-		}
-		cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
-		cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
-		cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
-		cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
-
-		if st.Size != int64(bufSize) {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), err)
-			return
-		}
-
-		if err := r.Close(); err != nil {
-			logError(testName, function, args, startTime, "", "Object Close failed", err)
-			return
-		}
-		if err := r.Close(); err == nil {
-			logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
-			return
-		}
-
-		args["range"] = "true"
-		err = gopts.SetRange(100, 1000)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "SetRange failed", err)
-			return
-		}
-		r, err = c.GetObject(context.Background(), bucketName, objectName, gopts)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "GetObject failed", err)
-			return
-		}
-
-		b, err = io.ReadAll(r)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "Read failed", err)
-			return
-		}
-		st, err = r.Stat()
-		if err != nil {
-			logError(testName, function, args, startTime, "", "Stat failed", err)
-			return
-		}
-
-		// Range requests should return empty checksums...
-		cmpChecksum(st.ChecksumSHA256, "")
-		cmpChecksum(st.ChecksumSHA1, "")
-		cmpChecksum(st.ChecksumCRC32, "")
-		cmpChecksum(st.ChecksumCRC32C, "")
-
-		delete(args, "range")
-		delete(args, "metadata")
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test PutObject with custom checksums.
-func testPutMultipartObjectWithChecksums() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", - } - - if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string { - r := bytes.NewReader(b) - tmp := make([]byte, partSize) - parts := 0 - var all []byte - for { - n, err := io.ReadFull(r, tmp) - if err != nil && err != io.ErrUnexpectedEOF { - logError(testName, function, args, startTime, "", "Calc crc failed", err) - } - if n == 0 { - break - } - parts++ - hasher.Reset() - hasher.Write(tmp[:n]) - all = append(all, hasher.Sum(nil)...) - if err != nil { - break - } - } - hasher.Reset() - hasher.Write(all) - return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts) - } - defer cleanupBucket(bucketName, c) - tests := []struct { - header string - hasher hash.Hash - - // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string - }{ - // Currently there is no way to override the checksum type. - {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "OpEx0Q==-13"}, - } - - for _, test := range tests { - bufSize := dataFileMap["datafile-129-MB"] - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - cmpChecksum := func(got, want string) { - if want != got { - // logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) - fmt.Printf("want %s, got %s\n", want, got) - return - } - } - - const partSize = 10 << 20 - reader := getDataReader("datafile-129-MB") - b, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "Read failed", err) - return - } - reader.Close() - h := test.hasher - h.Reset() - test.ChecksumCRC32C = hashMultiPart(b, partSize, test.hasher) - - // Set correct CRC. 
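-		// test.ChecksumCRC32C now holds the expected multipart checksum computed
-		// by hashMultiPart: the checksum of all per-part checksums, suffixed
-		// with the part count (e.g. "OpEx0Q==-13").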
- - resp, err := c.PutObject(context.Background(), bucketName, objectName, io.NopCloser(bytes.NewReader(b)), int64(bufSize), minio.PutObjectOptions{ - DisableContentSha256: true, - DisableMultipart: false, - UserMetadata: nil, - PartSize: partSize, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) - cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) - cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) - cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) - - // Read the data back - gopts := minio.GetObjectOptions{Checksum: true} - gopts.PartNumber = 2 - - // We cannot use StatObject, since it ignores partnumber. - r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - io.Copy(io.Discard, r) - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - // Test part 2 checksum... - h.Reset() - h.Write(b[partSize : 2*partSize]) - got := base64.StdEncoding.EncodeToString(h.Sum(nil)) - if test.ChecksumSHA256 != "" { - cmpChecksum(st.ChecksumSHA256, got) - } - if test.ChecksumSHA1 != "" { - cmpChecksum(st.ChecksumSHA1, got) - } - if test.ChecksumCRC32 != "" { - cmpChecksum(st.ChecksumCRC32, got) - } - if test.ChecksumCRC32C != "" { - cmpChecksum(st.ChecksumCRC32C, got) - } - - delete(args, "metadata") - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject with trailing checksums. -func testTrailingChecksums() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", - } - - if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() - return - } - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: true, - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string { - r := bytes.NewReader(b) - tmp := make([]byte, partSize) - parts := 0 - var all []byte - for { - n, err := io.ReadFull(r, tmp) - if err != nil && err != io.ErrUnexpectedEOF { - logError(testName, function, args, startTime, "", "Calc crc failed", err) - } - if n == 0 { - break - } - parts++ - hasher.Reset() - hasher.Write(tmp[:n]) - all = append(all, hasher.Sum(nil)...) 
- if err != nil { - break - } - } - hasher.Reset() - hasher.Write(all) - return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts) - } - defer cleanupBucket(bucketName, c) - tests := []struct { - header string - hasher hash.Hash - - // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string - PO minio.PutObjectOptions - }{ - // Currently there is no way to override the checksum type. - { - header: "x-amz-checksum-crc32c", - hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), - ChecksumCRC32C: "set", - PO: minio.PutObjectOptions{ - DisableContentSha256: true, - DisableMultipart: false, - UserMetadata: nil, - PartSize: 5 << 20, - }, - }, - { - header: "x-amz-checksum-crc32c", - hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), - ChecksumCRC32C: "set", - PO: minio.PutObjectOptions{ - DisableContentSha256: true, - DisableMultipart: false, - UserMetadata: nil, - PartSize: 6_645_654, // Rather arbitrary size - }, - }, - { - header: "x-amz-checksum-crc32c", - hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), - ChecksumCRC32C: "set", - PO: minio.PutObjectOptions{ - DisableContentSha256: false, - DisableMultipart: false, - UserMetadata: nil, - PartSize: 5 << 20, - }, - }, - { - header: "x-amz-checksum-crc32c", - hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), - ChecksumCRC32C: "set", - PO: minio.PutObjectOptions{ - DisableContentSha256: false, - DisableMultipart: false, - UserMetadata: nil, - PartSize: 6_645_654, // Rather arbitrary size - }, - }, - } - - for _, test := range tests { - bufSize := dataFileMap["datafile-11-MB"] - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - cmpChecksum := func(got, want string) { - if want != got { - logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %q, got %q", want, got)) - return - } - } - - reader := getDataReader("datafile-11-MB") - b, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "Read failed", err) - return - } - reader.Close() - h := test.hasher - h.Reset() - test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher) - - // Set correct CRC. - // c.TraceOn(os.Stderr) - resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - // c.TraceOff() - cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) - cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) - cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) - cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) - - // Read the data back - gopts := minio.GetObjectOptions{Checksum: true} - gopts.PartNumber = 2 - - // We cannot use StatObject, since it ignores partnumber. - r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - io.Copy(io.Discard, r) - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - // Test part 2 checksum... 
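-		// A part checksum covers only that part's bytes, so recompute it locally
-		// from the slice of the source buffer that corresponds to part 2.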
- h.Reset() - p2 := b[test.PO.PartSize:] - if len(p2) > int(test.PO.PartSize) { - p2 = p2[:test.PO.PartSize] - } - h.Write(p2) - got := base64.StdEncoding.EncodeToString(h.Sum(nil)) - if test.ChecksumSHA256 != "" { - cmpChecksum(st.ChecksumSHA256, got) - } - if test.ChecksumSHA1 != "" { - cmpChecksum(st.ChecksumSHA1, got) - } - if test.ChecksumCRC32 != "" { - cmpChecksum(st.ChecksumCRC32, got) - } - if test.ChecksumCRC32C != "" { - cmpChecksum(st.ChecksumCRC32C, got) - } - - delete(args, "metadata") - } -} - -// Test PutObject with custom checksums. -func testPutObjectWithAutomaticChecksums() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", - } - - if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: true, - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - tests := []struct { - header string - hasher hash.Hash - - // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string - }{ - // Built-in will only add crc32c, when no MD5 nor SHA256. - {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, - } - - // Enable tracing, write to stderr. 
- // c.TraceOn(os.Stderr) - // defer c.TraceOff() - - for i, test := range tests { - bufSize := dataFileMap["datafile-10-kB"] - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - cmpChecksum := func(got, want string) { - if want != got { - logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) - return - } - } - - meta := map[string]string{} - reader := getDataReader("datafile-10-kB") - b, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "Read failed", err) - return - } - - h := test.hasher - h.Reset() - h.Write(b) - meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) - args["metadata"] = meta - - resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ - DisableMultipart: true, - UserMetadata: nil, - DisableContentSha256: true, - SendContentMd5: false, - }) - if err == nil { - if i == 0 && resp.ChecksumCRC32C == "" { - ignoredLog(testName, function, args, startTime, "Checksums does not appear to be supported by backend").Info() - return - } - } else { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) - cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) - cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) - cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) - - // Usually this will be the same as above, since we skip automatic checksum when SHA256 content is sent. - // When/if we add a checksum control to PutObjectOptions this will make more sense. - resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ - DisableMultipart: true, - UserMetadata: nil, - DisableContentSha256: false, - SendContentMd5: false, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - // The checksum will not be enabled on HTTP, since it uses SHA256 blocks. 
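-		// (Over plain HTTP the client signs the payload in SHA256 chunks
-		// instead, which is why the comparison below only runs with HTTPS.)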
- if mustParseBool(os.Getenv(enableHTTPS)) { - cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) - cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) - cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) - cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) - } - - // Set SHA256 header manually - sh256 := sha256.Sum256(b) - meta = map[string]string{"x-amz-checksum-sha256": base64.StdEncoding.EncodeToString(sh256[:])} - args["metadata"] = meta - resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ - DisableMultipart: true, - UserMetadata: meta, - DisableContentSha256: true, - SendContentMd5: false, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) - cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) - cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) - cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) - delete(args, "metadata") - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject using a large data to trigger multipart readat -func testPutObjectWithMetadata() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", - } - - if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-129-MB"] - reader := getDataReader("datafile-129-MB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Object custom metadata - customContentType := "custom/contenttype" - - args["metadata"] = map[string][]string{ - "Content-Type": {customContentType}, - "X-Amz-Meta-CustomKey": {"extra spaces in value"}, - } - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ - ContentType: customContentType, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) - return - } - if err := crcMatchesName(r, "datafile-129-MB"); err != nil { - logError(testName, function, args, startTime, "", "data CRC check failed", err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testPutObjectWithContentLanguage() { - // initialize logging params - objectName := "test-object" - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - data := []byte{} - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ - ContentLanguage: "en", - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if objInfo.Metadata.Get("Content-Language") != "en" { - logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with streaming signature. -func testPutObjectStreaming() { - // initialize logging params - objectName := "test-object" - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size,opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload an object. - sizes := []int64{0, 64*1024 - 1, 64 * 1024} - - for _, size := range sizes { - data := newRandomReader(size, size) - ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) - return - } - - if ui.Size != size { - logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil) - return - } - - objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if objInfo.Size != size { - logError(testName, function, args, startTime, "", "Unexpected size", err) - return - } - - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object seeker from the end, using whence set to '2'. 
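-// Whence value '2' corresponds to io.SeekEnd: the offset is applied relative to the end of the object.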
-func testGetObjectSeekEnd() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	reader := getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	buf, err := io.ReadAll(reader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	// Read the data back
-	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes read does not match, expected %d got %d", bufSize, st.Size), err)
-		return
-	}
-
-	pos, err := r.Seek(-100, 2)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Object Seek failed", err)
-		return
-	}
-	if pos != st.Size-100 {
-		logError(testName, function, args, startTime, "", "Incorrect position", err)
-		return
-	}
-	buf2 := make([]byte, 100)
-	m, err := readFull(r, buf2)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Error reading through readFull", err)
-		return
-	}
-	if m != len(buf2) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes don't match, expected %d got %d", len(buf2), m), err)
-		return
-	}
-	hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
-	hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
-	if hexBuf1 != hexBuf2 {
-		logError(testName, function, args, startTime, "", "Values at same index don't match", err)
-		return
-	}
-	pos, err = r.Seek(-100, 2)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Object Seek failed", err)
-		return
-	}
-	if pos != st.Size-100 {
-		logError(testName, function, args, startTime, "", "Incorrect position", err)
-		return
-	}
-	if err = r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "ObjectClose failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test get object reader to not throw error on being closed twice.
-func testGetObjectClosedTwice() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	reader := getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	// Read the data back
-	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
-		return
-	}
-	if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
-		logError(testName, function, args, startTime, "", "data CRC check failed", err)
-		return
-	}
-	if err := r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "Object Close failed", err)
-		return
-	}
-	if err := r.Close(); err == nil {
-		logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test RemoveObjects request where context cancels after timeout
-func testRemoveObjectsContext() {
-	// Initialize logging params.
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "RemoveObjects(ctx, bucketName, objectsCh)"
-	args := map[string]interface{}{
-		"bucketName": "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client.
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate put data. - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) - - // Multi remove of 20 objects. - nrObjects := 20 - objectsCh := make(chan minio.ObjectInfo) - go func() { - defer close(objectsCh) - for i := 0; i < nrObjects; i++ { - objectName := "sample" + strconv.Itoa(i) + ".txt" - info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, - minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - continue - } - objectsCh <- minio.ObjectInfo{ - Key: info.Key, - VersionID: info.VersionID, - } - } - }() - // Set context to cancel in 1 nanosecond. - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Call RemoveObjects API with short timeout. - errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) - // Check for error. - select { - case r := <-errorCh: - if r.Err == nil { - logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err) - return - } - } - // Set context with longer timeout. - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - defer cancel() - // Perform RemoveObjects with the longer timeout. Expect the removals to succeed. - errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) - select { - case r, more := <-errorCh: - if more || r.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error", r.Err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test removing multiple objects with Remove API -func testRemoveMultipleObjects() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "RemoveObjects(bucketName, objectsCh)" - args := map[string]interface{}{ - "bucketName": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) - - // Multi remove of 200 objects - nrObjects := 200 - - objectsCh := make(chan minio.ObjectInfo) - - go func() { - defer close(objectsCh) - // Upload objects and send them to objectsCh - for i := 0; i < nrObjects; i++ { - objectName := "sample" + strconv.Itoa(i) + ".txt" - info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, - minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - continue - } - objectsCh <- minio.ObjectInfo{ - Key: info.Key, - VersionID: info.VersionID, - } - } - }() - - // Call RemoveObjects API - errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) - - // Check if errorCh doesn't receive any error - select { - case r, more := <-errorCh: - if more { - logError(testName, function, args, startTime, "", "Unexpected error", r.Err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test removing multiple objects and check the results -func testRemoveMultipleObjectsWithResult() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "RemoveObjects(bucketName, objectsCh)" - args := map[string]interface{}{ - "bucketName": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket.
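// Editor's note (addition): the next test creates its bucket with object
// locking enabled, which is what makes the retention calls below possible.
// Locking can only be requested at bucket creation and implies versioning,
// hence the cleanupVersionedBucket helper. A minimal illustration with a
// placeholder bucket name:
//
//	opts := minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}
//	if err := c.MakeBucket(context.Background(), "locked-bucket", opts); err != nil {
//		log.Fatalln("MakeBucket failed:", err)
//	}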
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupVersionedBucket(bucketName, c) - - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) - - nrObjects := 10 - nrLockedObjects := 5 - - objectsCh := make(chan minio.ObjectInfo) - - go func() { - defer close(objectsCh) - // Upload objects and send them to objectsCh - for i := 0; i < nrObjects; i++ { - objectName := "sample" + strconv.Itoa(i) + ".txt" - info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, - minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if i < nrLockedObjects { - // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC) - t := time.Now().Add(5 * time.Minute) - m := minio.RetentionMode(minio.Governance) - opts := minio.PutObjectRetentionOptions{ - GovernanceBypass: false, - RetainUntilDate: &t, - Mode: &m, - VersionID: info.VersionID, - } - err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) - if err != nil { - logError(testName, function, args, startTime, "", "Error setting retention", err) - return - } - } - - objectsCh <- minio.ObjectInfo{ - Key: info.Key, - VersionID: info.VersionID, - } - } - }() - - // Call RemoveObjects API - resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) - - var foundNil, foundErr int - - for { - // Check if errorCh doesn't receive any error - select { - case deleteRes, ok := <-resultCh: - if !ok { - goto out - } - if deleteRes.ObjectName == "" { - logError(testName, function, args, startTime, "", "Unexpected object name", nil) - return - } - if deleteRes.ObjectVersionID == "" { - logError(testName, function, args, startTime, "", "Unexpected object version ID", nil) - return - } - - if deleteRes.Err == nil { - foundNil++ - } else { - foundErr++ - } - } - } -out: - if foundNil+foundErr != nrObjects { - logError(testName, function, args, startTime, "", "Unexpected number of results", nil) - return - } - - if foundNil != nrObjects-nrLockedObjects { - logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil) - return - } - - if foundErr != nrLockedObjects { - logError(testName, function, args, startTime, "", "Unexpected number of errors", nil) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject of a big file to trigger multipart -func testFPutObjectMultipart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
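// Editor's note (addition): the retention mechanics used above, in isolation.
// An object version under GOVERNANCE retention cannot be deleted before its
// retain-until date unless the caller sets the governance-bypass flag, which
// is why the test expects exactly nrLockedObjects failed deletes. A sketch
// assuming a locking-enabled bucket; bucket/object names and versionID are
// placeholders:
//
//	until := time.Now().Add(5 * time.Minute)
//	mode := minio.Governance
//	err := c.PutObjectRetention(context.Background(), "locked-bucket", "sample0.txt",
//		minio.PutObjectRetentionOptions{
//			Mode:            &mode,
//			RetainUntilDate: &until,
//			VersionID:       versionID, // the version to lock, from the PutObject result
//		})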
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. - fileName := getMintDataDirFilePath("datafile-129-MB") - if fileName == "" { - // Make a temp file with minPartSize bytes of data. - file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. - if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File Close failed", err) - return - } - fileName = file.Name() - args["fileName"] = fileName - } - totalSize := dataFileMap["datafile-129-MB"] - // Set base object name - objectName := bucketName + "FPutObject" + "-standard" - args["objectName"] = objectName - - objectContentType := "testapplication/octet-stream" - args["objectContentType"] = objectContentType - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Unexpected error", err) - return - } - if objInfo.Size != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) - return - } - if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType doesn't match", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject with null contentType (default = application/octet-stream) -func testFPutObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
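// Editor's note (addition): the 129 MB datafile in the test above is chosen so
// that FPutObject has to switch to a multipart upload: once the input exceeds
// the library's part-size threshold it is split into several concurrently
// uploaded parts. If needed, the part size can also be pinned explicitly; the
// values below are illustrative only:
//
//	_, err := c.FPutObject(context.Background(), bucket, "big.bin", "/tmp/big.bin",
//		minio.PutObjectOptions{
//			ContentType: "application/octet-stream",
//			PartSize:    64 * 1024 * 1024, // optional: force 64 MiB parts
//		})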
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - location := "us-east-1" - - // Make a new bucket. - args["bucketName"] = bucketName - args["location"] = location - function = "MakeBucket(bucketName, location)" - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. - // Use different data in part for multipart tests to check parts are uploaded in correct order. - fName := getMintDataDirFilePath("datafile-129-MB") - if fName == "" { - // Make a temp file with minPartSize bytes of data. - file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. - if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - // Close the file pro-actively for windows. 
- if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - defer os.Remove(file.Name()) - fName = file.Name() - } - - // Set base object name - function = "FPutObject(bucketName, objectName, fileName, opts)" - objectName := bucketName + "FPutObject" - args["objectName"] = objectName + "-standard" - args["fileName"] = fName - args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"} - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - if ui.Size != int64(dataFileMap["datafile-129-MB"]) { - logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/octet-stream) - args["objectName"] = objectName + "-Octet" - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - srcFile, err := os.Open(fName) - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - defer srcFile.Close() - // Add extension to temp file name - tmpFile, err := os.Create(fName + ".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "File create failed", err) - return - } - _, err = io.Copy(tmpFile, srcFile) - if err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - tmpFile.Close() - - // Perform FPutObject with no contentType provided (Expecting application/x-gtar) - args["objectName"] = objectName + "-GTar" - args["opts"] = minio.PutObjectOptions{} - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - // Check headers - function = "StatObject(bucketName, objectName, opts)" - args["objectName"] = objectName + "-standard" - rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rStandard.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err) - return - } - - function = "StatObject(bucketName, objectName, opts)" - args["objectName"] = objectName + "-Octet" - rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rOctet.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err) - return - } - - function = "StatObject(bucketName, objectName, opts)" - args["objectName"] = objectName + "-GTar" - rGTar, err :=
c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-tar or application/octet-stream, got "+rGTar.ContentType, err) - return - } - - os.Remove(fName + ".gtar") - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject request when context cancels after timeout -func testFPutObjectContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload 1 parts worth of data to use multipart upload. - // Use different data in part for multipart tests to check parts are uploaded in correct order. - fName := getMintDataDirFilePath("datafile-1-MB") - if fName == "" { - // Make a temp file with 1 MiB bytes of data. - file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - // Upload 1 parts to trigger multipart upload - if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - // Close the file pro-actively for windows. 
- if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - defer os.Remove(file.Name()) - fName = file.Name() - } - - // Set base object name - objectName := bucketName + "FPutObjectContext" - args["objectName"] = objectName - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Perform FPutObject with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - // Perform FPutObject with a long timeout. Expect the put object to succeed - _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err) - return - } - - _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject request when context cancels after timeout -func testFPutObjectContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{ContentType:objectContentType}", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload 1 parts worth of data to use multipart upload. - // Use different data in part for multipart tests to check parts are uploaded in correct order. - fName := getMintDataDirFilePath("datafile-1-MB") - if fName == "" { - // Make a temp file with 1 MiB bytes of data. 
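// Editor's note (addition): the deadline pattern shared by both FPutObject
// context tests, reduced to its core. An already-expired context must abort
// the upload, while a generous deadline must not interfere. Assumes client c,
// a bucket, and a local file path; all names are placeholders:
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
//	defer cancel()
//	if _, err := c.FPutObject(ctx, bucket, "obj-short", path, minio.PutObjectOptions{}); err == nil {
//		log.Fatalln("expected the expired context to abort the upload")
//	}
//
//	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
//	defer cancel()
//	if _, err := c.FPutObject(ctx, bucket, "obj-long", path, minio.PutObjectOptions{}); err != nil {
//		log.Fatalln("upload with a generous deadline failed:", err)
//	}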
- file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") - if err != nil { - logError(testName, function, args, startTime, "", "Temp file creation failed", err) - return - } - - // Upload 1 parts to trigger multipart upload - if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - - // Close the file pro-actively for windows. - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - defer os.Remove(file.Name()) - fName = file.Name() - } - - // Set base object name - objectName := bucketName + "FPutObjectContext" - args["objectName"] = objectName - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Perform FPutObject with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - // Perform FPutObject with a long timeout. Expect the put object to succeed - _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err) - return - } - - _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test validates putObject with context to see if request cancellation is honored. -func testPutObjectContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(ctx, bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "opts": "", - } - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
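// Editor's note (addition): the V2 variant above differs from the earlier test
// only in the signature version of its static credentials; the harness is
// otherwise identical. Endpoint and keys below are placeholders:
//
//	// Legacy AWS Signature V2, still accepted by some S3-compatible servers:
//	c2, err := minio.New("s3.example.com", &minio.Options{
//		Creds:  credentials.NewStaticV2("ACCESS", "SECRET", ""),
//		Secure: true,
//	})
//	// AWS Signature V4, used by most other tests in this file:
//	c4, err := minio.New("s3.example.com", &minio.Options{
//		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
//		Secure: true,
//	})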
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket call failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-33-kB"] - reader := getDataReader("datafile-33-kB") - defer reader.Close() - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - cancel() - args["ctx"] = ctx - args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} - - _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err) - return - } - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - - defer cancel() - reader = getDataReader("datafile-33-kB") - defer reader.Close() - _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object with s3zip extensions. -func testGetObjectS3Zip() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{"x-minio-extract": true} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - }() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip" - args["objectName"] = objectName - - var zipFile bytes.Buffer - zw := zip.NewWriter(&zipFile) - rng := rand.New(rand.NewSource(0xc0cac01a)) - const nFiles = 500 - for i := 0; i <= nFiles; i++ { - if i == nFiles { - // Make one large, compressible file. 
- i = 1000000 - } - b := make([]byte, i) - if i < nFiles { - rng.Read(b) - } - wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i)) - if err != nil { - logError(testName, function, args, startTime, "", "zw.Create failed", err) - return - } - wc.Write(b) - } - err = zw.Close() - if err != nil { - logError(testName, function, args, startTime, "", "zw.Close failed", err) - return - } - buf := zipFile.Bytes() - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(len(buf))+", got "+string(st.Size), err) - return - } - r.Close() - - zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf))) - if err != nil { - logError(testName, function, args, startTime, "", "zip.NewReader failed", err) - return - } - lOpts := minio.ListObjectsOptions{} - lOpts.Set("x-minio-extract", "true") - lOpts.Prefix = objectName + "/" - lOpts.Recursive = true - list := c.ListObjects(context.Background(), bucketName, lOpts) - listed := map[string]minio.ObjectInfo{} - for item := range list { - if item.Err != nil { - break - } - listed[item.Key] = item - } - if len(listed) == 0 { - // Assume we are running against non-minio. 
- args["SKIPPED"] = true - ignoredLog(testName, function, args, startTime, "s3zip does not appear to be present").Info() - return - } - - for _, file := range zr.File { - if file.FileInfo().IsDir() { - continue - } - args["zipfile"] = file.Name - zfr, err := file.Open() - if err != nil { - logError(testName, function, args, startTime, "", "file.Open failed", err) - return - } - want, err := io.ReadAll(zfr) - if err != nil { - logError(testName, function, args, startTime, "", "fzip file read failed", err) - return - } - - opts := minio.GetObjectOptions{} - opts.Set("x-minio-extract", "true") - key := path.Join(objectName, file.Name) - r, err = c.GetObject(context.Background(), bucketName, key, opts) - if err != nil { - terr := minio.ToErrorResponse(err) - if terr.StatusCode != http.StatusNotFound { - logError(testName, function, args, startTime, "", "GetObject failed", err) - } - return - } - got, err := io.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - r.Close() - if !bytes.Equal(want, got) { - logError(testName, function, args, startTime, "", "Content mismatch", err) - return - } - oi, ok := listed[key] - if !ok { - logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key)) - return - } - if int(oi.Size) != len(got) { - logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got))) - return - } - delete(listed, key) - } - delete(args, "zipfile") - if len(listed) > 0 { - logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed)) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderSeeker interface methods. -func testGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - }() - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - reader := getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - // Generic seek error for errors other than io.EOF - seekErr := errors.New("seek error") - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, seekErr, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, seekErr, false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - // We expect an error - if testCase.err == seekErr && err == nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) - return - } - // We expect a specific error - if testCase.err != seekErr && testCase.err != err { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: 
"+testCase.err.Error()+", found: "+err.Error(), err) - return - } - // If we expect an error go to the next loop - if testCase.err != nil { - continue - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderAt interface methods. -func testGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - reader := getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. 
- m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, len(buf)) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, len(buf)+1) - // Read the whole object and beyond. 
- _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Reproduces issue https://github.com/minio/minio-go/issues/1137 -func testGetObjectReadAtWhenEOFWasReached() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - reader := getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - // read directly - buf1 := make([]byte, len(buf)) - buf2 := make([]byte, 512) - - m, err := r.Read(buf1) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "Read failed", err) - return - } - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "Read read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf) { - logError(testName, function, args, startTime, "", "Incorrect count of Read data", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, 512) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "",
"ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[512:1024]) { - logError(testName, function, args, startTime, "", "Incorrect count of ReadAt data", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test Presigned Post Policy -func testPresignedPostPolicy() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PresignedPostPolicy(policy)" - args := map[string]interface{}{ - "policy": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. - reader := getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - // Azure requires the key to not start with a number - metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") - metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - policy := minio.NewPostPolicy() - - if err := policy.SetBucket(""); err == nil { - logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err) - return - } - if err := policy.SetKey(""); err == nil { - logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) - return - } - if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { - logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) - return - } - if err := policy.SetContentType(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) - return - } - if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { - logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) - return - } - if err := policy.SetUserMetadata("", ""); err == nil { - logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) - 
return - } - - policy.SetBucket(bucketName) - policy.SetKey(objectName) - policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days - policy.SetContentType("binary/octet-stream") - policy.SetContentLengthRange(10, 1024*1024) - policy.SetUserMetadata(metadataKey, metadataValue) - - // Add CRC32C - checksum := minio.ChecksumCRC32C.ChecksumBytes(buf) - policy.SetChecksum(checksum) - - args["policy"] = policy.String() - - presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) - return - } - - var formBuf bytes.Buffer - writer := multipart.NewWriter(&formBuf) - for k, v := range formData { - writer.WriteField(k, v) - } - - // Get a 33KB file to upload and test if set post policy works - filePath := getMintDataDirFilePath("datafile-33-kB") - if filePath == "" { - // Make a temp file with 33 KB data. - file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File Close failed", err) - return - } - filePath = file.Name() - } - - // add file to post request - f, err := os.Open(filePath) - defer f.Close() - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - w, err := writer.CreateFormFile("file", filePath) - if err != nil { - logError(testName, function, args, startTime, "", "CreateFormFile failed", err) - return - } - - _, err = io.Copy(w, f) - if err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - writer.Close() - - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) - if err != nil { - logError(testName, function, args, startTime, "", "DefaultTransport failed", err) - return - } - - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively canceled after 30secs - // with no response. - Timeout: 30 * time.Second, - Transport: transport, - } - args["url"] = presignedPostPolicyURL.String() - - req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) - if err != nil { - logError(testName, function, args, startTime, "", "Http request failed", err) - return - } - - req.Header.Set("Content-Type", writer.FormDataContentType()) - - // make post request with correct form data - res, err := httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "Http request failed", err) - return - } - defer res.Body.Close() - if res.StatusCode != http.StatusNoContent { - logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) - return - } - - // expected path should be absolute path of the object - var scheme string - if mustParseBool(os.Getenv(enableHTTPS)) { - scheme = "https://" - } else { - scheme = "http://" - } - - expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName - expectedLocationBucketDNS := scheme + bucketName + "." 
+ os.Getenv(serverEndpoint) + "/" + objectName - - if !strings.Contains(expectedLocation, "s3.amazonaws.com/") { - // Test when not against AWS S3. - if val, ok := res.Header["Location"]; ok { - if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { - logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err) - return - } - } else { - logError(testName, function, args, startTime, "", "Location not found in header response", err) - return - } - } - want := checksum.Encoded() - if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want { - logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests copy object -func testCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(dst, src)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Make a new bucket in 'us-east-1' (destination bucket). - err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName+"-copy", c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - reader := getDataReader("datafile-33-kB") - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - // Copy Source - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - // Set copy conditions. 
- MatchETag: objInfo.ETag, - MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), - } - args["src"] = src - - dst := minio.CopyDestOptions{ - Bucket: bucketName + "-copy", - Object: objectName + "-copy", - } - - // Perform the Copy - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Source object - r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - // Destination object - readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - // Check the various fields of source object against destination object. - objInfo, err = r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - objInfoCopy, err := readerCopy.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if objInfo.Size != objInfoCopy.Size { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err) - return - } - - if err := crcMatchesName(r, "datafile-33-kB"); err != nil { - logError(testName, function, args, startTime, "", "data CRC check failed", err) - return - } - if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil { - logError(testName, function, args, startTime, "", "copy data CRC check failed", err) - return - } - // Close all the get readers before proceeding with CopyObject operations. 
- r.Close() - readerCopy.Close() - - // CopyObject again but with wrong conditions - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), - NoMatchETag: objInfo.ETag, - } - - // Perform the Copy which should fail - _, err = c.CopyObject(context.Background(), dst, src) - if err == nil { - logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) - return - } - - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - } - - dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: objectName, - ReplaceMetadata: true, - UserMetadata: map[string]string{ - "Copy": "should be same", - }, - } - args["dst"] = dst - args["src"] = src - - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) - return - } - - oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - stOpts := minio.StatObjectOptions{} - stOpts.SetMatchETag(oi.ETag) - objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) - return - } - - if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { - logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-C get object ReaderSeeker interface methods. -func testSSECEncryptedGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - }() - - // Generate 129MiB of data. 
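The SSE-C tests below encrypt each object with a key derived by encrypt.DefaultPBKDF from a password and a per-object salt. A minimal sketch of the round trip they exercise, assuming the client c and the imports already present in this file; ssecRoundTrip is a hypothetical helper, not part of the suite:

func ssecRoundTrip(ctx context.Context, c *minio.Client, bucket, object string, data []byte) error {
	// Derive the SSE-C key from a password and a per-object salt.
	sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucket+object))
	if _, err := c.PutObject(ctx, bucket, object, bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
		return err
	}
	// Every read (and Stat) of an SSE-C object must present the same key again.
	obj, err := c.GetObject(ctx, bucket, object, minio.GetObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		return err
	}
	defer obj.Close()
	_, err = io.Copy(io.Discard, obj) // decryption is transparent on read
	return err
}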
- bufSize := dataFileMap["datafile-129-MB"] - reader := getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, 0, io.EOF, false, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, - // Test with invalid whence - {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - if err != nil && testCase.err == nil { - // We expected success. 
- logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err == nil && testCase.err != nil { - // We expected failure, but got success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err != nil && testCase.err != nil { - if err.Error() != testCase.err.Error() { - // We expect a specific error - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-S3 get object ReaderSeeker interface methods. -func testSSES3EncryptedGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - }() - - // Generate 129MiB of data. 
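The seek tables in these tests pass whence as the raw values 0, 1 and 2; the standard library names them io.SeekStart, io.SeekCurrent and io.SeekEnd. A small sketch of the same three motions against a minio.Object (seekExamples is a hypothetical helper):

func seekExamples(r *minio.Object) error {
	// Equivalent to whence values 0, 1 and 2 in the test tables.
	if _, err := r.Seek(2048, io.SeekStart); err != nil {
		return err
	}
	if _, err := r.Seek(1, io.SeekCurrent); err != nil {
		return err
	}
	// A negative offset relative to the end lands just before EOF.
	if _, err := r.Seek(-1024, io.SeekEnd); err != nil {
		return err
	}
	return nil
}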
- bufSize := dataFileMap["datafile-129-MB"] - reader := getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.NewSSE(), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, 0, io.EOF, false, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, - // Test with invalid whence - {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - if err != nil && testCase.err == nil { - // We expected success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err == nil && testCase.err != nil { - // We expected failure, but got success. 
- logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err != nil && testCase.err != nil { - if err.Error() != testCase.err.Error() { - // We expect a specific error - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-C get object ReaderAt interface methods. -func testSSECEncryptedGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 129MiB of data. 
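The ReadAt assertions below lean on the io.ReaderAt contract: a call either fills the buffer completely or returns a non-nil error, and io.EOF may accompany a full read that ends exactly at the end of the object. A sketch of a strict wrapper encoding that rule (readAtExact is hypothetical):

func readAtExact(r io.ReaderAt, off int64, n int) ([]byte, error) {
	p := make([]byte, n)
	m, err := r.ReadAt(p, off)
	// io.EOF together with a full read means the range ended exactly at EOF.
	if err == io.EOF && m == n {
		err = nil
	}
	if err != nil {
		return nil, err
	}
	return p, nil
}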
- bufSize := dataFileMap["datafile-129-MB"] - reader := getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. - m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf1), m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf2), m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf3), m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf4), m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - }
- - buf5 := make([]byte, len(buf)) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf5), m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Data read in GetObject does not match what was previously uploaded", err) - return - } - - buf6 := make([]byte, len(buf)+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-S3 get object ReaderAt interface methods. -func testSSES3EncryptedGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 129MiB of data.
- bufSize := dataFileMap["datafile-129-MB"] - reader := getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.NewSSE(), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. - m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf1), m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf2), m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf3), m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf4), m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - }
- - buf5 := make([]byte, len(buf)) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected before reaching EOF, expected %d, got %d", len(buf5), m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Data read in GetObject does not match what was previously uploaded", err) - return - } - - buf6 := make([]byte, len(buf)+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// testSSECEncryptionPutGet tests encryption with customer-provided encryption keys -func testSSECEncryptionPutGet() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutEncryptedObject(bucketName, objectName, reader, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket.
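testSSECEncryptionPutGet derives its SSE-C key from a password via encrypt.DefaultPBKDF. When 32 bytes of key material already exist, the package's encrypt.NewSSEC can wrap them directly; a short sketch (ssecFromRawKey is a hypothetical helper):

func ssecFromRawKey(key []byte) (encrypt.ServerSide, error) {
	// SSE-C uses AES-256, so exactly 32 bytes of key material are required;
	// NewSSEC enforces this as well, the check here just fails earlier.
	if len(key) != 32 {
		return nil, fmt.Errorf("SSE-C requires a 32-byte key, got %d bytes", len(key))
	}
	return encrypt.NewSSEC(key)
}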
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - const password = "correct horse battery staple" // https://xkcd.com/936/ - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - args["sse"] = sse - - // Put encrypted data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - successLogger(testName, function, args, startTime).Info() - - } - - successLogger(testName, function, args, startTime).Info() -} - -// TestEncryptionFPut tests encryption with customer specified encryption keys -func testSSECEncryptionFPut() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. 
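Every test in this file carries a commented-out c.TraceOn(os.Stderr). Tracing can also be scoped to a single operation so the output stays small; a sketch (withTrace is a hypothetical helper):

func withTrace(c *minio.Client, f func() error) error {
	c.TraceOn(os.Stderr) // dump the signed HTTP requests and responses
	defer c.TraceOff()
	return f()
}

Wrapping only the one PutObject or GetObject call under investigation keeps the trace readable when a single test fails.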
- // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Object custom metadata - customContentType := "custom/contenttype" - args["metadata"] = customContentType - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 0)}, - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - const password = "correct horse battery staple" // https://xkcd.com/936/ - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - args["sse"] = sse - - // Generate a random file name. - fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - _, err = file.Write(testCase.buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - file.Close() - // Put encrypted data - if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { - logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %v", i+1, err), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, expected %q, got %q", i+1, testCase.buf, recvBuffer.Bytes()), err) - return - } - - os.Remove(fileName) - } - - successLogger(testName, function, args, startTime).Info() -}
- -// testSSES3EncryptionPutGet tests SSE-S3 encryption -func testSSES3EncryptionPutGet() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutEncryptedObject(bucketName, objectName, reader, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.NewSSE() - args["sse"] = sse - - // Put encrypted data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) - return - } - - // Read the data back without any encryption headers - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %v", i+1, err), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, encrypted data sent does not match decrypted data received, expected %q, got %q", i+1, testCase.buf, recvBuffer.Bytes()), err) - return - } - - successLogger(testName, function, args, startTime).Info() - - } - - successLogger(testName, function, args, startTime).Info() -}
- -// testSSES3EncryptionFPut tests server-side encryption (SSE-S3) -func testSSES3EncryptionFPut() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Object custom metadata - customContentType := "custom/contenttype" - args["metadata"] = customContentType - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 0)}, - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.NewSSE() - args["sse"] = sse - - // Generate a random file name.
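The FPut tests write their payload to a file named by randString in the current working directory and remove it by hand. os.CreateTemp plus a cleanup closure is a tighter pattern; a sketch using only the standard library (writeTempFile is a hypothetical helper):

func writeTempFile(data []byte) (path string, cleanup func(), err error) {
	f, err := os.CreateTemp("", "minio-go-test-*")
	if err != nil {
		return "", nil, err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		os.Remove(f.Name())
		return "", nil, err
	}
	if err := f.Close(); err != nil {
		os.Remove(f.Name())
		return "", nil, err
	}
	return f.Name(), func() { os.Remove(f.Name()) }, nil
}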
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - _, err = file.Write(testCase.buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - file.Close() - // Put encrypted data - if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { - logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - os.Remove(fileName) - } - - successLogger(testName, function, args, startTime).Info() -} - -func testBucketNotification() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "SetBucketNotification(bucketName)" - args := map[string]interface{}{ - "bucketName": "", - } - - if os.Getenv("NOTIFY_BUCKET") == "" || - os.Getenv("NOTIFY_SERVICE") == "" || - os.Getenv("NOTIFY_REGION") == "" || - os.Getenv("NOTIFY_ACCOUNTID") == "" || - os.Getenv("NOTIFY_RESOURCE") == "" { - ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - bucketName := os.Getenv("NOTIFY_BUCKET") - args["bucketName"] = bucketName - - topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) - queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") - - topicConfig := notification.NewConfig(topicArn) - topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll) - topicConfig.AddFilterSuffix("jpg") - - queueConfig := notification.NewConfig(queueArn) - queueConfig.AddEvents(notification.ObjectCreatedAll) - queueConfig.AddFilterPrefix("photos/") - - config := notification.Configuration{} - config.AddTopic(topicConfig) - - // Add the same topicConfig again, should have no effect - // because it is duplicated - config.AddTopic(topicConfig) - if len(config.TopicConfigs) != 1 { - logError(testName, function, args, startTime, "", "Duplicate entry added", err) - return - } - - // Add and remove a queue config - config.AddQueue(queueConfig) - config.RemoveQueueByArn(queueArn) - - err = c.SetBucketNotification(context.Background(), bucketName, config) - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) - return - } - - config, err = c.GetBucketNotification(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) - return - } - - if len(config.TopicConfigs) != 1 { - logError(testName, function, args, startTime, "", "Topic config is empty", err) - return - } - - if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { - logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) - return - } - - err = c.RemoveAllBucketNotification(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests comprehensive list of all methods. -func testFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "testFunctional()" - functionAll := "" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket. 
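testBucketNotification above configures static bucket notifications. The client can also stream events as they happen; a hedged sketch using ListenBucketNotification, whose channel closes once the context is cancelled (watchObjectCreated is a hypothetical helper):

func watchObjectCreated(ctx context.Context, c *minio.Client, bucket string) {
	// Each received Info carries either records or a terminal error.
	for info := range c.ListenBucketNotification(ctx, bucket, "", "", []string{"s3:ObjectCreated:*"}) {
		if info.Err != nil {
			fmt.Println("listen error:", info.Err)
			return
		}
		for _, record := range info.Records {
			fmt.Println("created:", record.S3.Object.Key)
		}
	}
}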
- function = "MakeBucket(bucketName, region)" - functionAll = "MakeBucket(bucketName, region)" - args["bucketName"] = bucketName - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - - defer cleanupBucket(bucketName, c) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate a random file name. - fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "File creation failed", err) - return - } - for i := 0; i < 3; i++ { - buf := make([]byte, rand.Intn(1<<19)) - _, err = file.Write(buf) - if err != nil { - logError(testName, function, args, startTime, "", "File write failed", err) - return - } - } - file.Close() - - // Verify the bucket exists and you have access. - var exists bool - function = "BucketExists(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - exists, err = c.BucketExists(context.Background(), bucketName) - - if err != nil { - logError(testName, function, args, startTime, "", "BucketExists failed", err) - return - } - if !exists { - logError(testName, function, args, startTime, "", "Could not find the bucket", err) - return - } - - // Asserting the default bucket policy. - function = "GetBucketPolicy(ctx, bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - if nilPolicy != "" { - logError(testName, function, args, startTime, "", "default bucket policy should be empty", err) - return - } - - // Set the bucket policy to 'public readonly'. - function = "SetBucketPolicy(bucketName, readOnlyPolicy)" - functionAll += ", " + function - - readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readOnlyPolicy, - } - - err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy) - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `readonly`. - function = "GetBucketPolicy(ctx, bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - _, err = c.GetBucketPolicy(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // Make the bucket 'public writeonly'. - function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" - functionAll += ", " + function - - writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": writeOnlyPolicy, - } - err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `writeonly`.
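The policies above are assembled by string concatenation into readOnlyPolicy and friends. Marshalling a struct avoids quoting mistakes; a sketch with encoding/json, assuming that package is imported (readOnlyPolicyJSON and policyStatement are hypothetical):

type policyStatement struct {
	Effect    string              `json:"Effect"`
	Principal map[string][]string `json:"Principal"`
	Action    []string            `json:"Action"`
	Resource  []string            `json:"Resource"`
}

func readOnlyPolicyJSON(bucket string) (string, error) {
	p := struct {
		Version   string            `json:"Version"`
		Statement []policyStatement `json:"Statement"`
	}{
		Version: "2012-10-17",
		Statement: []policyStatement{{
			Effect:    "Allow",
			Principal: map[string][]string{"AWS": {"*"}},
			Action:    []string{"s3:ListBucket"},
			Resource:  []string{"arn:aws:s3:::" + bucket},
		}},
	}
	b, err := json.Marshal(p)
	return string(b), err
}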
- function = "GetBucketPolicy(ctx, bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - - _, err = c.GetBucketPolicy(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // Make the bucket 'public read/write'. - function = "SetBucketPolicy(bucketName, readWritePolicy)" - functionAll += ", " + function - - readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readWritePolicy, - } - err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `readwrite`. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - _, err = c.GetBucketPolicy(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // List all buckets. - function = "ListBuckets()" - functionAll += ", " + function - args = nil - buckets, err := c.ListBuckets(context.Background()) - - if len(buckets) == 0 { - logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) - return - } - if err != nil { - logError(testName, function, args, startTime, "", "ListBuckets failed", err) - return - } - - // Verify if previously created bucket is listed in list buckets. - bucketFound := false - for _, bucket := range buckets { - if bucket.Name == bucketName { - bucketFound = true - } - } - - // If bucket not found error out. - if !bucketFound { - logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) - return - } - - objectName := bucketName + "unique" - - // Generate data - buf := bytes.Repeat([]byte("f"), 1<<19) - - function = "PutObject(bucketName, objectName, reader, contentType)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "contentType": "", - } - - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-nolength", - "contentType": "binary/octet-stream", - } - - _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Instantiate a done channel to close all listing. - doneCh := make(chan struct{}) - defer close(doneCh) - - objFound := false - isRecursive := true // Recursive is true. 
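The doneCh just created is a leftover of the v6 listing API; in v7, ListObjects is bound to its context, so cancelling the context replaces the done channel. A sketch of the lookup the next loops perform (findObject is a hypothetical helper):

func findObject(ctx context.Context, c *minio.Client, bucket, key string) (bool, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // stops the listing goroutine as soon as we return
	for obj := range c.ListObjects(ctx, bucket, minio.ListObjectsOptions{Prefix: key, Recursive: true}) {
		if obj.Err != nil {
			return false, obj.Err
		}
		if obj.Key == key {
			return true, nil
		}
	}
	return false, nil
}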
- - function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) - return - } - - objFound = false - isRecursive = true // Recursive is true. - function = "ListObjects()" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) - return - } - - incompObjNotFound := true - - function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { - if objIncompl.Key != "" { - incompObjNotFound = false - break - } - } - if !incompObjNotFound { - logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err := io.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) - return - } - newReader.Close() - - function = "FGetObject(bucketName, objectName, fileName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "fileName": fileName + "-f", - } - err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "FGetObject failed", err) - return - } - - function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) - return - } - - // Generate presigned HEAD object url. 
- function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) - return - } - - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) - if err != nil { - logError(testName, function, args, startTime, "", "DefaultTransport failed", err) - return - } - - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively canceled after 30secs - // with no response. - Timeout: 30 * time.Second, - Transport: transport, - } - - req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err) - return - } - - // Verify if presigned url works. - resp, err := httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) - return - } - if resp.Header.Get("ETag") == "" { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) - return - } - resp.Body.Close() - - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil) - if err == nil { - logError(testName, function, args, startTime, "", "PresignedGetObject success", err) - return - } - - // Generate presigned GET object url. - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - - // Verify if presigned url works. 
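A URL from PresignedPutObject is exercised the same way as the presigned GET verified below: with a plain net/http request and no SDK credentials. A sketch of uploading through one (uploadViaPresignedPut is a hypothetical helper):

func uploadViaPresignedPut(ctx context.Context, presignedURL string, body []byte) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, presignedURL, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.ContentLength = int64(len(body)) // presigned PUTs carry the payload directly
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s", resp.Status)
	}
	return nil
}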
- req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err := io.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - resp.Body.Close() - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - - // Set request parameters. - reqParams := make(url.Values) - reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - "reqParams": reqParams, - } - presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - - // Verify if presigned url works. - req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err = io.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err) - return - } - if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second) - if err == nil { - logError(testName, function, args, startTime, "", "PresignedPutObject success", err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - "expires": 3600 * time.Second, - } - presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - buf = bytes.Repeat([]byte("g"), 
1<<19) - - req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) - if err != nil { - logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) - return - } - - newReadBytes, err = io.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) - return - } - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" - functionAll += ", " + function - presignExtraHeaders := map[string][]string{ - "mysecret": {"abcxxx"}, - } - args = map[string]interface{}{ - "method": "PUT", - "bucketName": bucketName, - "objectName": objectName + "-presign-custom", - "expires": 3600 * time.Second, - "extraHeaders": presignExtraHeaders, - } - presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) - if err != nil { - logError(testName, function, args, startTime, "", "Presigned failed", err) - return - } - - // Generate data more than 32K - buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) - - req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf)) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) - return - } - - req.Header.Add("mysecret", "abcxxx") - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) - return - } - - // Download the uploaded object to verify - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presign-custom", - } - newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err) - return - } - - newReadBytes, err = io.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err) - return - } - newReader.Close() - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err) - return - } - - function = "RemoveObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - args["objectName"] = objectName + "-f" - err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, 
"", "RemoveObject failed", err) - return - } - - args["objectName"] = objectName + "-nolength" - err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - args["objectName"] = objectName + "-presigned" - err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - args["objectName"] = objectName + "-presign-custom" - err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - function = "RemoveBucket(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - err = c.RemoveBucket(context.Background(), bucketName) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveBucket failed", err) - return - } - err = c.RemoveBucket(context.Background(), bucketName) - if err == nil { - logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) - return - } - if err.Error() != "The specified bucket does not exist" { - logError(testName, function, args, startTime, "", "RemoveBucket failed", err) - return - } - - os.Remove(fileName) - os.Remove(fileName + "-f") - successLogger(testName, functionAll, args, startTime).Info() -} - -// Test for validating GetObject Reader* methods functioning when the -// object is modified in the object store. -func testGetObjectModified() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload an object. 
- objectName := "myobject" - args["objectName"] = objectName - content := "helloworld" - _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) - return - } - - defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) - - reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) - return - } - defer reader.Close() - - // Read a few bytes of the object. - b := make([]byte, 5) - n, err := reader.ReadAt(b, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) - return - } - - // Upload different contents to the same object while object is being read. - newContent := "goodbyeworld" - _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) - return - } - - // Confirm that a Stat() call in between doesn't change the Object's cached etag. - _, err = reader.Stat() - expectedError := "At least one of the pre-conditions you specified did not hold" - if err.Error() != expectedError { - logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) - return - } - - // Read again only to find object contents have been modified since last read. - _, err = reader.ReadAt(b, int64(n)) - if err.Error() != expectedError { - logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test validates putObject to upload a file seeked at a given offset. -func testPutObjectUploadSeekedObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, fileToUpload, contentType)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileToUpload": "", - "contentType": "binary/octet-stream", - } - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
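The modified-object test above hinges on a *minio.Object pinning the ETag of its first request and sending it as a pre-condition on later ranged reads, so a concurrent overwrite surfaces as a 412-style error rather than torn data. A sketch of that failure mode (names illustrative):

func readWhileOverwritten(c *minio.Client) {
	obj, err := c.GetObject(context.Background(), "mybucket", "myobject", minio.GetObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()

	buf := make([]byte, 5)
	if _, err := obj.ReadAt(buf, 0); err != nil { // first request pins the object's ETag
		log.Fatal(err)
	}

	// ... another writer replaces "myobject" here ...

	if _, err := obj.ReadAt(buf, 5); err != nil {
		// Expected: "At least one of the pre-conditions you specified did not hold"
		log.Println("object changed under the reader:", err)
	}
}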
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, c) - - var tempfile *os.File - - if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" { - tempfile, err = os.Open(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - args["fileToUpload"] = fileName - } else { - tempfile, err = os.CreateTemp("", "minio-go-upload-test-") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile create failed", err) - return - } - args["fileToUpload"] = tempfile.Name() - - // Generate 100kB data - if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - - defer os.Remove(tempfile.Name()) - - // Seek back to the beginning of the file. - tempfile.Seek(0, 0) - } - length := 100 * humanize.KiByte - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - offset := length / 2 - if _, err = tempfile.Seek(int64(offset), 0); err != nil { - logError(testName, function, args, startTime, "", "TempFile seek failed", err) - return - } - - _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - tempfile.Close() - - obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer obj.Close() - - n, err := obj.Seek(int64(offset), 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != int64(offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err) - return - } - - _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if st.Size != int64(length-offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests bucket re-create errors. -func testMakeBucketErrorV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
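The seek-then-upload test above works because PutObject reads exactly size bytes from the reader's current position. The same pattern as a small helper (function name and path hypothetical, io and os added to the imports):

func uploadSecondHalf(c *minio.Client, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return err
	}
	half := st.Size() / 2
	if _, err := f.Seek(half, io.SeekStart); err != nil {
		return err
	}
	// Only the bytes from the current offset onward are uploaded.
	_, err = c.PutObject(context.Background(), "mybucket", "half-object",
		f, st.Size()-half, minio.PutObjectOptions{})
	return err
}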
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - region := "eu-west-1" - args["bucketName"] = bucketName - args["region"] = region - - // Make a new bucket in 'eu-west-1'. - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { - logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) - return - } - // Verify valid error response from server. - if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - logError(testName, function, args, startTime, "", "Invalid error returned by server", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object reader to not throw error on being closed twice. -func testGetObjectClosedTwiceV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. 
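The re-create checks above generalize to an idempotent ensure-bucket helper built on minio.ToErrorResponse (helper name hypothetical):

func ensureBucket(c *minio.Client, name, region string) error {
	err := c.MakeBucket(context.Background(), name, minio.MakeBucketOptions{Region: region})
	switch minio.ToErrorResponse(err).Code {
	case "BucketAlreadyExists", "BucketAlreadyOwnedByYou":
		return nil // already present; treat as success
	default:
		return err // nil on success, or a real failure
	}
}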
- bufSize := dataFileMap["datafile-33-kB"] - reader := getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object is already closed, should return error", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject hidden contentType setting -func testFPutObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Make a temp file with 11*1024*1024 bytes of data. - file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) - n, err := io.CopyN(file, r, 11*1024*1024) - if err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Close the file pro-actively for windows. 
- err = file.Close() - if err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - - // Set base object name - objectName := bucketName + "FPutObject" - args["objectName"] = objectName - args["fileName"] = file.Name() - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/octet-stream) - args["objectName"] = objectName + "-Octet" - args["contentType"] = "" - - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - // Add extension to temp file name - fileName := file.Name() - err = os.Rename(fileName, fileName+".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "Rename failed", err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/x-gtar) - args["objectName"] = objectName + "-Octet" - args["contentType"] = "" - args["fileName"] = fileName + ".gtar" - - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - // Check headers and sizes - rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if rStandard.Size != 11*1024*1024 { - logError(testName, function, args, startTime, "", "Unexpected size", nil) - return - } - - if rStandard.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) - return - } - - rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rOctet.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err) - return - } - - if rOctet.Size != 11*1024*1024 { - logError(testName, function, args, startTime, "", "Unexpected size", nil) - return - } - - rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rGTar.Size != 11*1024*1024 { - logError(testName, function, args, startTime, "", "Unexpected size", nil) - return - } - if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-tar , got "+rGTar.ContentType, err) - return - } - - os.Remove(fileName + ".gtar") - 
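The three uploads above lean on FPutObject's fallback behavior: with an empty opts.ContentType, the client derives a type from the file extension and otherwise settles on application/octet-stream. A sketch (bucket and path hypothetical):

func putWithDetectedType(c *minio.Client) error {
	// No ContentType given; the ".gtar" extension drives detection.
	if _, err := c.FPutObject(context.Background(), "mybucket", "archive",
		"/tmp/backup.gtar", minio.PutObjectOptions{}); err != nil {
		return err
	}
	st, err := c.StatObject(context.Background(), "mybucket", "archive", minio.StatObjectOptions{})
	if err != nil {
		return err
	}
	log.Println(st.ContentType) // typically application/x-gtar, server permitting
	return nil
}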
successLogger(testName, function, args, startTime).Info() -} - -// Tests various bucket supported formats. -func testMakeBucketRegionsV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) - return - } - - // Make a new bucket with '.' in its name, in 'us-west-2'. This - // request is internally staged into a path style instead of - // virtual host style. - if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil { - args["bucketName"] = bucketName + ".withperiod" - args["region"] = "us-west-2" - logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName+".withperiod", c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderSeeker interface methods. -func testGetObjectReadSeekFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
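The ReadSeeker test that follows drives Seek with all three whence values; the semantics match io.Seeker, including the error on seeking past EOF from the end. A compact sketch (names illustrative):

func seekDemo(c *minio.Client) error {
	r, err := c.GetObject(context.Background(), "mybucket", "myobject", minio.GetObjectOptions{})
	if err != nil {
		return err
	}
	defer r.Close()

	if _, err := r.Seek(2048, io.SeekStart); err != nil { // absolute offset
		return err
	}
	pos, err := r.Seek(0, io.SeekCurrent) // query the position: 2048
	if err != nil {
		return err
	}
	end, err := r.Seek(-2048, io.SeekEnd) // 2048 bytes before EOF
	if err != nil {
		return err
	}
	log.Println(pos, end) // a positive offset with io.SeekEnd would error instead
	return nil
}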
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - reader := getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data. - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return - } - - offset := int64(2048) - n, err := r.Seek(offset, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) - return - } - n, err = r.Seek(0, 1) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) - return - } - _, err = r.Seek(offset, 2) - if err == nil { - logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err) - return - } - n, err = r.Seek(-offset, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != st.Size-offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err) - return - } - - var buffer1 bytes.Buffer - if _, err = io.CopyN(&buffer1, r, st.Size); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - } - if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - - // Seek again and read again. 
- n, err = r.Seek(offset-1, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != (offset - 1) { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err) - return - } - - var buffer2 bytes.Buffer - if _, err = io.CopyN(&buffer2, r, st.Size); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - } - // Verify now lesser bytes. - if !bytes.Equal(buf[2047:], buffer2.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderAt interface methods. -func testGetObjectReadAtFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. 
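ReadAt, exercised next, is stateless: each call maps to its own ranged GET and leaves the Seek position untouched, and a window touching the end of the object may return io.EOF together with the data. Sketch (size is an assumed object length):

func readAtDemo(r *minio.Object, size int64) error {
	window := make([]byte, 512)
	if _, err := r.ReadAt(window, 2048); err != nil && err != io.EOF {
		return err
	}
	// Independent of the call above; there is no shared cursor.
	tail := make([]byte, 512)
	if _, err := r.ReadAt(tail, size-512); err != nil && err != io.EOF {
		return err
	}
	return nil
}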
- bufSize := dataFileMap["datafile-33-kB"] - reader := getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - - offset := int64(2048) - - // Read directly - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - m, err := r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, bufSize) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, bufSize+1) - // Read the whole object and beyond. 
- _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests copy object -func testCopyObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, c) - - // Make a new bucket in 'us-east-1' (destination bucket). - err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName+"-copy", c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - reader := getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - r.Close() - - // Copy Source - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), - MatchETag: objInfo.ETag, - } - args["source"] = src - - // Set copy conditions. 
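CopySrcOptions carries the server-checked copy pre-conditions set above; when a condition fails, the server rejects the copy with a PreconditionFailed response. A guarded-copy sketch (names and ETag illustrative):

func guardedCopy(c *minio.Client, etag string) error {
	src := minio.CopySrcOptions{
		Bucket:    "srcbucket",
		Object:    "srcobject",
		MatchETag: etag, // copy only while the source still has this exact ETag
	}
	dst := minio.CopyDestOptions{Bucket: "dstbucket", Object: "dstobject"}
	_, err := c.CopyObject(context.Background(), dst, src)
	return err
}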
- dst := minio.CopyDestOptions{ - Bucket: bucketName + "-copy", - Object: objectName + "-copy", - } - args["destination"] = dst - - // Perform the Copy - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Source object - r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Destination object - readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err = r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - objInfoCopy, err := readerCopy.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if objInfo.Size != objInfoCopy.Size { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err) - return - } - - // Close all the readers. - r.Close() - readerCopy.Close() - - // CopyObject again but with wrong conditions - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), - NoMatchETag: objInfo.ETag, - } - - // Perform the Copy which should fail - _, err = c.CopyObject(context.Background(), dst, src) - if err == nil { - logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testComposeObjectErrorCasesWrapper(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Test that more than 10K source objects cannot be - // concatenated. - srcArr := [10001]minio.CopySrcOptions{} - srcSlice := srcArr[:] - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "object", - } - - args["destination"] = dst - // Just explain about srcArr in args["sourceList"] - // to stop having 10,001 null headers logged - args["sourceList"] = "source array of 10,001 elements" - if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil { - logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err) - return - } else if err.Error() != "There must be as least one and up to 10000 source objects." { - logError(testName, function, args, startTime, "", "Got unexpected error", err) - return - } - - // Create a source with invalid offset spec and check that - // error is returned: - // 1. Create the source object. 
- const badSrcSize = 5 * 1024 * 1024 - buf := bytes.Repeat([]byte("1"), badSrcSize) - _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - // 2. Set invalid range spec on the object (going beyond - // object size) - badSrc := minio.CopySrcOptions{ - Bucket: bucketName, - Object: "badObject", - MatchRange: true, - Start: 1, - End: badSrcSize, - } - - // 3. ComposeObject call should fail. - if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil { - logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) - return - } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { - logError(testName, function, args, startTime, "", "Got invalid error", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test expected error cases -func testComposeObjectErrorCasesV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - testComposeObjectErrorCasesWrapper(c) -} - -func testComposeMultipleSources(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{ - "destination": "", - "sourceList": "", - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload a small source object - const srcSize = 1024 * 1024 * 5 - buf := bytes.Repeat([]byte("1"), srcSize) - _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // We will append 10 copies of the object. - srcs := []minio.CopySrcOptions{} - for i := 0; i < 10; i++ { - srcs = append(srcs, minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObject", - }) - } - - // make the last part very small - srcs[9].MatchRange = true - - args["sourceList"] = srcs - - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject", - } - args["destination"] = dst - - ui, err := c.ComposeObject(context.Background(), dst, srcs...) 
- if err != nil {
- logError(testName, function, args, startTime, "", "ComposeObject failed", err)
- return
- }
-
- if ui.Size != 9*srcSize+1 {
- logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err)
- return
- }
-
- objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
-
- if objProps.Size != 9*srcSize+1 {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Size mismatched! Expected %d got %d", 9*srcSize+1, objProps.Size), err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test concatenating multiple source objects with the V2 client.
-func testCompose10KSourcesV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ComposeObject(destination, sourceList)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
- return
- }
-
- testComposeMultipleSources(c)
-}
-
-func testEncryptedEmptyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
- return
- }
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer cleanupBucket(bucketName, c)
-
- sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))
-
- // 1. create an sse-c encrypted object to copy by uploading
- const srcSize = 0
- var buf []byte // Empty buffer
- args["objectName"] = "object"
- _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- return
- }
-
- // 2. Test CopyObject for an empty object
- src := minio.CopySrcOptions{
- Bucket: bucketName,
- Object: "object",
- Encryption: sse,
- }
-
- dst := minio.CopyDestOptions{
- Bucket: bucketName,
- Object: "new-object",
- Encryption: sse,
- }
-
- if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
- function = "CopyObject(dst, src)"
- logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
- return
- }
-
- // 3.
Test Key rotation - newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: "new-object", - Encryption: sse, - } - - dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: "new-object", - Encryption: newSSE, - } - - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - function = "CopyObject(dst, src)" - logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) - return - } - - // 4. Download the object. - reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer reader.Close() - - decBytes, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) - return - } - - delete(args, "objectName") - successLogger(testName, function, args, startTime).Info() -} - -func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { - // initialize logging params - startTime := time.Now() - testName := getFuncNameLoc(2) - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - var srcEncryption, dstEncryption encrypt.ServerSide - - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // 1. create an sse-c encrypted object to copy by uploading - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ServerSideEncryption: sseSrc, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - if sseSrc != nil && sseSrc.Type() != encrypt.S3 { - srcEncryption = sseSrc - } - - // 2. copy object and change encryption key - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObject", - Encryption: srcEncryption, - } - args["source"] = src - - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject", - Encryption: sseDst, - } - args["destination"] = dst - - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - if sseDst != nil && sseDst.Type() != encrypt.S3 { - dstEncryption = sseDst - } - // 3. 
get copied object and check if content is equal - coreClient := minio.Core{c} - reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - decBytes, err := io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - reader.Close() - - // Test key rotation for source object in-place. - var newSSE encrypt.ServerSide - if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { - newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key - } - if sseSrc != nil && sseSrc.Type() == encrypt.S3 { - newSSE = encrypt.NewSSE() - } - if newSSE != nil { - dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: "srcObject", - Encryption: newSSE, - } - args["destination"] = dst - - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Get copied object and check if content is equal - reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - decBytes, err = io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - reader.Close() - - // Test in-place decryption. 
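Both the key rotation above and the in-place decryption below are self-copies that differ only in the Encryption fields. A sketch of the pair (passwords and names illustrative, pkg/encrypt added to the imports):

func rotateThenDecrypt(c *minio.Client) error {
	oldKey := encrypt.DefaultPBKDF([]byte("old password"), []byte("mybucketsecret"))
	newKey := encrypt.DefaultPBKDF([]byte("new password"), []byte("mybucketsecret"))

	src := minio.CopySrcOptions{Bucket: "mybucket", Object: "secret", Encryption: oldKey}
	dst := minio.CopyDestOptions{Bucket: "mybucket", Object: "secret", Encryption: newKey}
	// Rotation: the server decrypts with oldKey and re-encrypts with newKey.
	if _, err := c.CopyObject(context.Background(), dst, src); err != nil {
		return err
	}

	// Dropping the destination key stores the object unencrypted.
	src.Encryption = newKey
	dst.Encryption = nil
	_, err := c.CopyObject(context.Background(), dst, src)
	return err
}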
- dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: "srcObject", - } - args["destination"] = dst - - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObject", - Encryption: newSSE, - } - args["source"] = src - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) - return - } - } - - // Get copied decrypted object and check if content is equal - reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer reader.Close() - - decBytes, err = io.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test encrypted copy object -func testUnencryptedToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) -} - -// Test encrypted copy object -func testUnencryptedToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc encrypt.ServerSide - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testUnencryptedToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc, sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- sseSrc := encrypt.NewSSE()
- sseDst := encrypt.NewSSE()
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testEncryptedSSES3ToUnencryptedCopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- sseSrc := encrypt.NewSSE()
- var sseDst encrypt.ServerSide
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testEncryptedCopyObjectV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -func testDecryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ - ServerSideEncryption: encryption, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - Encryption: encrypt.SSECopy(encryption), - } - args["source"] = src - - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "decrypted-" + objectName, - } - args["destination"] = dst - - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testSSECMultipartEncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 6MB of data - buf := bytes.Repeat([]byte("abcdef"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - // Upload a 6MB object using multipart mechanism - uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - var completeParts []minio.CompletePart - - part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, - bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, - minio.PutObjectPartOptions{SSE: srcencryption}, - ) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) - return - } - completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) - - part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, - bytes.NewReader(buf[5*1024*1024:]), 1024*1024, - minio.PutObjectPartOptions{SSE: srcencryption}, - ) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) - return - } - completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (6*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 6*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 6*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) - return - } - - getOpts.SetRange(6*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 6*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:6*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) - return - } - if getBuf[6*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
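The header juggling at the top of this chunk — marshal the copy-source key with encrypt.SSECopy, marshal the destination key, then flatten the http.Header into a plain string map — recurs in every CopyObjectPart test that follows. It could be captured once in a helper along these lines; copyPartMetadata is an illustrative name, not minio-go API, and the imports are the ones this file already uses:

// copyPartMetadata flattens the SSE headers that CopyObjectPart expects
// into a map[string]string. Either side may be nil when unencrypted.
func copyPartMetadata(src, dst encrypt.ServerSide, srcETag string) map[string]string {
	header := make(http.Header)
	// Copy-source key headers only apply to SSE-C sources; SSE-S3 and
	// unencrypted objects are readable server-side without them.
	if src != nil && src.Type() == encrypt.SSEC {
		encrypt.SSECopy(src).Marshal(header)
	}
	if dst != nil {
		dst.Marshal(header) // destination encryption headers, any type
	}
	metadata := make(map[string]string, len(header)+1)
	for k, v := range header {
		metadata[k] = v[0]
	}
	// Abort the copy if the source changed since it was uploaded.
	metadata["x-amz-copy-source-if-match"] = srcETag
	return metadata
}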
-} - -// Test Core CopyObjectPart implementation -func testSSECEncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - putmetadata := map[string]string{ - "Content-Type": "binary/octet-stream", - } - opts := minio.PutObjectOptions{ - UserMetadata: putmetadata, - ServerSideEncryption: srcencryption, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
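Two range conventions carry the verification logic here and in the sibling tests: a length of -1 passed to CopyObjectPart copies the entire source object (which is how two parts yield twice the source size), and GetObjectOptions.SetRange(start, 0) requests everything from start to the end of the object. The pair of ranged reads that checks each half of the copy could be folded into a helper; verifyRange is an illustrative name, assuming this file's imports plus io:

// verifyRange reads len(want) bytes at offset start via a ranged GET on
// the Core client and compares them against want. Sketch only.
func verifyRange(c minio.Core, bucket, object string, sse encrypt.ServerSide, start int64, want []byte) error {
	opts := minio.GetObjectOptions{ServerSideEncryption: sse}
	if err := opts.SetRange(start, start+int64(len(want))-1); err != nil {
		return err
	}
	r, _, _, err := c.GetObject(context.Background(), bucket, object, opts)
	if err != nil {
		return err
	}
	defer r.Close()
	got := make([]byte, len(want))
	if _, err := io.ReadFull(r, got); err != nil {
		return err
	}
	if !bytes.Equal(got, want) {
		return fmt.Errorf("range at offset %d: content mismatch", start)
	}
	return nil
}

With it, the first check above would read verifyRange(c, destBucketName, destObjectName, dstencryption, 0, buf).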
-} - -// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy -func testSSECEncryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - opts := minio.PutObjectOptions{ - UserMetadata: map[string]string{ - "Content-Type": "binary/octet-stream", - }, - ServerSideEncryption: srcencryption, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - var dstencryption encrypt.ServerSide - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
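This test is the multipart flavour of decrypt-on-copy: only the SSE-C copy-source key is sent, the destination options carry no encryption, and the server stores the result in plaintext. With the regular client the same operation is a single CopyObject call; a sketch reusing the names from the test above, with an illustrative "-plain" suffix:

// Decrypt on copy: supply the source key via SSECopy, leave the
// destination unencrypted.
src := minio.CopySrcOptions{
	Bucket:     bucketName,
	Object:     objectName,
	Encryption: encrypt.SSECopy(srcencryption), // key to read the source
}
dst := minio.CopyDestOptions{
	Bucket: bucketName,
	Object: objectName + "-plain", // no Encryption field: stored in plaintext
}
if _, err := client.CopyObject(context.Background(), dst, src); err != nil {
	logError(testName, function, args, startTime, "", "CopyObject failed", err)
	return
}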
-} - -// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy -func testSSECEncryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - putmetadata := map[string]string{ - "Content-Type": "binary/octet-stream", - } - opts := minio.PutObjectOptions{ - UserMetadata: putmetadata, - ServerSideEncryption: srcencryption, - } - - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
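On the destination side here, encrypt.NewSSE() selects SSE-S3, whose only wire effect is a single request header; decryption on read is transparent, which is why the StatObject and GetObject calls above pass no encryption options. A two-line illustration, assuming this file's imports:

h := make(http.Header)
encrypt.NewSSE().Marshal(h)
fmt.Println(h.Get("x-amz-server-side-encryption")) // prints "AES256"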
-} - -// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part -func testUnencryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - putmetadata := map[string]string{ - "Content-Type": "binary/octet-stream", - } - opts := minio.PutObjectOptions{ - UserMetadata: putmetadata, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
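Two details above are easy to read past. The expected size (5*1024*1024)*2+1 is simply two full 5 MiB copies plus the single leading byte copied as part three. And because the destination is SSE-C, every StatObject and GetObject that follows must present the same key in its options; S3-compatible servers generally refuse such requests when the key headers are missing.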
-} - -// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy -func testUnencryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - putmetadata := map[string]string{ - "Content-Type": "binary/octet-stream", - } - opts := minio.PutObjectOptions{ - UserMetadata: putmetadata, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
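A quirk in this unencrypted-to-unencrypted variant: header is created but nothing is ever marshalled into it, so the loop over it is a no-op, and the only metadata sent with each CopyObjectPart is the x-amz-copy-source-if-match guard.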
-} - -// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy -func testUnencryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - opts := minio.PutObjectOptions{ - UserMetadata: map[string]string{ - "Content-Type": "binary/octet-stream", - }, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part -func testSSES3EncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcEncryption := encrypt.NewSSE() - opts := minio.PutObjectOptions{ - UserMetadata: map[string]string{ - "Content-Type": "binary/octet-stream", - }, - ServerSideEncryption: srcEncryption, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
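// The copy-part tests in this file all follow the same server-side
// multipart copy pattern. A condensed sketch of just that pattern, with
// error handling elided and identifiers such as ctx, client, srcBucket,
// srcObject, dstBucket and dstObject assumed:
//
//	core := minio.Core{client}
//	sse := encrypt.DefaultPBKDF([]byte("password"), []byte(dstBucket+dstObject))
//	uploadID, _ := core.NewMultipartUpload(ctx, dstBucket, dstObject,
//		minio.PutObjectOptions{ServerSideEncryption: sse})
//
//	// The destination's SSE headers ride along as copy-part metadata.
//	meta := map[string]string{}
//	hdr := make(http.Header)
//	sse.Marshal(hdr)
//	for k, v := range hdr {
//		meta[k] = v[0]
//	}
//
//	// Each part is copied server-side; length -1 means "to the end".
//	p1, _ := core.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, 1, 0, -1, meta)
//	p2, _ := core.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, 2, 0, -1, meta)
//	core.CompleteMultipartUpload(ctx, dstBucket, dstObject, uploadID,
//		[]minio.CompletePart{p1, p2}, minio.PutObjectOptions{})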
-}
-
-// Test Core CopyObjectPart implementation for SSE-S3 encrypted to unencrypted copy
-func testSSES3EncryptedToUnencryptedCopyPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- srcEncryption := encrypt.NewSSE()
- opts := minio.PutObjectOptions{
- UserMetadata: map[string]string{
- "Content-Type": "binary/octet-stream",
- },
- ServerSideEncryption: srcEncryption,
- }
- uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- return
- }
- st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- return
- }
-
- if st.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
- return
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
-
- uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- return
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string) - header := make(http.Header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
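// Setting x-amz-copy-source-if-match to the ETag captured at upload time
// makes each CopyObjectPart conditional: the server rejects the copy if
// the source object has been overwritten since PutObject returned.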
-}
-
-// Test Core CopyObjectPart implementation for SSE-S3 to SSE-S3 encrypted copy
-func testSSES3EncryptedToSSES3CopyObjectPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- srcEncryption := encrypt.NewSSE()
- opts := minio.PutObjectOptions{
- UserMetadata: map[string]string{
- "Content-Type": "binary/octet-stream",
- },
- ServerSideEncryption: srcEncryption,
- }
-
- uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- return
- }
- st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- return
- }
- if st.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
- return
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
- dstencryption := encrypt.NewSSE()
-
- uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- return
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
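// The copy-part tests above mix two server-side encryption flavors from
// pkg/encrypt; a brief sketch of the difference (bucket and object names
// are assumed):
//
//	// SSE-S3: keys are generated and held by the server, so no key
//	// material needs to accompany later reads.
//	sseS3 := encrypt.NewSSE()
//
//	// SSE-C: the key is derived client-side from a password and must be
//	// supplied again on every StatObject/GetObject of the object.
//	sseC := encrypt.DefaultPBKDF([]byte("password"), []byte(bucket+object))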
-} - -func testUserMetadataCopying() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // c.TraceOn(os.Stderr) - testUserMetadataCopyingWrapper(c) -} - -func testUserMetadataCopyingWrapper(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { - h.Add(k, vs[0]) - } - } - return h - } - - // 1. create a client encrypted object to copy by uploading - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - metadata := make(http.Header) - metadata.Set("x-amz-meta-myheader", "myvalue") - m := make(map[string]string) - m["x-amz-meta-myheader"] = "myvalue" - _, err = c.PutObject(context.Background(), bucketName, "srcObject", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) - return - } - if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 2. create source - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObject", - } - - // 2.1 create destination with metadata set - dst1 := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject-1", - UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, - ReplaceMetadata: true, - } - - // 3. Check that copying to an object with metadata set resets - // the headers on the copy. - args["source"] = src - args["destination"] = dst1 - _, err = c.CopyObject(context.Background(), dst1, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - expectedHeaders := make(http.Header) - expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 4. create destination with no metadata set and same source - dst2 := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject-2", - } - - // 5. 
Check that copying to an object with no metadata set, - // copies metadata. - args["source"] = src - args["destination"] = dst2 - _, err = c.CopyObject(context.Background(), dst2, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - expectedHeaders = metadata - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 6. Compose a pair of sources. - dst3 := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject-3", - ReplaceMetadata: true, - } - - function = "ComposeObject(destination, sources)" - args["source"] = []minio.CopySrcOptions{src, src} - args["destination"] = dst3 - _, err = c.ComposeObject(context.Background(), dst3, src, src) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - // Check that no headers are copied in this case - if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 7. Compose a pair of sources with dest user metadata set. - dst4 := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject-4", - UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, - ReplaceMetadata: true, - } - - function = "ComposeObject(destination, sources)" - args["source"] = []minio.CopySrcOptions{src, src} - args["destination"] = dst4 - _, err = c.ComposeObject(context.Background(), dst4, src, src) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - // Check that no headers are copied in this case - expectedHeaders = make(http.Header) - expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testUserMetadataCopyingV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // c.TraceOn(os.Stderr) - testUserMetadataCopyingWrapper(c) -} - -func testStorageClassMetadataPutObject() { - // initialize logging params - startTime := time.Now() - function := "testStorageClassMetadataPutObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer cleanupBucket(bucketName, c)
-
- fetchMeta := func(object string) (h http.Header) {
- objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- h = make(http.Header)
- for k, vs := range objInfo.Metadata {
- if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
- for _, v := range vs {
- h.Add(k, v)
- }
- }
- }
- return h
- }
-
- metadata := make(http.Header)
- metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
- emptyMetadata := make(http.Header)
-
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
-
- _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- // Get the returned metadata
- returnedMeta := fetchMeta("srcObjectRRSClass")
-
- // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
- if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
- logError(testName, function, args, startTime, "", "Metadata match failed", err)
- return
- }
-
- metadata = make(http.Header)
- metadata.Set("x-amz-storage-class", "STANDARD")
-
- _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
- logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testStorageClassInvalidMetadataPutObject() {
- // initialize logging params
- startTime := time.Now()
- function := "testStorageClassInvalidMetadataPutObject()"
- args := map[string]interface{}{}
- testName := getFuncName()
-
- // Instantiate new minio client object
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
- return
- }
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer cleanupBucket(bucketName, c)
-
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
-
- _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
- if err == nil {
- logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testStorageClassMetadataCopyObject() {
- // initialize logging params
- startTime := time.Now()
- function := "testStorageClassMetadataCopyObject()"
- args := map[string]interface{}{}
- testName := getFuncName()
-
- // Instantiate new minio client object
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
- return
- }
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer cleanupBucket(bucketName, c)
-
- fetchMeta := func(object string) (h http.Header) {
- objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
- args["bucket"] = bucketName
- args["object"] = object
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- h = make(http.Header)
- for k, vs := range objInfo.Metadata {
- if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
- for _, v := range vs {
- h.Add(k, v)
- }
- }
- }
- return h
- }
-
- metadata := make(http.Header)
- metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
- emptyMetadata := make(http.Header)
-
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize)
-
- // Put an object with RRS Storage class
- _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- // Make server side copy of object uploaded in previous step
- src := minio.CopySrcOptions{
- Bucket: bucketName,
- Object: "srcObjectRRSClass",
- }
- dst := minio.CopyDestOptions{
- Bucket: bucketName,
- Object: "srcObjectRRSClassCopy",
- }
- if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
- logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err)
- return
- }
-
- // Get the returned metadata
- returnedMeta := fetchMeta("srcObjectRRSClassCopy")
-
- // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
- if !reflect.DeepEqual(metadata, returnedMeta) &&
!reflect.DeepEqual(emptyMetadata, returnedMeta) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - metadata = make(http.Header) - metadata.Set("x-amz-storage-class", "STANDARD") - - // Put an object with Standard Storage class - _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Make server side copy of object uploaded in previous step - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObjectSSClass", - } - dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: "srcObjectSSClassCopy", - } - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) - return - } - // Fetch the meta data of copied object - if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { - logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with size -1 byte object. -func testPutObjectNoLengthV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - objectName := bucketName + "unique" - args["objectName"] = objectName - - bufSize := dataFileMap["datafile-129-MB"] - reader := getDataReader("datafile-129-MB") - defer reader.Close() - args["size"] = bufSize - - // Upload an object. 
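// Passing -1 as the size below tells the client that the length is
// unknown, so it streams the reader as a multipart upload instead of a
// single PUT with a known Content-Length.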
- _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
- return
- }
-
- st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", bufSize, st.Size), err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put objects of unknown size.
-func testPutObjectsUnknownV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader,size,opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "size": "",
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer cleanupBucket(bucketName, c)
-
- // Issues are revealed by trying to upload multiple files of unknown size
- // sequentially (on 4GB machines)
- for i := 1; i <= 4; i++ {
- // Simulate that we could be receiving byte slices of data that we want
- // to upload as a file
- rpipe, wpipe := io.Pipe()
- defer rpipe.Close()
- go func() {
- b := []byte("test")
- wpipe.Write(b)
- wpipe.Close()
- }()
-
- // Upload the object.
- objectName := fmt.Sprintf("%sunique%d", bucketName, i)
- args["objectName"] = objectName
-
- ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
- return
- }
-
- if ui.Size != 4 {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4 got %d", ui.Size), nil)
- return
- }
-
- st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err)
- return
- }
-
- if st.Size != int64(4) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4 got %d", st.Size), err)
- return
- }
-
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put object with 0 byte object.
-func testPutObject0ByteV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader, size, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "size": 0,
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer cleanupBucket(bucketName, c)
-
- objectName := bucketName + "unique"
- args["objectName"] = objectName
- args["opts"] = minio.PutObjectOptions{}
-
- // Upload an object.
- _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
- return
- }
- st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err)
- return
- }
- if st.Size != 0 {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 0 but got %d", st.Size), err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test expected error cases
-func testComposeObjectErrorCases() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ComposeObject(destination, sourceList)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
- return
- }
-
- testComposeObjectErrorCasesWrapper(c)
-}
-
-// Test concatenating multiple 10K objects V4
-func testCompose10KSources() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ComposeObject(destination, sourceList)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
- return
- }
-
- testComposeMultipleSources(c)
-}
-
-// Tests comprehensive list of all methods.
-func testFunctionalV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "testFunctionalV2()"
- functionAll := ""
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- c, err := minio.New(os.Getenv(serverEndpoint),
- &minio.Options{
- Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
- return
- }
-
- // Enable to debug
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- location := "us-east-1"
- // Make a new bucket.
- function = "MakeBucket(bucketName, location)"
- functionAll = "MakeBucket(bucketName, location)"
- args = map[string]interface{}{
- "bucketName": bucketName,
- "location": location,
- }
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer cleanupBucket(bucketName, c)
-
- // Generate a random file name.
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- file, err := os.Create(fileName)
- if err != nil {
- logError(testName, function, args, startTime, "", "file create failed", err)
- return
- }
- for i := 0; i < 3; i++ {
- buf := make([]byte, rand.Intn(1<<19))
- _, err = file.Write(buf)
- if err != nil {
- logError(testName, function, args, startTime, "", "file write failed", err)
- return
- }
- }
- file.Close()
-
- // Verify if bucket exists and you have access.
- var exists bool
- function = "BucketExists(bucketName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- }
- exists, err = c.BucketExists(context.Background(), bucketName)
- if err != nil {
- logError(testName, function, args, startTime, "", "BucketExists failed", err)
- return
- }
- if !exists {
- logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
- return
- }
-
- // Make the bucket 'public read/write'.
- function = "SetBucketPolicy(bucketName, bucketPolicy)"
- functionAll += ", " + function
-
- readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
-
- args = map[string]interface{}{
- "bucketName": bucketName,
- "bucketPolicy": readWritePolicy,
- }
- err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
- return
- }
-
- // List all buckets.
- function = "ListBuckets()"
- functionAll += ", " + function
- args = nil
- buckets, err := c.ListBuckets(context.Background())
- if err != nil {
- logError(testName, function, args, startTime, "", "ListBuckets failed", err)
- return
- }
- if len(buckets) == 0 {
- logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
- return
- }
-
- // Verify if previously created bucket is listed in list buckets.
- bucketFound := false
- for _, bucket := range buckets {
- if bucket.Name == bucketName {
- bucketFound = true
- }
- }
-
- // If bucket not found error out.
- if !bucketFound {
- logError(testName, function, args, startTime, "", "Bucket "+bucketName+" not found", err)
- return
- }
-
- objectName := bucketName + "unique"
-
- // Generate data
- buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
-
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "contentType": "",
- }
- _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
- if st.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d got %d", len(buf), st.Size), err)
- return
- }
-
- objectNameNoLength := objectName + "-nolength"
- args["objectName"] = objectNameNoLength
- _, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
- if st.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d got %d", len(buf), st.Size), err)
- return
- }
-
- // Instantiate a done channel to close all listing.
- doneCh := make(chan struct{})
- defer close(doneCh)
-
- objFound := false
- isRecursive := true // Recursive is true.
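// ListObjects returns a channel of ObjectInfo that is closed when the
// listing ends; listing failures are delivered in-band rather than as a
// separate error return, so a caller that cares about them checks the Err
// field of each value, roughly:
//
//	for obj := range c.ListObjects(ctx, bucketName, minio.ListObjectsOptions{Prefix: objectName}) {
//		if obj.Err != nil {
//			// handle the listing error
//		}
//	}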
- function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) - return - } - - incompObjNotFound := true - function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { - if objIncompl.Key != "" { - incompObjNotFound = false - break - } - } - if !incompObjNotFound { - logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err := io.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - newReader.Close() - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - function = "FGetObject(bucketName, objectName, fileName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "fileName": fileName + "-f", - } - err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FgetObject failed", err) - return - } - - // Generate presigned HEAD object url. - function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) - return - } - - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) - if err != nil { - logError(testName, function, args, startTime, "", "DefaultTransport failed", err) - return - } - - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively canceled after 30secs - // with no response. - Timeout: 30 * time.Second, - Transport: transport, - } - - req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) - return - } - - // Verify if presigned url works. 
- resp, err := httpClient.Do(req)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
- return
- }
- if resp.StatusCode != http.StatusOK {
- logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedHeadObject URL returns status %d", resp.StatusCode), err)
- return
- }
- if resp.Header.Get("ETag") == "" {
- logError(testName, function, args, startTime, "", "Got empty ETag", err)
- return
- }
- resp.Body.Close()
-
- // Generate presigned GET object url.
- function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "expires": 3600 * time.Second,
- }
- presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
- return
- }
-
- // Verify if presigned url works.
- req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
- return
- }
-
- resp, err = httpClient.Do(req)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
- return
- }
-
- if resp.StatusCode != http.StatusOK {
- logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject URL returns status %d", resp.StatusCode), err)
- return
- }
- newPresignedBytes, err := io.ReadAll(resp.Body)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
- resp.Body.Close()
- if !bytes.Equal(newPresignedBytes, buf) {
- logError(testName, function, args, startTime, "", "Bytes mismatch", err)
- return
- }
-
- // Set request parameters.
- reqParams := make(url.Values)
- reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
- // Generate presigned GET object url.
- args["reqParams"] = reqParams
- presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
- return
- }
-
- // Verify if presigned url works.
- req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
- return
- }
-
- resp, err = httpClient.Do(req)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
- return
- }
-
- if resp.StatusCode != http.StatusOK {
- logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject URL returns status %d", resp.StatusCode), err)
- return
- }
- newPresignedBytes, err = io.ReadAll(resp.Body)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- logError(testName, function, args, startTime, "", "Bytes mismatch", err)
- return
- }
- // Verify content disposition.
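// The response-content-disposition request parameter asks the server to
// override the Content-Disposition header on this one presigned response;
// it does not alter the stored object's metadata.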
- if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - "expires": 3600 * time.Second, - } - presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - // Generate data more than 32K - buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) - - req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) - return - } - - // Download the uploaded object to verify - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - } - newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err) - return - } - - newReadBytes, err = io.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) - return - } - newReader.Close() - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err) - return - } - - function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" - functionAll += ", " + function - presignExtraHeaders := map[string][]string{ - "mysecret": {"abcxxx"}, - } - args = map[string]interface{}{ - "method": "PUT", - "bucketName": bucketName, - "objectName": objectName + "-presign-custom", - "expires": 3600 * time.Second, - "extraHeaders": presignExtraHeaders, - } - _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) - if err == nil { - logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err) - return - } - - os.Remove(fileName) - os.Remove(fileName + "-f") - successLogger(testName, functionAll, args, startTime).Info() -} - -// Test get object with GetObject with context -func testGetObjectContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(ctx, bucketName, objectName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	bufSize := dataFileMap["datafile-33-kB"]
-	reader := getDataReader("datafile-33-kB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	cancel()
-
-	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
-		return
-	}
-
-	if _, err = r.Stat(); err == nil {
-		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
-		return
-	}
-	r.Close()
-
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	args["ctx"] = ctx
-	defer cancel()
-
-	// Read the data back
-	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "object Stat call failed", err)
-		return
-	}
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match: want %d, got %d", bufSize, st.Size), err)
-		return
-	}
-	if err := r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "object Close() call failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test get object with FGetObject with a user provided context
-func testFGetObjectContext() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FGetObject(ctx, bucketName, objectName, fileName)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-		"fileName":   "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	bufSize := dataFileMap["datafile-1-MB"]
-	reader := getDataReader("datafile-1-MB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	defer cancel()
-
-	fileName := "tempfile-context"
-	args["fileName"] = fileName
-	// Read the data back
-	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
-		return
-	}
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	defer cancel()
-
-	// Read the data back
-	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err)
-		return
-	}
-	if err = os.Remove(fileName + "-fcontext"); err != nil {
-		logError(testName, function, args, startTime, "", "Remove file failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test GetObject with a series of byte-range requests
-func testGetObjectRanges() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(ctx, bucketName, objectName, opts)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-		"opts":       "",
-	}
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
-	defer cancel()
-
-	rng := rand.NewSource(time.Now().UnixNano())
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rng, "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	bufSize := dataFileMap["datafile-129-MB"]
-	reader := getDataReader("datafile-129-MB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rng, "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	// Read the data back
-	tests := []struct {
-		start int64
-		end   int64
-	}{
-		{
-			start: 1024,
-			end:   1024 + 1<<20,
-		},
-		{
-			start: 20e6,
-			end:   20e6 + 10000,
-		},
-		{
-			start: 40e6,
-			end:   40e6 + 10000,
-		},
-		{
-			start: 60e6,
-			end:   60e6 + 10000,
-		},
-		{
-			start: 80e6,
-			end:   80e6 + 10000,
-		},
-		{
-			start: 120e6,
-			end:   int64(bufSize),
-		},
-	}
-	for _, test := range tests {
-		wantRC := getDataReader("datafile-129-MB")
-		io.CopyN(io.Discard, wantRC, test.start)
-		want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
-		opts := minio.GetObjectOptions{}
-		opts.SetRange(test.start, test.end)
-		args["opts"] = fmt.Sprintf("%+v", test)
-		obj, err := c.GetObject(ctx, bucketName, objectName, opts)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "GetObject with range request failed", err)
-			return
-		}
-		err = crcMatches(obj, want)
-		if err != nil {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err)
-			return
-		}
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test get object ACLs with GetObjectACL with a custom context
-func testGetObjectACLContext() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObjectACL(ctx, bucketName, objectName)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	bufSize := dataFileMap["datafile-1-MB"]
-	reader := getDataReader("datafile-1-MB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	// Add metadata to set a canned ACL
-	metaData := map[string]string{
-		"X-Amz-Acl": "public-read-write",
-	}
-
-	_, err = c.PutObject(context.Background(), bucketName,
-		objectName, reader, int64(bufSize),
-		minio.PutObjectOptions{
-			ContentType:  "binary/octet-stream",
-			UserMetadata: metaData,
-		})
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	args["ctx"] = ctx
-	defer cancel()
-
-	// Read the data back
-	objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName)
-	if getObjectACLErr != nil {
-		logError(testName, function, args, startTime, "", "GetObjectACL failed", getObjectACLErr)
-		return
-	}
-
-	s, ok := objectInfo.Metadata["X-Amz-Acl"]
-	if !ok {
-		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil)
-		return
-	}
-
-	if len(s) != 1 {
-		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
-		return
-	}
-
-	// Do very limited testing if this is not AWS S3
-	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
-		if s[0] != "private" {
-			logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got "+fmt.Sprintf("%q", s[0]), nil)
-			return
-		}
-
-		successLogger(testName, function, args, startTime).Info()
-		return
-	}
-
-	if s[0] != "public-read-write" {
-		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got "+fmt.Sprintf("%q", s[0]), nil)
-		return
-	}
-
-	bufSize = dataFileMap["datafile-1-MB"]
-	reader2 := getDataReader("datafile-1-MB")
-	defer reader2.Close()
-	// Save the data
-	objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	// Add metadata to set ACL grants
-	metaData = map[string]string{
-		"X-Amz-Grant-Read":  "id=fooread@minio.go",
-		"X-Amz-Grant-Write": "id=foowrite@minio.go",
-	}
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
-	args["ctx"] = ctx
-	defer cancel()
-
-	// Read the data back
-	objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName)
-	if getObjectACLErr != nil {
-		logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr)
-		return
-	}
-
-	if len(objectInfo.Metadata) != 3 {
-		logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil)
-		return
-	}
-
-	s, ok = objectInfo.Metadata["X-Amz-Grant-Read"]
-	if !ok {
-		logError(testName, function, args, startTime, "",
"GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "fooread@minio.go" { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil) - return - } - - s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "foowrite@minio.go" { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test validates putObject with context to see if request cancellation is honored for V2. -func testPutObjectContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(ctx, bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "size": "", - "opts": "", - } - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-	defer cleanupBucket(bucketName, c)
-	bufSize := dataFileMap["datafile-33-kB"]
-	reader := getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
-	args["objectName"] = objectName
-
-	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-	args["ctx"] = ctx
-	args["size"] = bufSize
-	defer cancel()
-
-	_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err)
-		return
-	}
-
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	args["ctx"] = ctx
-
-	defer cancel()
-	reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-	_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test get object with GetObject with custom context
-func testGetObjectContextV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(ctx, bucketName, objectName)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	bufSize := dataFileMap["datafile-33-kB"]
-	reader := getDataReader("datafile-33-kB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject call failed", err)
-		return
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	cancel()
-
-	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
-		return
-	}
-	if _, err = r.Stat(); err == nil {
-		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
-		return
-	}
-	r.Close()
-
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	defer cancel()
-
-	// Read the data back
-	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "object Stat call failed", err)
-		return
-	}
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
-		return
-	}
-	if err := r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "object Close() call failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test get object with FGetObject with custom context
-func testFGetObjectContextV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FGetObject(ctx, bucketName, objectName, fileName)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-		"fileName":   "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	bufSize := dataFileMap["datafile-1-MB"]
-	reader := getDataReader("datafile-1-MB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject call failed", err)
-		return
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	defer cancel()
-
-	fileName := "tempfile-context"
-	args["fileName"] = fileName
-
-	// Read the data back
-	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
-		return
-	}
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	defer cancel()
-
-	// Read the data back
-	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err)
-		return
-	}
-
-	if err = os.Remove(fileName + "-fcontext"); err != nil {
-		logError(testName, function, args, startTime, "", "Remove file failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test list objects, V1 and V2
-func testListObjects() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
-	args := map[string]interface{}{
-		"bucketName":   "",
-		"objectPrefix": "",
-		"recursive":    "true",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	testObjects := []struct {
-		name         string
-		storageClass string
-	}{
-		// Special characters
-		{"foo bar", "STANDARD"},
-		{"foo-%", "STANDARD"},
-		{"random-object-1", "STANDARD"},
-		{"random-object-2", "REDUCED_REDUNDANCY"},
-	}
-
-	for i, object := range testObjects {
-		bufSize := dataFileMap["datafile-33-kB"]
-		reader := getDataReader("datafile-33-kB")
-		defer reader.Close()
-		_, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize),
-			minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
-		if err != nil {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err)
-			return
-		}
-	}
-
-	testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) {
-		var objCursor int
-
-		// check for object name and storage-class from listing object result
-		for objInfo := range listFn(context.Background(), bucket, opts) {
-			if objInfo.Err != nil {
-				logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", objInfo.Err)
-				return
-			}
-			if objInfo.Key != testObjects[objCursor].name {
-				logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", nil)
-				return
-			}
-			if objInfo.StorageClass != testObjects[objCursor].storageClass {
-				// Ignored as Gateways (Azure/GCS etc) won't return storage class
-				ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info()
-			}
-			objCursor++
-		}
-
-		if objCursor != len(testObjects) {
-			logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("unexpected object count"))
-			return
-		}
-	}
-
-	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true})
-	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true})
-	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true})
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test deleting multiple objects with object retention set in Governance mode
-func testRemoveObjects() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "RemoveObjects(bucketName, objectsCh, opts)"
-	args := map[string]interface{}{
-		"bucketName":   "",
-		"objectPrefix": "",
-		"recursive":    "true",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(os.Getenv(serverEndpoint),
-		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
-		})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	// Make a new bucket.
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	bufSize := dataFileMap["datafile-129-MB"]
-	reader := getDataReader("datafile-129-MB")
-	defer reader.Close()
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Error uploading object", err)
-		return
-	}
-
-	// Replace with smaller...
-	bufSize = dataFileMap["datafile-10-kB"]
-	reader = getDataReader("datafile-10-kB")
-	defer reader.Close()
-
-	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Error uploading object", err)
-		return
-	}
-
-	t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
-	m := minio.RetentionMode(minio.Governance)
-	opts := minio.PutObjectRetentionOptions{
-		GovernanceBypass: false,
-		RetainUntilDate:  &t,
-		Mode:             &m,
-	}
-	err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Error setting retention", err)
-		return
-	}
-
-	objectsCh := make(chan minio.ObjectInfo)
-	// Send the object names to be removed to objectsCh
-	go func() {
-		defer close(objectsCh)
-		// List all objects from a bucket-name with a matching prefix.
-		for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
-			if object.Err != nil {
-				logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
-				return
-			}
-			objectsCh <- object
-		}
-	}()
-
-	for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
-		// Error is expected here because Retention is set on the object
-		// and RemoveObjects is called without Bypass Governance
-		if rErr.Err == nil {
-			logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
-			return
-		}
-	}
-
-	objectsCh1 := make(chan minio.ObjectInfo)
-
-	// Send the object names to be removed to objectsCh
-	go func() {
-		defer close(objectsCh1)
-		// List all objects from a bucket-name with a matching prefix.
- for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { - if object.Err != nil { - logError(testName, function, args, startTime, "", "Error listing objects", object.Err) - return - } - objectsCh1 <- object - } - }() - - opts1 := minio.RemoveObjectsOptions{ - GovernanceBypass: true, - } - - for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) { - // Error is not expected here because Retention is set on the object - // and RemoveObjects is called with Bypass Governance - logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err) - return - } - - // Delete all objects and buckets - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Convert string to bool and always return false if any error -func mustParseBool(str string) bool { - b, err := strconv.ParseBool(str) - if err != nil { - return false - } - return b -} - -func main() { - // Output to stdout instead of the default stderr - log.SetOutput(os.Stdout) - // create custom formatter - mintFormatter := mintJSONFormatter{} - // set custom formatter - log.SetFormatter(&mintFormatter) - // log Info or above -- success cases are Info level, failures are Fatal level - log.SetLevel(log.InfoLevel) - - tls := mustParseBool(os.Getenv(enableHTTPS)) - kms := mustParseBool(os.Getenv(enableKMS)) - if os.Getenv(enableKMS) == "" { - // Default to KMS tests. - kms = true - } - - // execute tests - if isFullMode() { - testMakeBucketErrorV2() - testGetObjectClosedTwiceV2() - testFPutObjectV2() - testMakeBucketRegionsV2() - testGetObjectReadSeekFunctionalV2() - testGetObjectReadAtFunctionalV2() - testGetObjectRanges() - testCopyObjectV2() - testFunctionalV2() - testComposeObjectErrorCasesV2() - testCompose10KSourcesV2() - testUserMetadataCopyingV2() - testPutObjectWithChecksums() - testPutMultipartObjectWithChecksums() - testPutObject0ByteV2() - testPutObjectNoLengthV2() - testPutObjectsUnknownV2() - testGetObjectContextV2() - testFPutObjectContextV2() - testFGetObjectContextV2() - testPutObjectContextV2() - testPutObjectWithVersioning() - testMakeBucketError() - testMakeBucketRegions() - testPutObjectWithMetadata() - testPutObjectReadAt() - testPutObjectStreaming() - testGetObjectSeekEnd() - testGetObjectClosedTwice() - testGetObjectS3Zip() - testRemoveMultipleObjects() - testRemoveMultipleObjectsWithResult() - testFPutObjectMultipart() - testFPutObject() - testGetObjectReadSeekFunctional() - testGetObjectReadAtFunctional() - testGetObjectReadAtWhenEOFWasReached() - testPresignedPostPolicy() - testCopyObject() - testComposeObjectErrorCases() - testCompose10KSources() - testUserMetadataCopying() - testBucketNotification() - testFunctional() - testGetObjectModified() - testPutObjectUploadSeekedObject() - testGetObjectContext() - testFPutObjectContext() - testFGetObjectContext() - testGetObjectACLContext() - testPutObjectContext() - testStorageClassMetadataPutObject() - testStorageClassInvalidMetadataPutObject() - testStorageClassMetadataCopyObject() - testPutObjectWithContentLanguage() - testListObjects() - testRemoveObjects() - testListObjectVersions() - testStatObjectWithVersioning() - testGetObjectWithVersioning() - testCopyObjectWithVersioning() - testConcurrentCopyObjectWithVersioning() - testComposeObjectWithVersioning() - 
testRemoveObjectWithVersioning() - testRemoveObjectsWithVersioning() - testObjectTaggingWithVersioning() - testTrailingChecksums() - testPutObjectWithAutomaticChecksums() - - // SSE-C tests will only work over TLS connection. - if tls { - testSSECEncryptionPutGet() - testSSECEncryptionFPut() - testSSECEncryptedGetObjectReadAtFunctional() - testSSECEncryptedGetObjectReadSeekFunctional() - testEncryptedCopyObjectV2() - testEncryptedSSECToSSECCopyObject() - testEncryptedSSECToUnencryptedCopyObject() - testUnencryptedToSSECCopyObject() - testUnencryptedToUnencryptedCopyObject() - testEncryptedEmptyObject() - testDecryptedCopyObject() - testSSECEncryptedToSSECCopyObjectPart() - testSSECMultipartEncryptedToSSECCopyObjectPart() - testSSECEncryptedToUnencryptedCopyPart() - testUnencryptedToSSECCopyObjectPart() - testUnencryptedToUnencryptedCopyPart() - testEncryptedSSECToSSES3CopyObject() - testEncryptedSSES3ToSSECCopyObject() - testSSECEncryptedToSSES3CopyObjectPart() - testSSES3EncryptedToSSECCopyObjectPart() - } - - // KMS tests - if kms { - testSSES3EncryptionPutGet() - testSSES3EncryptionFPut() - testSSES3EncryptedGetObjectReadAtFunctional() - testSSES3EncryptedGetObjectReadSeekFunctional() - testEncryptedSSES3ToSSES3CopyObject() - testEncryptedSSES3ToUnencryptedCopyObject() - testUnencryptedToSSES3CopyObject() - testUnencryptedToSSES3CopyObjectPart() - testSSES3EncryptedToUnencryptedCopyPart() - testSSES3EncryptedToSSES3CopyObjectPart() - } - } else { - testFunctional() - testFunctionalV2() - } -} diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go deleted file mode 100644 index 07bc7db..0000000 --- a/vendor/github.com/minio/minio-go/v7/hook-reader.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "fmt" - "io" - "sync" -) - -// hookReader hooks additional reader in the source stream. It is -// useful for making progress bars. Second reader is appropriately -// notified about the exact number of bytes read from the primary -// source on each Read operation. -type hookReader struct { - mu sync.RWMutex - source io.Reader - hook io.Reader -} - -// Seek implements io.Seeker. Seeks source first, and if necessary -// seeks hook if Seek method is appropriately found. -func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { - hr.mu.Lock() - defer hr.mu.Unlock() - - // Verify for source has embedded Seeker, use it. - sourceSeeker, ok := hr.source.(io.Seeker) - if ok { - n, err = sourceSeeker.Seek(offset, whence) - if err != nil { - return 0, err - } - } - - if hr.hook != nil { - // Verify if hook has embedded Seeker, use it. 
-		hookSeeker, ok := hr.hook.(io.Seeker)
-		if ok {
-			var m int64
-			m, err = hookSeeker.Seek(offset, whence)
-			if err != nil {
-				return 0, err
-			}
-			if n != m {
-				return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
-			}
-		}
-	}
-
-	return n, nil
-}
-
-// Read implements io.Reader. It always reads from the source; the number
-// of bytes read, 'n', is reported through the hook. Returns an error for
-// all non-io.EOF conditions.
-func (hr *hookReader) Read(b []byte) (n int, err error) {
-	hr.mu.RLock()
-	defer hr.mu.RUnlock()
-
-	n, err = hr.source.Read(b)
-	if err != nil && err != io.EOF {
-		return n, err
-	}
-	if hr.hook != nil {
-		// Progress the hook with the total read bytes from the source.
-		if _, herr := hr.hook.Read(b[:n]); herr != nil {
-			if herr != io.EOF {
-				return n, herr
-			}
-		}
-	}
-	return n, err
-}
-
-// newHook returns an io.Reader that wraps the source in a hookReader,
-// reporting the data read from the source to the hook.
-func newHook(source, hook io.Reader) io.Reader {
-	if hook == nil {
-		return &hookReader{source: source}
-	}
-	return &hookReader{
-		source: source,
-		hook:   hook,
-	}
-}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
deleted file mode 100644
index 800c4a2..0000000
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2020 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-import (
-	"bytes"
-	"crypto/sha256"
-	"encoding/hex"
-	"encoding/xml"
-	"errors"
-	"io"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/minio/minio-go/v7/pkg/signer"
-)
-
-// AssumeRoleResponse contains the result of a successful AssumeRole request.
-type AssumeRoleResponse struct {
-	XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`
-
-	Result           AssumeRoleResult `xml:"AssumeRoleResult"`
-	ResponseMetadata struct {
-		RequestID string `xml:"RequestId,omitempty"`
-	} `xml:"ResponseMetadata,omitempty"`
-}
-
-// AssumeRoleResult - Contains the response to a successful AssumeRole
-// request, including temporary credentials that can be used to make
-// MinIO API requests.
-type AssumeRoleResult struct {
-	// The identifiers for the temporary security credentials that the operation
-	// returns.
-	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
-
-	// The temporary security credentials, which include an access key ID, a secret
-	// access key, and a security (or session) token.
-	//
-	// Note: The size of the security token that STS APIs return is not fixed. We
-	// strongly recommend that you make no assumptions about the maximum size. As
-	// of this writing, the typical size is less than 4096 bytes, but that can vary.
-	// Also, future updates to AWS might require larger sizes.
- Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. - PackedPolicySize int `xml:",omitempty"` -} - -// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if -// those credentials are expired. -type STSAssumeRole struct { - Expiry - - // Required http Client to use when connecting to MinIO STS service. - Client *http.Client - - // STS endpoint to fetch STS credentials. - STSEndpoint string - - // various options for this request. - Options STSAssumeRoleOptions -} - -// STSAssumeRoleOptions collection of various input options -// to obtain AssumeRole credentials. -type STSAssumeRoleOptions struct { - // Mandatory inputs. - AccessKey string - SecretKey string - - SessionToken string // Optional if the first request is made with temporary credentials. - Policy string // Optional to assign a policy to the assumed role - - Location string // Optional commonly needed with AWS STS. - DurationSeconds int // Optional defaults to 1 hour. - - // Optional only valid if using with AWS STS - RoleARN string - RoleSessionName string - ExternalID string -} - -// NewSTSAssumeRole returns a pointer to a new -// Credentials object wrapping the STSAssumeRole. -func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if opts.AccessKey == "" || opts.SecretKey == "" { - return nil, errors.New("AssumeRole credentials access/secretkey is mandatory") - } - return New(&STSAssumeRole{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - STSEndpoint: stsEndpoint, - Options: opts, - }), nil -} - -const defaultDurationSeconds = 3600 - -// closeResponse close non nil response with any response Body. -// convenient wrapper to drain any remaining data on response body. -// -// Subsequently this allows golang http RoundTripper -// to re-use the same connection for future requests. -func closeResponse(resp *http.Response) { - // Callers should close resp.Body when done reading from it. - // If resp.Body is not closed, the Client's underlying RoundTripper - // (typically Transport) may not be able to re-use a persistent TCP - // connection to the server for a subsequent "keep-alive" request. - if resp != nil && resp.Body != nil { - // Drain any remaining Body and then close the connection. - // Without this closing connection would disallow re-using - // the same connection for future uses. 
- // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(io.Discard, resp.Body) - resp.Body.Close() - } -} - -func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) { - v := url.Values{} - v.Set("Action", "AssumeRole") - v.Set("Version", STSVersion) - if opts.RoleARN != "" { - v.Set("RoleArn", opts.RoleARN) - } - if opts.RoleSessionName != "" { - v.Set("RoleSessionName", opts.RoleSessionName) - } - if opts.DurationSeconds > defaultDurationSeconds { - v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds)) - } else { - v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds)) - } - if opts.Policy != "" { - v.Set("Policy", opts.Policy) - } - if opts.ExternalID != "" { - v.Set("ExternalId", opts.ExternalID) - } - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleResponse{}, err - } - u.Path = "/" - - postBody := strings.NewReader(v.Encode()) - hash := sha256.New() - if _, err = io.Copy(hash, postBody); err != nil { - return AssumeRoleResponse{}, err - } - postBody.Seek(0, 0) - - req, err := http.NewRequest(http.MethodPost, u.String(), postBody) - if err != nil { - return AssumeRoleResponse{}, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil))) - if opts.SessionToken != "" { - req.Header.Set("X-Amz-Security-Token", opts.SessionToken) - } - req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location) - - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleResponse{}, err - } - defer closeResponse(resp) - if resp.StatusCode != http.StatusOK { - var errResp ErrorResponse - buf, err := io.ReadAll(resp.Body) - if err != nil { - return AssumeRoleResponse{}, err - } - _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) - if err != nil { - var s3Err Error - if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { - return AssumeRoleResponse{}, err - } - errResp.RequestID = s3Err.RequestID - errResp.STSError.Code = s3Err.Code - errResp.STSError.Message = s3Err.Message - } - return AssumeRoleResponse{}, errResp - } - - a := AssumeRoleResponse{} - if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil { - return AssumeRoleResponse{}, err - } - return a, nil -} - -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSAssumeRole) Retrieve() (Value, error) { - a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options) - if err != nil { - return Value{}, err - } - - // Expiry window is set to 10secs. - m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go deleted file mode 100644 index ddccfb1..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-// A Chain will search for a provider which returns credentials
-// and cache that provider until Retrieve is called again.
-//
-// The Chain provides a way of chaining multiple providers together
-// which will pick the first available using priority order of the
-// Providers in the list.
-//
-// If none of the Providers retrieve a valid credentials Value, Chain's
-// Retrieve() will return the no-credentials (anonymous) value.
-//
-// If a Provider is found which returns a valid credentials Value, the Chain
-// will cache that Provider for all calls to IsExpired(), until Retrieve is
-// called again after IsExpired() is true.
-//
-//	creds := credentials.NewChainCredentials(
-//		[]credentials.Provider{
-//			&credentials.EnvAWSS3{},
-//			&credentials.EnvMinio{},
-//		})
-//
-//	// Usage of ChainCredentials.
-//	mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
-//	if err != nil {
-//		log.Fatalln(err)
-//	}
type Chain struct {
-	Providers []Provider
-	curr      Provider
-}
-
-// NewChainCredentials returns a pointer to a new Credentials object
-// wrapping a chain of providers.
-func NewChainCredentials(providers []Provider) *Credentials {
-	return New(&Chain{
-		Providers: append([]Provider{}, providers...),
-	})
-}
-
-// Retrieve returns the credentials value, returns no credentials (anonymous)
-// if no credentials provider returned any value.
-//
-// If a provider is found with credentials, it will be cached and any calls
-// to IsExpired() will return the expired state of the cached provider.
-func (c *Chain) Retrieve() (Value, error) {
-	for _, p := range c.Providers {
-		creds, _ := p.Retrieve()
-		// Always prioritize non-anonymous providers, if any.
-		if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
-			continue
-		}
-		c.curr = p
-		return creds, nil
-	}
-	// At this point we have exhausted all the providers and
-	// are left without any credentials; return anonymous.
-	return Value{
-		SignerType: SignatureAnonymous,
-	}, nil
-}
-
-// IsExpired will return the expired state of the currently cached provider
-// if there is one. If there is no current provider, true will be returned.
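// Example (illustrative sketch, not from the original file): wiring the chain
// provider above to the minio-go v7 client that this patch removes from the
// vendor tree. The endpoint value is hypothetical; the constructors and the
// minio.Options fields are taken from the code deleted elsewhere in this diff.
//
//	creds := credentials.NewChainCredentials(
//		[]credentials.Provider{
//			&credentials.EnvAWS{},   // AWS-style env vars first
//			&credentials.EnvMinio{}, // then MinIO-style env vars
//		})
//
//	// The first non-anonymous provider wins; if all are anonymous,
//	// the client falls back to unauthenticated access.
//	mc, err := minio.New("play.min.io", &minio.Options{Creds: creds, Secure: true})
//	if err != nil {
//		log.Fatalln(err)
//	}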
-func (c *Chain) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample deleted file mode 100644 index d793c9e..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample +++ /dev/null @@ -1,17 +0,0 @@ -{ - "version": "8", - "hosts": { - "play": { - "url": "https://play.min.io", - "accessKey": "Q3AM3UQ867SPQQA43P2F", - "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", - "api": "S3v2" - }, - "s3": { - "url": "https://s3.amazonaws.com", - "accessKey": "accessKey", - "secretKey": "secret", - "api": "S3v4" - } - } -} \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go deleted file mode 100644 index af61049..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "sync" - "time" -) - -const ( - // STSVersion sts version string - STSVersion = "2011-06-15" - - // How much duration to slash from the given expiration duration - defaultExpiryWindow = 0.8 -) - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Signature Type. - SignerType SignatureType -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// -// type IAMCredentialProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. 
-// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - if e.CurrentTime == nil { - e.CurrentTime = time.Now - } - cut := window - if cut < 0 { - expireIn := expiration.Sub(e.CurrentTime()) - cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow)) - } - e.expiration = expiration.Add(-cut) -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now - } - return e.expiration.Before(e.CurrentTime()) -} - -// Credentials - A container for synchronous safe retrieval of credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - sync.Mutex - - creds Value - forceRefresh bool - provider Provider -} - -// New returns a pointer to a new Credentials with the provider set. -func New(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, - } -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - if c == nil { - return Value{}, nil - } - - c.Lock() - defer c.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false - } - - return c.creds, nil -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.Lock() - defer c.Unlock() - - c.forceRefresh = true -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be refreshed. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.Lock() - defer c.Unlock() - - return c.isExpired() -} - -// isExpired helper method wrapping the definition of expired credentials. 
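// Example (illustrative sketch, not from the original file): a hypothetical
// Provider embedding Expiry, as the Expiry doc comment above suggests. All
// names and values here are invented for illustration.
//
//	type staticTokenProvider struct {
//		Expiry // embeds the expiry bookkeeping shown above
//	}
//
//	func (p *staticTokenProvider) Retrieve() (Value, error) {
//		// Assume the credentials are valid for one hour; the 10-minute
//		// window makes IsExpired() trip early so callers refresh in time.
//		p.SetExpiration(time.Now().Add(1*time.Hour), 10*time.Minute)
//		return Value{
//			AccessKeyID:     "hypothetical-access-key",
//			SecretAccessKey: "hypothetical-secret-key",
//			SignerType:      SignatureV4,
//		}, nil
//	}
//
//	// IsExpired is provided by the embedded Expiry.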
-func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json deleted file mode 100644 index afbfad5..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "Version": 1, - "SessionToken": "token", - "AccessKeyId": "accessKey", - "SecretAccessKey": "secret", - "Expiration": "9999-04-27T16:02:25.000Z" -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample deleted file mode 100644 index e2dc1bf..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample +++ /dev/null @@ -1,15 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret - -[with_process] -credential_process = /bin/cat credentials.json diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go deleted file mode 100644 index fbfb105..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package credentials provides credential retrieval and management -// for S3 compatible object storage. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := NewFromEnv() -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := NewFromIAM("") -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// # Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. 
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go deleted file mode 100644 index b6e60d0..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import "os" - -// An EnvAWS retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. - -// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY. -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY. -// * Session Token: AWS_SESSION_TOKEN. -type EnvAWS struct { - retrieved bool -} - -// NewEnvAWS returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvAWS() *Credentials { - return New(&EnvAWS{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvAWS) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - signerType := SignatureV4 - if id == "" || secret == "" { - signerType = SignatureAnonymous - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - SignerType: signerType, - }, nil -} - -// IsExpired reports whether the credentials have not yet been retrieved. -func (e *EnvAWS) IsExpired() bool { - return !e.retrieved -}
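A brief usage sketch for the environment provider above, assuming the vendored import path; the key values are placeholders set only to make the example self-contained.

package main

import (
	"fmt"
	"os"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder values; in practice these come from the environment.
	os.Setenv("AWS_ACCESS_KEY_ID", "accessKey")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")

	creds := credentials.NewEnvAWS()
	v, err := creds.Get()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(v.AccessKeyID, v.SignerType) // accessKey S3v4
}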
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go deleted file mode 100644 index 5bfeab1..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import "os" - -// An EnvMinio retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. -// -// Environment variables used: -// -// * Access Key ID: MINIO_ROOT_USER, with MINIO_ACCESS_KEY as fallback. -// * Secret Access Key: MINIO_ROOT_PASSWORD, with MINIO_SECRET_KEY as fallback. -type EnvMinio struct { - retrieved bool -} - -// NewEnvMinio returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvMinio() *Credentials { - return New(&EnvMinio{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvMinio) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("MINIO_ROOT_USER") - secret := os.Getenv("MINIO_ROOT_PASSWORD") - - signerType := SignatureV4 - if id == "" || secret == "" { - id = os.Getenv("MINIO_ACCESS_KEY") - secret = os.Getenv("MINIO_SECRET_KEY") - if id == "" || secret == "" { - signerType = SignatureAnonymous - } - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SignerType: signerType, - }, nil -} - -// IsExpired reports whether the credentials have not yet been retrieved. -func (e *EnvMinio) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go deleted file mode 100644 index 07a9c2f..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" -) - -// ErrorResponse - Is the typed error returned. -// The ErrorResponse struct should be comparable since it is compared inside -// the golang http API (https://github.com/golang/go/issues/29768) -type ErrorResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"` - STSError struct { - Type string `xml:"Type"` - Code string `xml:"Code"` - Message string `xml:"Message"` - } `xml:"Error"` - RequestID string `xml:"RequestId"` -} - -// Error - Is the typed error returned by all API operations. -type Error struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string - Message string - BucketName string - Key string - Resource string - RequestID string `xml:"RequestId"` - HostID string `xml:"HostId"` - - // Region where the bucket is located. This header is returned - // only in HEAD bucket and ListObjects response. - Region string - - // Captures the server string returned in response header. - Server string - - // Underlying HTTP status code for the returned error - StatusCode int `xml:"-" json:"-"` -} - -// Error - Returns the S3 error string.
-func (e Error) Error() string { - if e.Message == "" { - return fmt.Sprintf("Error response code %s.", e.Code) - } - return e.Message -} - -// Error - Returns STS error string. -func (e ErrorResponse) Error() string { - if e.STSError.Message == "" { - return fmt.Sprintf("Error response code %s.", e.STSError.Code) - } - return e.STSError.Message -} - -// xmlDecoder provide decoded value in xml. -func xmlDecoder(body io.Reader, v interface{}) error { - d := xml.NewDecoder(body) - return d.Decode(v) -} - -// xmlDecodeAndBody reads the whole body up to 1MB and -// tries to XML decode it into v. -// The body that was read and any error from reading or decoding is returned. -func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { - // read the whole body (up to 1MB) - const maxBodyLength = 1 << 20 - body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) - if err != nil { - return nil, err - } - return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go deleted file mode 100644 index 5b07376..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ /dev/null @@ -1,157 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/json" - "errors" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - ini "gopkg.in/ini.v1" -) - -// A externalProcessCredentials stores the output of a credential_process -type externalProcessCredentials struct { - Version int - SessionToken string - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string - Expiration time.Time -} - -// A FileAWSCredentials retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type FileAWSCredentials struct { - Expiry - - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewFileAWSCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. 
-func NewFileAWSCredentials(filename, profile string) *Credentials { - return New(&FileAWSCredentials{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// user's home directory. -func (p *FileAWSCredentials) Retrieve() (Value, error) { - if p.Filename == "" { - p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") - if p.Filename == "" { - homeDir, err := os.UserHomeDir() - if err != nil { - return Value{}, err - } - p.Filename = filepath.Join(homeDir, ".aws", "credentials") - } - } - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - if p.Profile == "" { - p.Profile = "default" - } - } - - p.retrieved = false - - iniProfile, err := loadProfile(p.Filename, p.Profile) - if err != nil { - return Value{}, err - } - - // Default to empty string if not found. - id := iniProfile.Key("aws_access_key_id") - // Default to empty string if not found. - secret := iniProfile.Key("aws_secret_access_key") - // Default to empty string if not found. - token := iniProfile.Key("aws_session_token") - - // If credential_process is defined, obtain credentials by executing - // the external process - credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String()) - if credentialProcess != "" { - args := strings.Fields(credentialProcess) - if len(args) <= 1 { - return Value{}, errors.New("invalid credential process args") - } - cmd := exec.Command(args[0], args[1:]...) - out, err := cmd.Output() - if err != nil { - return Value{}, err - } - var externalProcessCredentials externalProcessCredentials - err = json.Unmarshal([]byte(out), &externalProcessCredentials) - if err != nil { - return Value{}, err - } - p.retrieved = true - p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow) - return Value{ - AccessKeyID: externalProcessCredentials.AccessKeyID, - SecretAccessKey: externalProcessCredentials.SecretAccessKey, - SessionToken: externalProcessCredentials.SessionToken, - SignerType: SignatureV4, - }, nil - } - p.retrieved = true - return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), - SignerType: SignatureV4, - }, nil -} - -// loadProfile loads the named profile from the shared credentials file. -// The credentials retrieved for the profile are returned; an error is -// returned if the file cannot be read or the data is invalid. -func loadProfile(filename, profile string) (*ini.Section, error) { - config, err := ini.Load(filename) - if err != nil { - return nil, err - } - iniProfile, err := config.GetSection(profile) - if err != nil { - return nil, err - } - return iniProfile, nil -}
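A short usage sketch for the shared-credentials provider above, under the assumption that a standard credentials file exists on the host; the empty arguments select the defaults documented in the type's comments.

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// "" filename -> AWS_SHARED_CREDENTIALS_FILE or $HOME/.aws/credentials;
	// "" profile  -> AWS_PROFILE or "default".
	creds := credentials.NewFileAWSCredentials("", "")
	v, err := creds.Get()
	if err != nil {
		fmt.Println("error:", err) // e.g. the file does not exist
		return
	}
	fmt.Println(v.AccessKeyID)
}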
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go deleted file mode 100644 index eb77767..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go +++ /dev/null @@ -1,139 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "os" - "path/filepath" - "runtime" - - jsoniter "github.com/json-iterator/go" -) - -// A FileMinioClient retrieves credentials from the current user's home -// directory, and keeps track of whether those credentials are expired. -// -// Configuration file example: $HOME/.mc/config.json -type FileMinioClient struct { - // Path to the shared credentials file. - // - // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.mc/config.json" - // Windows: "%USERPROFILE%\mc\config.json" - Filename string - - // MinIO Alias to extract credentials from the shared credentials file. If empty - // will default to environment variable "MINIO_ALIAS" or "s3" if the - // environment variable is also not set. - Alias string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewFileMinioClient returns a pointer to a new Credentials object -// wrapping the Alias file provider. -func NewFileMinioClient(filename, alias string) *Credentials { - return New(&FileMinioClient{ - Filename: filename, - Alias: alias, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// user's home directory. -func (p *FileMinioClient) Retrieve() (Value, error) { - if p.Filename == "" { - if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { - p.Filename = value - } else { - homeDir, err := os.UserHomeDir() - if err != nil { - return Value{}, err - } - p.Filename = filepath.Join(homeDir, ".mc", "config.json") - if runtime.GOOS == "windows" { - p.Filename = filepath.Join(homeDir, "mc", "config.json") - } - } - } - - if p.Alias == "" { - p.Alias = os.Getenv("MINIO_ALIAS") - if p.Alias == "" { - p.Alias = "s3" - } - } - - p.retrieved = false - - hostCfg, err := loadAlias(p.Filename, p.Alias) - if err != nil { - return Value{}, err - } - - p.retrieved = true - return Value{ - AccessKeyID: hostCfg.AccessKey, - SecretAccessKey: hostCfg.SecretKey, - SignerType: parseSignatureType(hostCfg.API), - }, nil -} - -// IsExpired reports whether the credentials have not yet been retrieved. -func (p *FileMinioClient) IsExpired() bool { - return !p.retrieved -} - -// hostConfig - configuration of a host. -type hostConfig struct { - URL string `json:"url"` - AccessKey string `json:"accessKey"` - SecretKey string `json:"secretKey"` - API string `json:"api"` -} - -// config - the mc config file, including its format version. -type config struct { - Version string `json:"version"` - Hosts map[string]hostConfig `json:"hosts"` - Aliases map[string]hostConfig `json:"aliases"` -} - -// loadAlias loads the named alias from the shared config file pointed to by filename. -// The credentials retrieved for the alias are returned; an error is returned if the -// file cannot be read or parsed.
-func loadAlias(filename, alias string) (hostConfig, error) { - cfg := &config{} - json := jsoniter.ConfigCompatibleWithStandardLibrary - - configBytes, err := os.ReadFile(filename) - if err != nil { - return hostConfig{}, err - } - if err = json.Unmarshal(configBytes, cfg); err != nil { - return hostConfig{}, err - } - - if cfg.Version == "10" { - return cfg.Aliases[alias], nil - } - - return cfg.Hosts[alias], nil -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go deleted file mode 100644 index c5153c4..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ /dev/null @@ -1,433 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "path" - "strings" - "time" - - jsoniter "github.com/json-iterator/go" -) - -// DefaultExpiryWindow - Default expiry window. -// ExpiryWindow will allow the credentials to trigger refreshing -// prior to the credentials actually expiring. This is beneficial -// so race conditions with expiring credentials do not cause -// request to fail unexpectedly due to ExpiredTokenException exceptions. -// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration. -// When used the tokens refresh will be triggered when 80% of the elapsed -// time until the actual expiration time is passed. -const DefaultExpiryWindow = -1 - -// A IAM retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -type IAM struct { - Expiry - - // Required http Client to use when connecting to IAM metadata service. - Client *http.Client - - // Custom endpoint to fetch IAM role credentials. 
- Endpoint string - - // Region is a configurable custom region for STS. - Region string - - // Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html - Container struct { - AuthorizationToken string - CredentialsFullURI string - CredentialsRelativeURI string - } - - // EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html - EKSIdentity struct { - TokenFile string - RoleARN string - RoleSessionName string - } -} - -// IAM Roles for Amazon EC2 -// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -const ( - DefaultIAMRoleEndpoint = "http://169.254.169.254" - DefaultECSRoleEndpoint = "http://169.254.170.2" - DefaultSTSRoleEndpoint = "https://sts.amazonaws.com" - DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/" - TokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" - TokenPath = "/latest/api/token" - TokenTTL = "21600" - TokenRequestHeader = "X-aws-ec2-metadata-token" -) - -// NewIAM returns a pointer to a new Credentials object wrapping the IAM. -func NewIAM(endpoint string) *Credentials { - return New(&IAM{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - Endpoint: endpoint, - }) -}
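A minimal usage sketch for the IAM provider, assuming the code runs on a host that can reach an instance-metadata or container-credentials endpoint; an empty endpoint selects the defaults listed in the constants above.

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	creds := credentials.NewIAM("") // "" -> IMDS / ECS / STS endpoint, chosen by environment
	v, err := creds.Get()           // performs the metadata round trip described in Retrieve below
	if err != nil {
		fmt.Println("error:", err) // expected off EC2/ECS/EKS
		return
	}
	fmt.Println(v.AccessKeyID)
}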
+ region + ".amazonaws.com" - } - } else { - endpoint = DefaultSTSRoleEndpoint - } - } - - creds := &STSWebIdentity{ - Client: m.Client, - STSEndpoint: endpoint, - GetWebIDTokenExpiry: func() (*WebIdentityToken, error) { - token, err := os.ReadFile(identityFile) - if err != nil { - return nil, err - } - - return &WebIdentityToken{Token: string(token)}, nil - }, - RoleARN: roleArn, - roleSessionName: roleSessionName, - } - - stsWebIdentityCreds, err := creds.Retrieve() - if err == nil { - m.SetExpiration(creds.Expiration(), DefaultExpiryWindow) - } - return stsWebIdentityCreds, err - - case relativeURI != "": - if len(endpoint) == 0 { - endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI) - } - - roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token) - - case fullURI != "": - if len(endpoint) == 0 { - endpoint = fullURI - var ok bool - if ok, err = isLoopback(endpoint); !ok { - if err == nil { - err = fmt.Errorf("uri host is not a loopback address: %s", endpoint) - } - break - } - } - - roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token) - - default: - roleCreds, err = getCredentials(m.Client, endpoint) - } - - if err != nil { - return Value{}, err - } - // Expiry window is set to 10secs. - m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - SignerType: SignatureV4, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string - - // Unused params. - LastUpdated time.Time - Type string -} - -// Get the final IAM role URL where the request will -// be sent to fetch the rolling access credentials. -// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func getIAMRoleURL(endpoint string) (*url.URL, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - u.Path = DefaultIAMSecurityCredsPath - return u, nil -} - -// listRoleNames lists of credential role names associated -// with the current EC2 service. If there are no credentials, -// or there is an error making or receiving the request. 
-// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) { - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - if token != "" { - req.Header.Add(TokenRequestHeader, token) - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, errors.New(resp.Status) - } - - credsList := []string{} - s := bufio.NewScanner(resp.Body) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, err - } - - return credsList, nil -} - -func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) { - req, err := http.NewRequest(http.MethodGet, endpoint, nil) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - if token != "" { - req.Header.Set("Authorization", token) - } - - resp, err := client.Do(req) - if err != nil { - return ec2RoleCredRespBody{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return ec2RoleCredRespBody{}, errors.New(resp.Status) - } - - respCreds := ec2RoleCredRespBody{} - if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, err - } - - return respCreds, nil -} - -func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil) - if err != nil { - return "", err - } - req.Header.Add(TokenRequestTTLHeader, TokenTTL) - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - data, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode != http.StatusOK { - return "", errors.New(resp.Status) - } - return string(data), nil -} - -// getCredentials - obtains the credentials from the IAM role name associated with -// the current EC2 service. -// -// If the credentials cannot be found, or there is an error -// reading the response an error will be returned. -func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { - if endpoint == "" { - endpoint = DefaultIAMRoleEndpoint - } - - // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html - token, err := fetchIMDSToken(client, endpoint) - if err != nil { - // Return only errors for valid situations, if the IMDSv2 is not enabled - // we will not be able to get the token, in such a situation we have - // to rely on IMDSv1 behavior as a fallback, this check ensures that. 
- // Refer https://github.com/minio/minio-go/issues/1866 - if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { - return ec2RoleCredRespBody{}, err - } - } - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - u, err := getIAMRoleURL(endpoint) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - roleNames, err := listRoleNames(client, u, token) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - if len(roleNames) == 0 { - return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") - } - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - // - An instance profile can contain only one IAM role. This limit cannot be increased. - roleName := roleNames[0] - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - // The following command retrieves the security credentials for an - // IAM role named `s3access`. - // - // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access - // - u.Path = path.Join(u.Path, roleName) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return ec2RoleCredRespBody{}, err - } - if token != "" { - req.Header.Add(TokenRequestHeader, token) - } - - resp, err := client.Do(req) - if err != nil { - return ec2RoleCredRespBody{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return ec2RoleCredRespBody{}, errors.New(resp.Status) - } - - respCreds := ec2RoleCredRespBody{} - if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, err - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, errors.New(respCreds.Message) - } - - return respCreds, nil -} - -// isLoopback identifies if a uri's host is on a loopback address -func isLoopback(uri string) (bool, error) { - u, err := url.Parse(uri) - if err != nil { - return false, err - } - - host := u.Hostname() - if len(host) == 0 { - return false, fmt.Errorf("can't parse host from uri: %s", uri) - } - - ips, err := net.LookupHost(host) - if err != nil { - return false, err - } - for _, ip := range ips { - if !net.ParseIP(ip).IsLoopback() { - return false, nil - } - } - - return true, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go deleted file mode 100644 index b794333..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package credentials - -import "strings" - -// SignatureType is the type of Authorization requested for a given HTTP request. -type SignatureType int - -// Different types of supported signatures - default is SignatureV4 or SignatureDefault. -const ( - // SignatureDefault is always set to v4. - SignatureDefault SignatureType = iota - SignatureV4 - SignatureV2 - SignatureV4Streaming - SignatureAnonymous // Anonymous signature signifies no signature. -) - -// IsV2 - is signature SignatureV2? -func (s SignatureType) IsV2() bool { - return s == SignatureV2 -} - -// IsV4 - is signature SignatureV4? -func (s SignatureType) IsV4() bool { - return s == SignatureV4 || s == SignatureDefault -} - -// IsStreamingV4 - is signature SignatureV4Streaming? -func (s SignatureType) IsStreamingV4() bool { - return s == SignatureV4Streaming -} - -// IsAnonymous - is signature empty? -func (s SignatureType) IsAnonymous() bool { - return s == SignatureAnonymous -} - -// String returns a humanized version of the signature type; -// the strings returned here are case-insensitive. -func (s SignatureType) String() string { - if s.IsV2() { - return "S3v2" - } else if s.IsV4() { - return "S3v4" - } else if s.IsStreamingV4() { - return "S3v4Streaming" - } - return "Anonymous" -} - -func parseSignatureType(str string) SignatureType { - if strings.EqualFold(str, "S3v4") { - return SignatureV4 - } else if strings.EqualFold(str, "S3v2") { - return SignatureV2 - } else if strings.EqualFold(str, "S3v4Streaming") { - return SignatureV4Streaming - } - return SignatureAnonymous -}
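A small illustration of the String and Is* helpers above; the printed values are taken directly from the code.

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	s := credentials.SignatureDefault
	fmt.Println(s.String()) // "S3v4" -- the default is treated as V4
	fmt.Println(s.IsV4())   // true

	fmt.Println(credentials.SignatureV2.IsV2())              // true
	fmt.Println(credentials.SignatureAnonymous.IsAnonymous()) // true
}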
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go deleted file mode 100644 index 7dde00b..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -// A Static is a set of credentials which are set programmatically, -// and will never expire. -type Static struct { - Value -} - -// NewStaticV2 returns a pointer to a new Credentials object -// wrapping a static credentials value provider, with the signature -// set to v2. If access and secret are not specified, then regardless -// of the configured signature type, Retrieve will return an -// anonymous Value. -func NewStaticV2(id, secret, token string) *Credentials { - return NewStatic(id, secret, token, SignatureV2) -} - -// NewStaticV4 is similar to NewStaticV2 with similar considerations. -func NewStaticV4(id, secret, token string) *Credentials { - return NewStatic(id, secret, token, SignatureV4) -} - -// NewStatic returns a pointer to a new Credentials object -// wrapping a static credentials value provider. -func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { - return New(&Static{ - Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - SignerType: signerType, - }, - }) -} - -// Retrieve returns the static credentials. -func (s *Static) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - // Anonymous is not an error - return Value{SignerType: SignatureAnonymous}, nil - } - return s.Value, nil -} - -// IsExpired reports whether the credentials are expired. -// -// For Static, the credentials never expire. -func (s *Static) IsExpired() bool { - return false -}
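A usage sketch for the static provider above; the key values are placeholders.

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	creds := credentials.NewStaticV4("accessKey", "secret", "") // placeholder keys, no session token
	v, _ := creds.Get()       // static Retrieve cannot fail
	fmt.Println(v.SignerType) // S3v4

	anon := credentials.NewStaticV4("", "", "")
	v, _ = anon.Get()
	fmt.Println(v.SignerType.IsAnonymous()) // true -- empty keys degrade to anonymous
}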
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go deleted file mode 100644 index 9e92c1e..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019-2022 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" -) - -// AssumedRoleUser - The identifiers for the temporary security credentials that -// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser -type AssumedRoleUser struct { - Arn string - AssumedRoleID string `xml:"AssumeRoleId"` -} - -// AssumeRoleWithClientGrantsResponse contains the result of a successful AssumeRoleWithClientGrants request. -type AssumeRoleWithClientGrantsResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"` - Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants -// request, including temporary credentials that can be used to make MinIO API requests. -type ClientGrantsResult struct { - AssumedRoleUser AssumedRoleUser `xml:",omitempty"` - Audience string `xml:",omitempty"` - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - PackedPolicySize int `xml:",omitempty"` - Provider string `xml:",omitempty"` - SubjectFromClientGrantsToken string `xml:",omitempty"` -} - -// ClientGrantsToken - client grants token with expiry. -type ClientGrantsToken struct { - Token string - Expiry int -} - -// An STSClientGrants retrieves credentials from the MinIO service, and keeps -// track of whether those credentials are expired. -type STSClientGrants struct { - Expiry - - // Required http Client to use when connecting to MinIO STS service. - Client *http.Client - - // MinIO endpoint to fetch STS credentials. - STSEndpoint string - - // GetClientGrantsTokenExpiry is a function to retrieve tokens - // from the IDP. It should return two values: an accessToken, - // which is a self-contained access token (JWT), and the expiry - // associated with that token. This customer-provided function - // is mandatory. - GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error) -} - -// NewSTSClientGrants returns a pointer to a new -// Credentials object wrapping the STSClientGrants. -func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if getClientGrantsTokenExpiry == nil { - return nil, errors.New("Client grants access token and expiry retrieval function should be defined") - } - return New(&STSClientGrants{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - STSEndpoint: stsEndpoint, - GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry, - }), nil -} - -func getClientGrantsCredentials(clnt *http.Client, endpoint string, - getClientGrantsTokenExpiry func() (*ClientGrantsToken, error), -) (AssumeRoleWithClientGrantsResponse, error) { - accessToken, err := getClientGrantsTokenExpiry() - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithClientGrants") - v.Set("Token", accessToken.Token) - v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry)) - v.Set("Version", STSVersion) - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - - req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - var errResp ErrorResponse - buf, err := io.ReadAll(resp.Body) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) - if err != nil { - var s3Err Error - if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - errResp.RequestID = s3Err.RequestID - errResp.STSError.Code = s3Err.Code - errResp.STSError.Message = s3Err.Message - } - return AssumeRoleWithClientGrantsResponse{}, errResp - } - - a := AssumeRoleWithClientGrantsResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - return a, nil -} - -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSClientGrants) Retrieve() (Value, error) { - a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry) - if err != nil { - return Value{}, err - } - - // The expiry window is set via DefaultExpiryWindow (refresh at 80% of the credential lifetime).
- m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go deleted file mode 100644 index e1f9ce4..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go +++ /dev/null @@ -1,146 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2022 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/xml" - "errors" - "fmt" - "net/http" - "net/url" - "time" -) - -// CustomTokenResult - Contains temporary creds and user metadata. -type CustomTokenResult struct { - Credentials struct { - AccessKey string `xml:"AccessKeyId"` - SecretKey string `xml:"SecretAccessKey"` - Expiration time.Time `xml:"Expiration"` - SessionToken string `xml:"SessionToken"` - } `xml:",omitempty"` - - AssumedUser string `xml:",omitempty"` -} - -// AssumeRoleWithCustomTokenResponse contains the result of a successful -// AssumeRoleWithCustomToken request. -type AssumeRoleWithCustomTokenResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCustomTokenResponse" json:"-"` - Result CustomTokenResult `xml:"AssumeRoleWithCustomTokenResult"` - Metadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// CustomTokenIdentity - satisfies the Provider interface, and retrieves -// credentials from MinIO using the AssumeRoleWithCustomToken STS API. -type CustomTokenIdentity struct { - Expiry - - Client *http.Client - - // MinIO server STS endpoint to fetch STS credentials. - STSEndpoint string - - // The custom token to use with the request. - Token string - - // RoleArn associated with the identity - RoleArn string - - // RequestedExpiry is to set the validity of the generated credentials - // (this value bounded by server). - RequestedExpiry time.Duration -} - -// Retrieve - to satisfy Provider interface; fetches credentials from MinIO. 
-func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { - u, err := url.Parse(c.STSEndpoint) - if err != nil { - return value, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithCustomToken") - v.Set("Version", STSVersion) - v.Set("RoleArn", c.RoleArn) - v.Set("Token", c.Token) - if c.RequestedExpiry != 0 { - v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds()))) - } - - u.RawQuery = v.Encode() - - req, err := http.NewRequest(http.MethodPost, u.String(), nil) - if err != nil { - return value, err - } - - resp, err := c.Client.Do(req) - if err != nil { - return value, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return value, errors.New(resp.Status) - } - - r := AssumeRoleWithCustomTokenResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { - return - } - - cr := r.Result.Credentials - c.SetExpiration(cr.Expiration, DefaultExpiryWindow) - return Value{ - AccessKeyID: cr.AccessKey, - SecretAccessKey: cr.SecretKey, - SessionToken: cr.SessionToken, - SignerType: SignatureV4, - }, nil -} - -// NewCustomTokenCredentials - returns credentials using the -// AssumeRoleWithCustomToken STS API. -func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) { - c := CustomTokenIdentity{ - Client: &http.Client{Transport: http.DefaultTransport}, - STSEndpoint: stsEndpoint, - Token: token, - RoleArn: roleArn, - } - for _, optFunc := range optFuncs { - optFunc(&c) - } - return New(&c), nil -} - -// CustomTokenOpt is a function type to configure the custom-token based -// credentials using NewCustomTokenCredentials. -type CustomTokenOpt func(*CustomTokenIdentity) - -// CustomTokenValidityOpt sets the validity duration of the requested -// credentials. This value is ignored if the server enforces a lower validity -// period. -func CustomTokenValidityOpt(d time.Duration) CustomTokenOpt { - return func(c *CustomTokenIdentity) { - c.RequestedExpiry = d - } -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go deleted file mode 100644 index ec5f3f0..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ /dev/null @@ -1,189 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019-2022 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package credentials - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" -) - -// AssumeRoleWithLDAPResponse contains the result of successful -// AssumeRoleWithLDAPIdentity request -type AssumeRoleWithLDAPResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"` - Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// LDAPIdentityResult - contains credentials for a successful -// AssumeRoleWithLDAPIdentity request. -type LDAPIdentityResult struct { - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - - SubjectFromToken string `xml:",omitempty"` -} - -// LDAPIdentity retrieves credentials from MinIO -type LDAPIdentity struct { - Expiry - - // Required http Client to use when connecting to MinIO STS service. - Client *http.Client - - // Exported STS endpoint to fetch STS credentials. - STSEndpoint string - - // LDAP username/password used to fetch LDAP STS credentials. - LDAPUsername, LDAPPassword string - - // Session policy to apply to the generated credentials. Leave empty to - // use the full access policy available to the user. - Policy string - - // RequestedExpiry is the configured expiry duration for credentials - // requested from LDAP. - RequestedExpiry time.Duration -} - -// NewLDAPIdentity returns new credentials object that uses LDAP -// Identity. -func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) { - l := LDAPIdentity{ - Client: &http.Client{Transport: http.DefaultTransport}, - STSEndpoint: stsEndpoint, - LDAPUsername: ldapUsername, - LDAPPassword: ldapPassword, - } - for _, optFunc := range optFuncs { - optFunc(&l) - } - return New(&l), nil -} - -// LDAPIdentityOpt is a function type used to configured the LDAPIdentity -// instance. -type LDAPIdentityOpt func(*LDAPIdentity) - -// LDAPIdentityPolicyOpt sets the session policy for requested credentials. -func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt { - return func(k *LDAPIdentity) { - k.Policy = policy - } -} - -// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials. -func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt { - return func(k *LDAPIdentity) { - k.RequestedExpiry = d - } -} - -// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses -// LDAP Identity with a specified session policy. The `policy` parameter must be -// a JSON string specifying the policy document. -// -// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead. -func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) { - return New(&LDAPIdentity{ - Client: &http.Client{Transport: http.DefaultTransport}, - STSEndpoint: stsEndpoint, - LDAPUsername: ldapUsername, - LDAPPassword: ldapPassword, - Policy: policy, - }), nil -} - -// Retrieve gets the credential by calling the MinIO STS API for -// LDAP on the configured stsEndpoint. 
-func (k *LDAPIdentity) Retrieve() (value Value, err error) { - u, err := url.Parse(k.STSEndpoint) - if err != nil { - return value, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithLDAPIdentity") - v.Set("Version", STSVersion) - v.Set("LDAPUsername", k.LDAPUsername) - v.Set("LDAPPassword", k.LDAPPassword) - if k.Policy != "" { - v.Set("Policy", k.Policy) - } - if k.RequestedExpiry != 0 { - v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds()))) - } - - req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) - if err != nil { - return value, err - } - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - resp, err := k.Client.Do(req) - if err != nil { - return value, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - var errResp ErrorResponse - buf, err := io.ReadAll(resp.Body) - if err != nil { - return value, err - } - _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) - if err != nil { - var s3Err Error - if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { - return value, err - } - errResp.RequestID = s3Err.RequestID - errResp.STSError.Code = s3Err.Code - errResp.STSError.Message = s3Err.Message - } - return value, errResp - } - - r := AssumeRoleWithLDAPResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { - return - } - - cr := r.Result.Credentials - k.SetExpiration(cr.Expiration, DefaultExpiryWindow) - return Value{ - AccessKeyID: cr.AccessKey, - SecretAccessKey: cr.SecretKey, - SessionToken: cr.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go deleted file mode 100644 index dee0a8c..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go +++ /dev/null @@ -1,211 +0,0 @@ -// MinIO Go Library for Amazon S3 Compatible Cloud Storage -// Copyright 2021 MinIO, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package credentials - -import ( - "bytes" - "crypto/tls" - "encoding/xml" - "errors" - "io" - "net" - "net/http" - "net/url" - "strconv" - "time" -) - -// CertificateIdentityOption is an optional AssumeRoleWithCertificate -// parameter - e.g. a custom HTTP transport configuration or S3 credental -// livetime. -type CertificateIdentityOption func(*STSCertificateIdentity) - -// CertificateIdentityWithTransport returns a CertificateIdentityOption that -// customizes the STSCertificateIdentity with the given http.RoundTripper. -func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption { - return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t }) -} - -// CertificateIdentityWithExpiry returns a CertificateIdentityOption that -// customizes the STSCertificateIdentity with the given livetime. 
-// -// Fetched S3 credentials will have the given livetime if the STS server -// allows such credentials. -func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption { - return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime }) -} - -// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and -// rotates those credentials once they expire. -type STSCertificateIdentity struct { - Expiry - - // STSEndpoint is the base URL endpoint of the STS API. - // For example, https://minio.local:9000 - STSEndpoint string - - // S3CredentialLivetime is the duration temp. S3 access - // credentials should be valid. - // - // It represents the access credential livetime requested - // by the client. The STS server may choose to issue - // temp. S3 credentials that have a different - usually - // shorter - livetime. - // - // The default livetime is one hour. - S3CredentialLivetime time.Duration - - // Client is the HTTP client used to authenticate and fetch - // S3 credentials. - // - // A custom TLS client configuration can be specified by - // using a custom http.Transport: - // Client: http.Client { - // Transport: &http.Transport{ - // TLSClientConfig: &tls.Config{}, - // }, - // } - Client http.Client -} - -var _ Provider = (*STSWebIdentity)(nil) // compiler check - -// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates -// to the given STS endpoint with the given TLS certificate and retrieves and -// rotates S3 credentials. -func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) { - if endpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if _, err := url.Parse(endpoint); err != nil { - return nil, err - } - identity := &STSCertificateIdentity{ - STSEndpoint: endpoint, - Client: http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 5 * time.Second, - TLSClientConfig: &tls.Config{ - Certificates: []tls.Certificate{certificate}, - }, - }, - }, - } - for _, option := range options { - option(identity) - } - return New(identity), nil -} - -// Retrieve fetches a new set of S3 credentials from the configured -// STS API endpoint. 
-func (i *STSCertificateIdentity) Retrieve() (Value, error) { - endpointURL, err := url.Parse(i.STSEndpoint) - if err != nil { - return Value{}, err - } - livetime := i.S3CredentialLivetime - if livetime == 0 { - livetime = 1 * time.Hour - } - - queryValues := url.Values{} - queryValues.Set("Action", "AssumeRoleWithCertificate") - queryValues.Set("Version", STSVersion) - endpointURL.RawQuery = queryValues.Encode() - - req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil) - if err != nil { - return Value{}, err - } - if req.Form == nil { - req.Form = url.Values{} - } - req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) - - resp, err := i.Client.Do(req) - if err != nil { - return Value{}, err - } - if resp.Body != nil { - defer resp.Body.Close() - } - if resp.StatusCode != http.StatusOK { - var errResp ErrorResponse - buf, err := io.ReadAll(resp.Body) - if err != nil { - return Value{}, err - } - _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) - if err != nil { - var s3Err Error - if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { - return Value{}, err - } - errResp.RequestID = s3Err.RequestID - errResp.STSError.Code = s3Err.Code - errResp.STSError.Message = s3Err.Message - } - return Value{}, errResp - } - - const MaxSize = 10 * 1 << 20 - var body io.Reader = resp.Body - if resp.ContentLength > 0 && resp.ContentLength < MaxSize { - body = io.LimitReader(body, resp.ContentLength) - } else { - body = io.LimitReader(body, MaxSize) - } - - var response assumeRoleWithCertificateResponse - if err = xml.NewDecoder(body).Decode(&response); err != nil { - return Value{}, err - } - i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow) - return Value{ - AccessKeyID: response.Result.Credentials.AccessKey, - SecretAccessKey: response.Result.Credentials.SecretKey, - SessionToken: response.Result.Credentials.SessionToken, - SignerType: SignatureDefault, - }, nil -} - -// Expiration returns the expiration time of the current S3 credentials. -func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration } - -type assumeRoleWithCertificateResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"` - Result struct { - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:"Credentials" json:"credentials,omitempty"` - } `xml:"AssumeRoleWithCertificateResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go deleted file mode 100644 index 2e2af50..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ /dev/null @@ -1,205 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019-2022 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// AssumeRoleWithWebIdentityResponse contains the result of a successful AssumeRoleWithWebIdentity request. -type AssumeRoleWithWebIdentityResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` - Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity -// request, including temporary credentials that can be used to make MinIO API requests. -type WebIdentityResult struct { - AssumedRoleUser AssumedRoleUser `xml:",omitempty"` - Audience string `xml:",omitempty"` - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - PackedPolicySize int `xml:",omitempty"` - Provider string `xml:",omitempty"` - SubjectFromWebIdentityToken string `xml:",omitempty"` -} - -// WebIdentityToken - web identity token with expiry. -type WebIdentityToken struct { - Token string - AccessToken string - Expiry int -} - -// An STSWebIdentity retrieves credentials from the MinIO STS service and keeps -// track of whether those credentials have expired. -type STSWebIdentity struct { - Expiry - - // Client is the required HTTP client to use when connecting to the MinIO STS service. - Client *http.Client - - // STSEndpoint is the exported STS endpoint from which to fetch STS credentials. - STSEndpoint string - - // GetWebIDTokenExpiry is an exported, customer-provided function that returns - // ID tokens from the IDP. It should return a self-contained ID token (JWT) - // together with the expiry associated with that token. - // This function is mandatory. - GetWebIDTokenExpiry func() (*WebIdentityToken, error) - - // RoleARN is the Amazon Resource Name (ARN) of the role that the caller is - // assuming. - RoleARN string - - // roleSessionName is the identifier for the assumed role session. - roleSessionName string -} - -// NewSTSWebIdentity returns a pointer to a new -// Credentials object wrapping the STSWebIdentity.
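A minimal sketch of wiring a token source into the constructor defined just below; the endpoint and the stubbed token values are illustrative assumptions:

    package main

    import (
        "log"

        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    // fetchToken would obtain a fresh OIDC ID token from your IDP;
    // here it is a hypothetical stub.
    func fetchToken() (*credentials.WebIdentityToken, error) {
        return &credentials.WebIdentityToken{
            Token:  "eyJhbGciOi...", // self-contained JWT from the IDP
            Expiry: 3600,            // requested credential duration in seconds
        }, nil
    }

    func main() {
        creds, err := credentials.NewSTSWebIdentity("https://minio.local:9000", fetchToken)
        if err != nil {
            log.Fatal(err)
        }
        v, err := creds.Get()
        if err != nil {
            log.Fatal(err)
        }
        log.Println("session token:", v.SessionToken)
    }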
-func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if getWebIDTokenExpiry == nil { - return nil, errors.New("Web ID token and expiry retrieval function should be defined") - } - return New(&STSWebIdentity{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - STSEndpoint: stsEndpoint, - GetWebIDTokenExpiry: getWebIDTokenExpiry, - }), nil -} - -func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, - getWebIDTokenExpiry func() (*WebIdentityToken, error), -) (AssumeRoleWithWebIdentityResponse, error) { - idToken, err := getWebIDTokenExpiry() - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithWebIdentity") - if len(roleARN) > 0 { - v.Set("RoleArn", roleARN) - - if len(roleSessionName) == 0 { - roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10) - } - v.Set("RoleSessionName", roleSessionName) - } - v.Set("WebIdentityToken", idToken.Token) - if idToken.AccessToken != "" { - // Usually set when server is using extended userInfo endpoint. - v.Set("WebIdentityAccessToken", idToken.AccessToken) - } - if idToken.Expiry > 0 { - v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) - } - v.Set("Version", STSVersion) - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - var errResp ErrorResponse - buf, err := io.ReadAll(resp.Body) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) - if err != nil { - var s3Err Error - if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - errResp.RequestID = s3Err.RequestID - errResp.STSError.Code = s3Err.Code - errResp.STSError.Message = s3Err.Message - } - return AssumeRoleWithWebIdentityResponse{}, errResp - } - - a := AssumeRoleWithWebIdentityResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - return a, nil -} - -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSWebIdentity) Retrieve() (Value, error) { - a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry) - if err != nil { - return Value{}, err - } - - // Expiry window is set to 10secs. 
- m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} - -// Expiration returns the expiration time of the credentials -func (m *STSWebIdentity) Expiration() time.Time { - return m.expiration -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go deleted file mode 100644 index 6db26c0..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !fips -// +build !fips - -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2022 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package encrypt - -// FIPS is true if 'fips' build tag was specified. -const FIPS = false diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go deleted file mode 100644 index 6402582..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build fips -// +build fips - -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2022 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package encrypt - -// FIPS is true if 'fips' build tag was specified. -const FIPS = true diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go deleted file mode 100644 index a7081c5..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go +++ /dev/null @@ -1,198 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package encrypt - -import ( - "crypto/md5" - "encoding/base64" - "errors" - "net/http" - - jsoniter "github.com/json-iterator/go" - "golang.org/x/crypto/argon2" -) - -const ( - // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. - SseGenericHeader = "X-Amz-Server-Side-Encryption" - - // SseKmsKeyID is the AWS SSE-KMS key id. - SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id" - // SseEncryptionContext is the AWS SSE-KMS Encryption Context data. - SseEncryptionContext = SseGenericHeader + "-Context" - - // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. - SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm" - // SseCustomerKey is the AWS SSE-C encryption key HTTP header key. - SseCustomerKey = SseGenericHeader + "-Customer-Key" - // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. - SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5" - - // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. - SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. - SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. - SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" -) - -// PBKDF creates a SSE-C key from the provided password and salt. -// PBKDF is a password-based key derivation function -// which can be used to derive a high-entropy cryptographic -// key from a low-entropy password and a salt. -type PBKDF func(password, salt []byte) ServerSide - -// DefaultPBKDF is the default PBKDF. It uses Argon2id with the -// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). -var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { - sse := ssec{} - copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) - return sse -} - -// Type is the server-side-encryption method. It represents one of -// the following encryption methods: -// - SSE-C: server-side-encryption with customer provided keys -// - KMS: server-side-encryption with managed keys -// - S3: server-side-encryption using S3 storage encryption -type Type string - -const ( - // SSEC represents server-side-encryption with customer provided keys - SSEC Type = "SSE-C" - // KMS represents server-side-encryption with managed keys - KMS Type = "KMS" - // S3 represents server-side-encryption using S3 storage encryption - S3 Type = "S3" -) - -// ServerSide is a form of S3 server-side-encryption. -type ServerSide interface { - // Type returns the server-side-encryption method. - Type() Type - - // Marshal adds encryption headers to the provided HTTP headers. - // It marks an HTTP request as server-side-encryption request - // and inserts the required data into the headers. - Marshal(h http.Header) -} - -// NewSSE returns a server-side-encryption using S3 storage encryption. -// Using SSE-S3 the server will encrypt the object with server-managed keys. -func NewSSE() ServerSide { return s3{} } - -// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. 
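A usage sketch of the types above, before the NewSSEKMS implementation below: DefaultPBKDF derives a 256-bit SSE-C key, and the resulting ServerSide value marshals the SSE headers onto a request. The password, salt, and KMS key ID are illustrative assumptions:

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "github.com/minio/minio-go/v7/pkg/encrypt"
    )

    func main() {
        // Derive an SSE-C key from a password and a per-object salt
        // (commonly bucket/object is used as the salt).
        sse := encrypt.DefaultPBKDF([]byte("my-secret-password"), []byte("my-bucket/my-object"))

        h := http.Header{}
        sse.Marshal(h) // sets the X-Amz-Server-Side-Encryption-Customer-* headers
        fmt.Println(h.Get(encrypt.SseCustomerAlgorithm)) // prints "AES256"

        // Alternatively, SSE-KMS with a managed key and an encryption context.
        // "my-key-id" is a hypothetical key name.
        kmsSSE, err := encrypt.NewSSEKMS("my-key-id", map[string]string{"purpose": "docs"})
        if err != nil {
            log.Fatal(err)
        }
        _ = kmsSSE
    }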
-func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { - if context == nil { - return kms{key: keyID, hasContext: false}, nil - } - json := jsoniter.ConfigCompatibleWithStandardLibrary - serializedContext, err := json.Marshal(context) - if err != nil { - return nil, err - } - return kms{key: keyID, context: serializedContext, hasContext: true}, nil -} - -// NewSSEC returns a new server-side-encryption using SSE-C and the provided key. -// The key must be 32 bytes long. -func NewSSEC(key []byte) (ServerSide, error) { - if len(key) != 32 { - return nil, errors.New("encrypt: SSE-C key must be 256 bit long") - } - sse := ssec{} - copy(sse[:], key) - return sse, nil -} - -// SSE transforms a SSE-C copy encryption into a SSE-C encryption. -// It is the inverse of SSECopy(...). -// -// If the provided sse is no SSE-C copy encryption SSE returns -// sse unmodified. -func SSE(sse ServerSide) ServerSide { - if sse == nil || sse.Type() != SSEC { - return sse - } - if sse, ok := sse.(ssecCopy); ok { - return ssec(sse) - } - return sse -} - -// SSECopy transforms a SSE-C encryption into a SSE-C copy -// encryption. This is required for SSE-C key rotation or a SSE-C -// copy where the source and the destination should be encrypted. -// -// If the provided sse is no SSE-C encryption SSECopy returns -// sse unmodified. -func SSECopy(sse ServerSide) ServerSide { - if sse == nil || sse.Type() != SSEC { - return sse - } - if sse, ok := sse.(ssec); ok { - return ssecCopy(sse) - } - return sse -} - -type ssec [32]byte - -func (s ssec) Type() Type { return SSEC } - -func (s ssec) Marshal(h http.Header) { - keyMD5 := md5.Sum(s[:]) - h.Set(SseCustomerAlgorithm, "AES256") - h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) - h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) -} - -type ssecCopy [32]byte - -func (s ssecCopy) Type() Type { return SSEC } - -func (s ssecCopy) Marshal(h http.Header) { - keyMD5 := md5.Sum(s[:]) - h.Set(SseCopyCustomerAlgorithm, "AES256") - h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) - h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) -} - -type s3 struct{} - -func (s s3) Type() Type { return S3 } - -func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") } - -type kms struct { - key string - context []byte - hasContext bool -} - -func (s kms) Type() Type { return KMS } - -func (s kms) Marshal(h http.Header) { - h.Set(SseGenericHeader, "aws:kms") - if s.key != "" { - h.Set(SseKmsKeyID, s.key) - } - if s.hasContext { - h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) - } -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go deleted file mode 100644 index c52f78c..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ /dev/null @@ -1,491 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package lifecycle contains all the lifecycle related data types and marshallers. -package lifecycle - -import ( - "encoding/json" - "encoding/xml" - "errors" - "time" -) - -var errMissingStorageClass = errors.New("storage-class cannot be empty") - -// AbortIncompleteMultipartUpload structure, not supported yet on MinIO -type AbortIncompleteMultipartUpload struct { - XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"` - DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"` -} - -// IsDaysNull returns true if days field is null -func (n AbortIncompleteMultipartUpload) IsDaysNull() bool { - return n.DaysAfterInitiation == ExpirationDays(0) -} - -// MarshalXML if days after initiation is set to non-zero value -func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if n.IsDaysNull() { - return nil - } - type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload - return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start) -} - -// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire. -// Upon expiration, server permanently deletes the noncurrent object versions. -// Set this lifecycle configuration action on a bucket that has versioning enabled -// (or suspended) to request server delete noncurrent object versions at a -// specific period in the object's lifetime. -type NoncurrentVersionExpiration struct { - XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"` - NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"` - NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"` -} - -// MarshalXML if n is non-empty, i.e has a non-zero NoncurrentDays or NewerNoncurrentVersions. -func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if n.isNull() { - return nil - } - type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration - return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start) -} - -// IsDaysNull returns true if days field is null -func (n NoncurrentVersionExpiration) IsDaysNull() bool { - return n.NoncurrentDays == ExpirationDays(0) -} - -func (n NoncurrentVersionExpiration) isNull() bool { - return n.IsDaysNull() && n.NewerNoncurrentVersions == 0 -} - -// NoncurrentVersionTransition structure, set this action to request server to -// transition noncurrent object versions to different set storage classes -// at a specific period in the object's lifetime. 
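A sketch of the two noncurrent-version actions, assuming the NoncurrentVersionExpiration type above and the NoncurrentVersionTransition type defined next; the day counts and the "GLACIER" storage class are illustrative assumptions:

    package main

    import (
        "encoding/xml"
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/lifecycle"
    )

    func main() {
        // Keep at most 5 newer noncurrent versions; expire the rest after 30 days.
        nve := lifecycle.NoncurrentVersionExpiration{
            NoncurrentDays:          30,
            NewerNoncurrentVersions: 5,
        }
        // Tier noncurrent versions to a cheaper storage class after 7 days.
        nvt := lifecycle.NoncurrentVersionTransition{
            NoncurrentDays: 7,
            StorageClass:   "GLACIER",
        }
        for _, v := range []interface{}{nve, nvt} {
            out, err := xml.Marshal(v) // uses the custom MarshalXML methods above
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(string(out))
        }
    }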
-type NoncurrentVersionTransition struct { - XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"` - StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` - NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"` - NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"` -} - -// IsDaysNull returns true if days field is null -func (n NoncurrentVersionTransition) IsDaysNull() bool { - return n.NoncurrentDays == ExpirationDays(0) -} - -// IsStorageClassEmpty returns true if storage class field is empty -func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool { - return n.StorageClass == "" -} - -func (n NoncurrentVersionTransition) isNull() bool { - return n.StorageClass == "" -} - -// UnmarshalJSON implements custom JSON decoding for NoncurrentVersionTransition; -// it rejects an empty storage-class. -func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error { - type noncurrentVersionTransition NoncurrentVersionTransition - var nt noncurrentVersionTransition - err := json.Unmarshal(b, &nt) - if err != nil { - return err - } - - if nt.StorageClass == "" { - return errMissingStorageClass - } - *n = NoncurrentVersionTransition(nt) - return nil -} - -// MarshalXML is extended to leave out empty -// <NoncurrentVersionTransition> tags -func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if n.isNull() { - return nil - } - type noncurrentVersionTransitionWrapper NoncurrentVersionTransition - return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start) -} - -// Tag structure - a key/value pair representing an object tag on which to apply lifecycle configuration -type Tag struct { - XMLName xml.Name `xml:"Tag,omitempty" json:"-"` - Key string `xml:"Key,omitempty" json:"Key,omitempty"` - Value string `xml:"Value,omitempty" json:"Value,omitempty"` -} - -// IsEmpty returns whether this tag is empty or not. -func (tag Tag) IsEmpty() bool { - return tag.Key == "" -} - -// Transition structure - transition details of lifecycle configuration -type Transition struct { - XMLName xml.Name `xml:"Transition" json:"-"` - Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` - StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` - Days ExpirationDays `xml:"Days" json:"Days"` -} - -// UnmarshalJSON returns an error if storage-class is empty. -func (t *Transition) UnmarshalJSON(b []byte) error { - type transition Transition - var tr transition - err := json.Unmarshal(b, &tr) - if err != nil { - return err - } - - if tr.StorageClass == "" { - return errMissingStorageClass - } - *t = Transition(tr) - return nil -} - -// MarshalJSON customizes json encoding by omitting empty values -func (t Transition) MarshalJSON() ([]byte, error) { - if t.IsNull() { - return nil, nil - } - type transition struct { - Date *ExpirationDate `json:"Date,omitempty"` - StorageClass string `json:"StorageClass,omitempty"` - Days *ExpirationDays `json:"Days"` - } - - newt := transition{ - StorageClass: t.StorageClass, - } - - if !t.IsDateNull() { - newt.Date = &t.Date - } else { - newt.Days = &t.Days - } - return json.Marshal(newt) -} - -// IsDaysNull returns true if days field is null -func (t Transition) IsDaysNull() bool { - return t.Days == ExpirationDays(0) -} - -// IsDateNull returns true if date field is null -func (t Transition) IsDateNull() bool { - return t.Date.Time.IsZero() -} - -// IsNull returns true if no storage-class is set.
-func (t Transition) IsNull() bool { - return t.StorageClass == "" -} - -// MarshalXML encodes Transition only if it is non-null -func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { - if t.IsNull() { - return nil - } - type transitionWrapper Transition - return en.EncodeElement(transitionWrapper(t), startElement) -} - -// And - an And rule combining a prefix, tags and object-size bounds, to be used in a lifecycle rule Filter -type And struct { - XMLName xml.Name `xml:"And" json:"-"` - Prefix string `xml:"Prefix" json:"Prefix,omitempty"` - Tags []Tag `xml:"Tag" json:"Tags,omitempty"` - ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"` - ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"` -} - -// IsEmpty returns true if all And fields are empty -func (a And) IsEmpty() bool { - return len(a.Tags) == 0 && a.Prefix == "" && - a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0 -} - -// Filter will be used in selecting rule(s) for lifecycle configuration -type Filter struct { - XMLName xml.Name `xml:"Filter" json:"-"` - And And `xml:"And,omitempty" json:"And,omitempty"` - Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` - Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` - ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"` - ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"` -} - -// IsNull returns true if all Filter fields are empty. -func (f Filter) IsNull() bool { - return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" && - f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0 -} - -// MarshalJSON customizes json encoding by removing empty values. -func (f Filter) MarshalJSON() ([]byte, error) { - type filter struct { - And *And `json:"And,omitempty"` - Prefix string `json:"Prefix,omitempty"` - Tag *Tag `json:"Tag,omitempty"` - ObjectSizeLessThan int64 `json:"ObjectSizeLessThan,omitempty"` - ObjectSizeGreaterThan int64 `json:"ObjectSizeGreaterThan,omitempty"` - } - - newf := filter{ - Prefix: f.Prefix, - } - if !f.Tag.IsEmpty() { - newf.Tag = &f.Tag - } - if !f.And.IsEmpty() { - newf.And = &f.And - } - newf.ObjectSizeLessThan = f.ObjectSizeLessThan - newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan - return json.Marshal(newf) -} - -// MarshalXML - produces the xml representation of the Filter struct; -// only one of Prefix, And and Tag should be present in the output.
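A sketch of that precedence, based on the MarshalXML implementation that follows (And wins over Tag, which wins over the size/Prefix fallback); the prefix and tag values are illustrative assumptions:

    package main

    import (
        "encoding/xml"
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/lifecycle"
    )

    func main() {
        prefixOnly := lifecycle.Filter{Prefix: "logs/"}
        withTags := lifecycle.Filter{And: lifecycle.And{
            Prefix: "logs/",
            Tags:   []lifecycle.Tag{{Key: "env", Value: "prod"}, {Key: "team", Value: "infra"}},
        }}

        for _, f := range []lifecycle.Filter{prefixOnly, withTags} {
            out, err := xml.Marshal(f)
            if err != nil {
                log.Fatal(err)
            }
            // The first prints <Filter><Prefix>logs/</Prefix></Filter>;
            // the second wraps prefix and tags in a single <And> element.
            fmt.Println(string(out))
        }
    }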
-func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - - switch { - case !f.And.IsEmpty(): - if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil { - return err - } - case !f.Tag.IsEmpty(): - if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil { - return err - } - default: - if f.ObjectSizeLessThan > 0 { - if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil { - return err - } - break - } - if f.ObjectSizeGreaterThan > 0 { - if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil { - return err - } - break - } - // Print empty Prefix field only when everything else is empty - if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// ExpirationDays is a type alias to unmarshal Days in Expiration -type ExpirationDays int - -// MarshalXML encodes number of days to expire if it is non-zero and -// encodes empty string otherwise -func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if eDays == 0 { - return nil - } - return e.EncodeElement(int(eDays), startElement) -} - -// ExpirationDate is a embedded type containing time.Time to unmarshal -// Date in Expiration -type ExpirationDate struct { - time.Time -} - -// MarshalXML encodes expiration date if it is non-zero and encodes -// empty string otherwise -func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if eDate.Time.IsZero() { - return nil - } - return e.EncodeElement(eDate.Format(time.RFC3339), startElement) -} - -// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element. -type ExpireDeleteMarker ExpirationBoolean - -// IsEnabled returns true if the auto delete-marker expiration is enabled -func (e ExpireDeleteMarker) IsEnabled() bool { - return bool(e) -} - -// ExpirationBoolean represents an XML version of 'bool' type -type ExpirationBoolean bool - -// MarshalXML encodes delete marker boolean into an XML form. -func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if !b { - return nil - } - type booleanWrapper ExpirationBoolean - return e.EncodeElement(booleanWrapper(b), startElement) -} - -// IsEnabled returns true if the expiration boolean is enabled -func (b ExpirationBoolean) IsEnabled() bool { - return bool(b) -} - -// Expiration structure - expiration details of lifecycle configuration -type Expiration struct { - XMLName xml.Name `xml:"Expiration,omitempty" json:"-"` - Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` - Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` - DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"` - DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"` -} - -// MarshalJSON customizes json encoding by removing empty day/date specification. 
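The Rule and Configuration types defined further below tie these pieces into a full lifecycle document; a minimal sketch, assuming an illustrative bucket prefix and retention period:

    package main

    import (
        "encoding/xml"
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/lifecycle"
    )

    func main() {
        cfg := lifecycle.NewConfiguration()
        cfg.Rules = []lifecycle.Rule{{
            ID:         "expire-logs",
            Status:     "Enabled",
            RuleFilter: lifecycle.Filter{Prefix: "logs/"},
            Expiration: lifecycle.Expiration{Days: 90},
        }}
        out, err := xml.Marshal(cfg)
        if err != nil {
            log.Fatal(err)
        }
        // Produces <LifecycleConfiguration><Rule>...</Rule></LifecycleConfiguration>.
        fmt.Println(string(out))
    }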
-func (e Expiration) MarshalJSON() ([]byte, error) { - type expiration struct { - Date *ExpirationDate `json:"Date,omitempty"` - Days *ExpirationDays `json:"Days,omitempty"` - DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"` - DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"` - } - - newexp := expiration{ - DeleteMarker: e.DeleteMarker, - DeleteAll: e.DeleteAll, - } - if !e.IsDaysNull() { - newexp.Days = &e.Days - } - if !e.IsDateNull() { - newexp.Date = &e.Date - } - return json.Marshal(newexp) -} - -// IsDaysNull returns true if days field is null -func (e Expiration) IsDaysNull() bool { - return e.Days == ExpirationDays(0) -} - -// IsDateNull returns true if date field is null -func (e Expiration) IsDateNull() bool { - return e.Date.Time.IsZero() -} - -// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled -func (e Expiration) IsDeleteMarkerExpirationEnabled() bool { - return e.DeleteMarker.IsEnabled() -} - -// IsNull returns true if both date and days fields are null -func (e Expiration) IsNull() bool { - return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() -} - -// MarshalXML is expiration is non null -func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { - if e.IsNull() { - return nil - } - type expirationWrapper Expiration - return en.EncodeElement(expirationWrapper(e), startElement) -} - -// MarshalJSON customizes json encoding by omitting empty values -func (r Rule) MarshalJSON() ([]byte, error) { - type rule struct { - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` - Expiration *Expiration `json:"Expiration,omitempty"` - ID string `json:"ID"` - RuleFilter *Filter `json:"Filter,omitempty"` - NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` - NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"` - Prefix string `json:"Prefix,omitempty"` - Status string `json:"Status"` - Transition *Transition `json:"Transition,omitempty"` - } - newr := rule{ - Prefix: r.Prefix, - Status: r.Status, - ID: r.ID, - } - - if !r.RuleFilter.IsNull() { - newr.RuleFilter = &r.RuleFilter - } - if !r.AbortIncompleteMultipartUpload.IsDaysNull() { - newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload - } - if !r.Expiration.IsNull() { - newr.Expiration = &r.Expiration - } - if !r.Transition.IsNull() { - newr.Transition = &r.Transition - } - if !r.NoncurrentVersionExpiration.isNull() { - newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration - } - if !r.NoncurrentVersionTransition.isNull() { - newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition - } - - return json.Marshal(newr) -} - -// Rule represents a single rule in lifecycle configuration -type Rule struct { - XMLName xml.Name `xml:"Rule,omitempty" json:"-"` - AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` - Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` - ID string `xml:"ID" json:"ID"` - RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` - NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` - NoncurrentVersionTransition 
NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"` - Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` - Status string `xml:"Status" json:"Status"` - Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"` -} - -// Configuration is a collection of Rule objects. -type Configuration struct { - XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"` - Rules []Rule `xml:"Rule"` -} - -// Empty checks if the lifecycle configuration is empty -func (c *Configuration) Empty() bool { - if c == nil { - return true - } - return len(c.Rules) == 0 -} - -// NewConfiguration initializes a fresh lifecycle configuration -// for manipulation, such as setting and removing lifecycle rules -// and filters. -func NewConfiguration() *Configuration { - return &Configuration{} -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go deleted file mode 100644 index 126661a..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package notification - -// identity represents the user ID; this is a compliance field. -type identity struct { - PrincipalID string `json:"principalId"` -} - -// event bucket metadata. -type bucketMeta struct { - Name string `json:"name"` - OwnerIdentity identity `json:"ownerIdentity"` - ARN string `json:"arn"` -} - -// event object metadata. -type objectMeta struct { - Key string `json:"key"` - Size int64 `json:"size,omitempty"` - ETag string `json:"eTag,omitempty"` - ContentType string `json:"contentType,omitempty"` - UserMetadata map[string]string `json:"userMetadata,omitempty"` - VersionID string `json:"versionId,omitempty"` - Sequencer string `json:"sequencer"` -} - -// event server specific metadata. -type eventMeta struct { - SchemaVersion string `json:"s3SchemaVersion"` - ConfigurationID string `json:"configurationId"` - Bucket bucketMeta `json:"bucket"` - Object objectMeta `json:"object"` -} - -// sourceInfo represents information on the client that -// triggered the event notification. -type sourceInfo struct { - Host string `json:"host"` - Port string `json:"port"` - UserAgent string `json:"userAgent"` -} - -// Event represents an Amazon S3 bucket notification event.
-type Event struct { - EventVersion string `json:"eventVersion"` - EventSource string `json:"eventSource"` - AwsRegion string `json:"awsRegion"` - EventTime string `json:"eventTime"` - EventName string `json:"eventName"` - UserIdentity identity `json:"userIdentity"` - RequestParameters map[string]string `json:"requestParameters"` - ResponseElements map[string]string `json:"responseElements"` - S3 eventMeta `json:"s3"` - Source sourceInfo `json:"source"` -} - -// Info - represents the collection of notification events, additionally -// also reports errors if any while listening on bucket notifications. -type Info struct { - Records []Event - Err error -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go deleted file mode 100644 index a44799d..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ /dev/null @@ -1,440 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package notification - -import ( - "encoding/xml" - "errors" - "fmt" - "strings" - - "github.com/minio/minio-go/v7/pkg/set" -) - -// EventType is a S3 notification event associated to the bucket notification configuration -type EventType string - -// The role of all event types are described in : -// -// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations -const ( - ObjectCreatedAll EventType = "s3:ObjectCreated:*" - ObjectCreatedPut EventType = "s3:ObjectCreated:Put" - ObjectCreatedPost EventType = "s3:ObjectCreated:Post" - ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy" - ObjectCreatedDeleteTagging EventType = "s3:ObjectCreated:DeleteTagging" - ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload" - ObjectCreatedPutLegalHold EventType = "s3:ObjectCreated:PutLegalHold" - ObjectCreatedPutRetention EventType = "s3:ObjectCreated:PutRetention" - ObjectCreatedPutTagging EventType = "s3:ObjectCreated:PutTagging" - ObjectAccessedGet EventType = "s3:ObjectAccessed:Get" - ObjectAccessedHead EventType = "s3:ObjectAccessed:Head" - ObjectAccessedGetRetention EventType = "s3:ObjectAccessed:GetRetention" - ObjectAccessedGetLegalHold EventType = "s3:ObjectAccessed:GetLegalHold" - ObjectAccessedAll EventType = "s3:ObjectAccessed:*" - ObjectRemovedAll EventType = "s3:ObjectRemoved:*" - ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete" - ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated" - ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject" - ObjectTransitionAll EventType = "s3:ObjectTransition:*" - ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed" - ObjectTransitionComplete EventType = "s3:ObjectTransition:Complete" - ObjectTransitionPost EventType = "s3:ObjectRestore:Post" - ObjectTransitionCompleted 
EventType = "s3:ObjectRestore:Completed" - ObjectReplicationAll EventType = "s3:Replication:*" - ObjectReplicationOperationCompletedReplication EventType = "s3:Replication:OperationCompletedReplication" - ObjectReplicationOperationFailedReplication EventType = "s3:Replication:OperationFailedReplication" - ObjectReplicationOperationMissedThreshold EventType = "s3:Replication:OperationMissedThreshold" - ObjectReplicationOperationNotTracked EventType = "s3:Replication:OperationNotTracked" - ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold" - ObjectScannerManyVersions EventType = "s3:Scanner:ManyVersions" - ObjectScannerBigPrefix EventType = "s3:Scanner:BigPrefix" - ObjectScannerAll EventType = "s3:Scanner:*" - BucketCreatedAll EventType = "s3:BucketCreated:*" - BucketRemovedAll EventType = "s3:BucketRemoved:*" -) - -// FilterRule - child of S3Key, a tag in the notification xml which -// carries suffix/prefix filters -type FilterRule struct { - Name string `xml:"Name"` - Value string `xml:"Value"` -} - -// S3Key - child of Filter, a tag in the notification xml which -// carries suffix/prefix filters -type S3Key struct { - FilterRules []FilterRule `xml:"FilterRule,omitempty"` -} - -// Filter - a tag in the notification xml structure which carries -// suffix/prefix filters -type Filter struct { - S3Key S3Key `xml:"S3Key,omitempty"` -} - -// Arn - holds ARN information that will be sent to the web service, -// ARN desciption can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html -type Arn struct { - Partition string - Service string - Region string - AccountID string - Resource string -} - -// NewArn creates new ARN based on the given partition, service, region, account id and resource -func NewArn(partition, service, region, accountID, resource string) Arn { - return Arn{ - Partition: partition, - Service: service, - Region: region, - AccountID: accountID, - Resource: resource, - } -} - -var ( - // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn' - ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'") - // ErrInvalidArnFormat is returned when ARN string format is not valid - ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:::::'") -) - -// NewArnFromString parses string representation of ARN into Arn object. -// Returns an error if the string format is incorrect. -func NewArnFromString(arn string) (Arn, error) { - parts := strings.Split(arn, ":") - if len(parts) != 6 { - return Arn{}, ErrInvalidArnFormat - } - if parts[0] != "arn" { - return Arn{}, ErrInvalidArnPrefix - } - - return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil -} - -// String returns the string format of the ARN -func (arn Arn) String() string { - return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource -} - -// Config - represents one single notification configuration -// such as topic, queue or lambda configuration. -type Config struct { - ID string `xml:"Id,omitempty"` - Arn Arn `xml:"-"` - Events []EventType `xml:"Event"` - Filter *Filter `xml:"Filter,omitempty"` -} - -// NewConfig creates one notification config and sets the given ARN -func NewConfig(arn Arn) Config { - return Config{Arn: arn, Filter: &Filter{}} -} - -// AddEvents adds one event to the current notification config -func (t *Config) AddEvents(events ...EventType) { - t.Events = append(t.Events, events...) 
-} - -// AddFilterSuffix sets the suffix configuration to the current notification config -func (t *Config) AddFilterSuffix(suffix string) { - if t.Filter == nil { - t.Filter = &Filter{} - } - newFilterRule := FilterRule{Name: "suffix", Value: suffix} - // Replace any suffix rule if existing and add to the list otherwise - for index := range t.Filter.S3Key.FilterRules { - if t.Filter.S3Key.FilterRules[index].Name == "suffix" { - t.Filter.S3Key.FilterRules[index] = newFilterRule - return - } - } - t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) -} - -// AddFilterPrefix sets the prefix configuration to the current notification config -func (t *Config) AddFilterPrefix(prefix string) { - if t.Filter == nil { - t.Filter = &Filter{} - } - newFilterRule := FilterRule{Name: "prefix", Value: prefix} - // Replace any prefix rule if existing and add to the list otherwise - for index := range t.Filter.S3Key.FilterRules { - if t.Filter.S3Key.FilterRules[index].Name == "prefix" { - t.Filter.S3Key.FilterRules[index] = newFilterRule - return - } - } - t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) -} - -// EqualEventTypeList tells whether a and b contain the same events -func EqualEventTypeList(a, b []EventType) bool { - if len(a) != len(b) { - return false - } - setA := set.NewStringSet() - for _, i := range a { - setA.Add(string(i)) - } - - setB := set.NewStringSet() - for _, i := range b { - setB.Add(string(i)) - } - - return setA.Difference(setB).IsEmpty() -} - -// EqualFilterRuleList tells whether a and b contain the same filters -func EqualFilterRuleList(a, b []FilterRule) bool { - if len(a) != len(b) { - return false - } - - setA := set.NewStringSet() - for _, i := range a { - setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) - } - - setB := set.NewStringSet() - for _, i := range b { - setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) - } - - return setA.Difference(setB).IsEmpty() -} - -// Equal returns whether this `Config` is equal to another defined by the passed parameters -func (t *Config) Equal(events []EventType, prefix, suffix string) bool { - if t == nil { - return false - } - - // Compare events - passEvents := EqualEventTypeList(t.Events, events) - - // Compare filters - var newFilterRules []FilterRule - if prefix != "" { - newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix}) - } - if suffix != "" { - newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix}) - } - - var currentFilterRules []FilterRule - if t.Filter != nil { - currentFilterRules = t.Filter.S3Key.FilterRules - } - - passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules) - return passEvents && passFilters -} - -// TopicConfig carries one single topic notification configuration -type TopicConfig struct { - Config - Topic string `xml:"Topic"` -} - -// QueueConfig carries one single queue notification configuration -type QueueConfig struct { - Config - Queue string `xml:"Queue"` -} - -// LambdaConfig carries one single cloudfunction notification configuration -type LambdaConfig struct { - Config - Lambda string `xml:"CloudFunction"` -} - -// Configuration - the struct that represents the whole XML to be sent to the web service -type Configuration struct { - XMLName xml.Name `xml:"NotificationConfiguration"` - LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` - TopicConfigs []TopicConfig `xml:"TopicConfiguration"` - QueueConfigs []QueueConfig `xml:"QueueConfiguration"` -} - -// AddTopic 
adds a given topic config to the general bucket notification config -func (b *Configuration) AddTopic(topicConfig Config) bool { - newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()} - for _, n := range b.TopicConfigs { - // If new config matches existing one - if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range topicConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) - return true -} - -// AddQueue adds a given queue config to the general bucket notification config -func (b *Configuration) AddQueue(queueConfig Config) bool { - newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()} - for _, n := range b.QueueConfigs { - if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range queueConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) - return true -} - -// AddLambda adds a given lambda config to the general bucket notification config -func (b *Configuration) AddLambda(lambdaConfig Config) bool { - newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()} - for _, n := range b.LambdaConfigs { - if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range lambdaConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) - return true -} - -// RemoveTopicByArn removes all topic configurations that match the exact specified ARN -func (b *Configuration) RemoveTopicByArn(arn Arn) { - var topics []TopicConfig - for _, topic := range b.TopicConfigs { - if topic.Topic != arn.String() { - topics = append(topics, topic) - } - } - b.TopicConfigs = topics -} - -// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete -var ErrNoConfigMatch = errors.New("no notification configuration matched") - -// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix -func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { - removeIndex := -1 - for i, v := range b.TopicConfigs { - // if it matches events and filters, mark the index for deletion - if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) { - removeIndex = i - break // since we have at most one matching config - } - } - if removeIndex >= 0 { - b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...) 
- return nil - } - return ErrNoConfigMatch -} - -// RemoveQueueByArn removes all queue configurations that match the exact specified ARN -func (b *Configuration) RemoveQueueByArn(arn Arn) { - var queues []QueueConfig - for _, queue := range b.QueueConfigs { - if queue.Queue != arn.String() { - queues = append(queues, queue) - } - } - b.QueueConfigs = queues -} - -// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that match the exact specified ARN, events, prefix and suffix -func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { - removeIndex := -1 - for i, v := range b.QueueConfigs { - // if it matches events and filters, mark the index for deletion - if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) { - removeIndex = i - break // since we have at most one matching config - } - } - if removeIndex >= 0 { - b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...) - return nil - } - return ErrNoConfigMatch -} - -// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN -func (b *Configuration) RemoveLambdaByArn(arn Arn) { - var lambdas []LambdaConfig - for _, lambda := range b.LambdaConfigs { - if lambda.Lambda != arn.String() { - lambdas = append(lambdas, lambda) - } - } - b.LambdaConfigs = lambdas -} - -// RemoveLambdaByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix -func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { - removeIndex := -1 - for i, v := range b.LambdaConfigs { - // if it matches events and filters, mark the index for deletion - if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) { - removeIndex = i - break // since we have at most one matching config - } - } - if removeIndex >= 0 { - b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...) - return nil - } - return ErrNoConfigMatch -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go deleted file mode 100644 index 0abbf6e..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ /dev/null @@ -1,971 +0,0 @@ -/* - * MinIO Client (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
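Before the replication package below, a sketch tying together the notification types deleted above; the queue ARN and the prefix/suffix filters are illustrative assumptions:

    package main

    import (
        "encoding/xml"
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/notification"
    )

    func main() {
        // Hypothetical queue ARN; must have the six colon-separated fields.
        arn, err := notification.NewArnFromString("arn:minio:sqs:us-east-1:1:webhook")
        if err != nil {
            log.Fatal(err) // ErrInvalidArnPrefix or ErrInvalidArnFormat
        }
        qc := notification.NewConfig(arn)
        qc.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll)
        qc.AddFilterPrefix("photos/")
        qc.AddFilterSuffix(".jpg")

        cfg := notification.Configuration{}
        if ok := cfg.AddQueue(qc); !ok {
            log.Fatal("queue config overlaps an existing one")
        }
        out, err := xml.Marshal(cfg)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(out))
    }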
- */ - -package replication - -import ( - "bytes" - "encoding/xml" - "fmt" - "math" - "strconv" - "strings" - "time" - "unicode/utf8" - - "github.com/rs/xid" -) - -var errInvalidFilter = fmt.Errorf("invalid filter") - -// OptionType specifies operation to be performed on config -type OptionType string - -const ( - // AddOption specifies addition of rule to config - AddOption OptionType = "Add" - // SetOption specifies modification of existing rule to config - SetOption OptionType = "Set" - - // RemoveOption specifies rule options are for removing a rule - RemoveOption OptionType = "Remove" - // ImportOption is for getting current config - ImportOption OptionType = "Import" -) - -// Options represents options to set a replication configuration rule -type Options struct { - Op OptionType - RoleArn string - ID string - Prefix string - RuleStatus string - Priority string - TagString string - StorageClass string - DestBucket string - IsTagSet bool - IsSCSet bool - ReplicateDeletes string // replicate versioned deletes - ReplicateDeleteMarkers string // replicate soft deletes - ReplicaSync string // replicate replica metadata modifications - ExistingObjectReplicate string -} - -// Tags returns a slice of tags for a rule -func (opts Options) Tags() ([]Tag, error) { - var tagList []Tag - tagTokens := strings.Split(opts.TagString, "&") - for _, tok := range tagTokens { - if tok == "" { - break - } - kv := strings.SplitN(tok, "=", 2) - if len(kv) != 2 { - return []Tag{}, fmt.Errorf("tags should be entered as comma separated k=v pairs") - } - tagList = append(tagList, Tag{ - Key: kv[0], - Value: kv[1], - }) - } - return tagList, nil -} - -// Config - replication configuration specified in -// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html -type Config struct { - XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"` - Rules []Rule `xml:"Rule" json:"Rules"` - Role string `xml:"Role" json:"Role"` -} - -// Empty returns true if config is not set -func (c *Config) Empty() bool { - return len(c.Rules) == 0 -} - -// AddRule adds a new rule to existing replication config. If a rule exists with the -// same ID, then the rule is replaced. -func (c *Config) AddRule(opts Options) error { - priority, err := strconv.Atoi(opts.Priority) - if err != nil { - return err - } - var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite - if opts.RoleArn != "" { - tokens := strings.Split(opts.RoleArn, ":") - if len(tokens) != 6 { - return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn) - } - switch { - case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0: - c.Role = opts.RoleArn - compatSw = true - case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"): - c.Role = opts.RoleArn - default: - return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn) - } - } - - var status Status - // toggle rule status for edit option - switch opts.RuleStatus { - case "enable": - status = Enabled - case "disable": - status = Disabled - default: - return fmt.Errorf("rule state should be either [enable|disable]") - } - - tags, err := opts.Tags() - if err != nil { - return err - } - andVal := And{ - Tags: tags, - } - filter := Filter{Prefix: opts.Prefix} - // only a single tag is set. 
- if opts.Prefix == "" && len(tags) == 1 { - filter.Tag = tags[0] - } - // both prefix and tag are present - if len(andVal.Tags) > 1 || opts.Prefix != "" { - filter.And = andVal - filter.And.Prefix = opts.Prefix - filter.Prefix = "" - filter.Tag = Tag{} - } - if opts.ID == "" { - opts.ID = xid.New().String() - } - - destBucket := opts.DestBucket - // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html - if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 { - if len(btokens) == 1 && compatSw { - destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) - } else { - return fmt.Errorf("destination bucket needs to be in Arn format") - } - } - dmStatus := Disabled - if opts.ReplicateDeleteMarkers != "" { - switch opts.ReplicateDeleteMarkers { - case "enable": - dmStatus = Enabled - case "disable": - dmStatus = Disabled - default: - return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable") - } - } - - vDeleteStatus := Disabled - if opts.ReplicateDeletes != "" { - switch opts.ReplicateDeletes { - case "enable": - vDeleteStatus = Enabled - case "disable": - vDeleteStatus = Disabled - default: - return fmt.Errorf("ReplicateDeletes should be either enable|disable") - } - } - var replicaSync Status - // replica sync is by default Enabled, unless specified. - switch opts.ReplicaSync { - case "enable", "": - replicaSync = Enabled - case "disable": - replicaSync = Disabled - default: - return fmt.Errorf("replica metadata sync should be either [enable|disable]") - } - - var existingStatus Status - if opts.ExistingObjectReplicate != "" { - switch opts.ExistingObjectReplicate { - case "enable": - existingStatus = Enabled - case "disable", "": - existingStatus = Disabled - default: - return fmt.Errorf("existingObjectReplicate should be either enable|disable") - } - } - newRule := Rule{ - ID: opts.ID, - Priority: priority, - Status: status, - Filter: filter, - Destination: Destination{ - Bucket: destBucket, - StorageClass: opts.StorageClass, - }, - DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus}, - DeleteReplication: DeleteReplication{Status: vDeleteStatus}, - // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow - // automatic failover as the expectation in this case is that replica and source should be identical. - // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html - SourceSelectionCriteria: SourceSelectionCriteria{ - ReplicaModifications: ReplicaModifications{ - Status: replicaSync, - }, - }, - // By default disable existing object replication unless selected - ExistingObjectReplication: ExistingObjectReplication{ - Status: existingStatus, - }, - } - - // validate rule after overlaying priority for pre-existing rule being disabled. - if err := newRule.Validate(); err != nil { - return err - } - // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration - if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw { - for i := range c.Rules { - c.Rules[i].Destination.Bucket = c.Role - } - c.Role = "" - } - - for _, rule := range c.Rules { - if rule.Priority == newRule.Priority { - return fmt.Errorf("priority must be unique. 
Replication configuration already has a rule with this priority") - } - if rule.ID == newRule.ID { - return fmt.Errorf("a rule exists with this ID") - } - } - - c.Rules = append(c.Rules, newRule) - return nil -} - -// EditRule modifies an existing rule in replication config -func (c *Config) EditRule(opts Options) error { - if opts.ID == "" { - return fmt.Errorf("rule ID missing") - } - // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS. - if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 { - for i := range c.Rules { - c.Rules[i].Destination.Bucket = c.Role - } - c.Role = "" - } - - rIdx := -1 - var newRule Rule - for i, rule := range c.Rules { - if rule.ID == opts.ID { - rIdx = i - newRule = rule - break - } - } - if rIdx < 0 { - return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID) - } - prefixChg := opts.Prefix != newRule.Prefix() - if opts.IsTagSet || prefixChg { - prefix := newRule.Prefix() - if prefix != opts.Prefix { - prefix = opts.Prefix - } - tags := []Tag{newRule.Filter.Tag} - if len(newRule.Filter.And.Tags) != 0 { - tags = newRule.Filter.And.Tags - } - var err error - if opts.IsTagSet { - tags, err = opts.Tags() - if err != nil { - return err - } - } - andVal := And{ - Tags: tags, - } - - filter := Filter{Prefix: prefix} - // only a single tag is set. - if prefix == "" && len(tags) == 1 { - filter.Tag = tags[0] - } - // both prefix and tag are present - if len(andVal.Tags) > 1 || prefix != "" { - filter.And = andVal - filter.And.Prefix = prefix - filter.Prefix = "" - filter.Tag = Tag{} - } - newRule.Filter = filter - } - - // toggle rule status for edit option - if opts.RuleStatus != "" { - switch opts.RuleStatus { - case "enable": - newRule.Status = Enabled - case "disable": - newRule.Status = Disabled - default: - return fmt.Errorf("rule state should be either [enable|disable]") - } - } - // set DeleteMarkerReplication rule status for edit option - if opts.ReplicateDeleteMarkers != "" { - switch opts.ReplicateDeleteMarkers { - case "enable": - newRule.DeleteMarkerReplication.Status = Enabled - case "disable": - newRule.DeleteMarkerReplication.Status = Disabled - default: - return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]") - } - } - - // set DeleteReplication rule status for edit option. 
This is a MinIO specific - // option to replicate versioned deletes - if opts.ReplicateDeletes != "" { - switch opts.ReplicateDeletes { - case "enable": - newRule.DeleteReplication.Status = Enabled - case "disable": - newRule.DeleteReplication.Status = Disabled - default: - return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]") - } - } - - if opts.ReplicaSync != "" { - switch opts.ReplicaSync { - case "enable", "": - newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled - case "disable": - newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled - default: - return fmt.Errorf("replica metadata sync should be either [enable|disable]") - } - } - - if opts.ExistingObjectReplicate != "" { - switch opts.ExistingObjectReplicate { - case "enable": - newRule.ExistingObjectReplication.Status = Enabled - case "disable": - newRule.ExistingObjectReplication.Status = Disabled - default: - return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]") - } - } - if opts.IsSCSet { - newRule.Destination.StorageClass = opts.StorageClass - } - if opts.Priority != "" { - priority, err := strconv.Atoi(opts.Priority) - if err != nil { - return err - } - newRule.Priority = priority - } - if opts.DestBucket != "" { - destBucket := opts.DestBucket - // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html - if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 { - return fmt.Errorf("destination bucket needs to be in Arn format") - } - newRule.Destination.Bucket = destBucket - } - // validate rule - if err := newRule.Validate(); err != nil { - return err - } - // ensure priority and destination bucket restrictions are not violated - for idx, rule := range c.Rules { - if rule.Priority == newRule.Priority && rIdx != idx { - return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") - } - if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID { - return fmt.Errorf("invalid destination bucket for this rule") - } - } - - c.Rules[rIdx] = newRule - return nil -} - -// RemoveRule removes a rule from replication config. -func (c *Config) RemoveRule(opts Options) error { - var newRules []Rule - ruleFound := false - for _, rule := range c.Rules { - if rule.ID != opts.ID { - newRules = append(newRules, rule) - continue - } - ruleFound = true - } - if !ruleFound { - return fmt.Errorf("Rule with ID %s not found", opts.ID) - } - if len(newRules) == 0 { - return fmt.Errorf("replication configuration should have at least one rule") - } - c.Rules = newRules - return nil -} - -// Rule - a rule for replication configuration. 
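For orientation, a minimal sketch of driving the rule-management API above from application code. The rule ID, prefix, tag string, and destination bucket ARN are illustrative placeholders, not values taken from this patch:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	var cfg replication.Config

	// All options arrive as strings, mirroring CLI flags: Priority must
	// parse as an integer and DestBucket must be a six-token ARN.
	err := cfg.AddRule(replication.Options{
		ID:         "rule-1",                     // hypothetical rule ID
		Prefix:     "logs/",
		RuleStatus: "enable",
		Priority:   "1",
		TagString:  "env=prod&team=storage",      // parsed by Options.Tags()
		DestBucket: "arn:aws:s3:::backup-bucket", // placeholder ARN
	})
	if err != nil {
		fmt.Println("AddRule:", err)
		return
	}
	// With both a prefix and multiple tags, the filter lands under <And>.
	fmt.Printf("%d rule(s), prefix=%q, tags=%q\n",
		len(cfg.Rules), cfg.Rules[0].Prefix(), cfg.Rules[0].Tags())
}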
-type Rule struct { - XMLName xml.Name `xml:"Rule" json:"-"` - ID string `xml:"ID,omitempty"` - Status Status `xml:"Status"` - Priority int `xml:"Priority"` - DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"` - DeleteReplication DeleteReplication `xml:"DeleteReplication"` - Destination Destination `xml:"Destination"` - Filter Filter `xml:"Filter" json:"Filter"` - SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"` - ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"` -} - -// Validate validates the rule for correctness -func (r Rule) Validate() error { - if err := r.validateID(); err != nil { - return err - } - if err := r.validateStatus(); err != nil { - return err - } - if err := r.validateFilter(); err != nil { - return err - } - - if r.Priority < 0 && r.Status == Enabled { - return fmt.Errorf("priority must be set for the rule") - } - - if err := r.validateStatus(); err != nil { - return err - } - return r.ExistingObjectReplication.Validate() -} - -// validateID - checks if ID is valid or not. -func (r Rule) validateID() error { - // cannot be longer than 255 characters - if len(r.ID) > 255 { - return fmt.Errorf("ID must be less than 255 characters") - } - return nil -} - -// validateStatus - checks if status is valid or not. -func (r Rule) validateStatus() error { - // Status can't be empty - if len(r.Status) == 0 { - return fmt.Errorf("status cannot be empty") - } - - // Status must be one of Enabled or Disabled - if r.Status != Enabled && r.Status != Disabled { - return fmt.Errorf("status must be set to either Enabled or Disabled") - } - return nil -} - -func (r Rule) validateFilter() error { - return r.Filter.Validate() -} - -// Prefix - a rule can either have prefix under or under -// . This method returns the prefix from the -// location where it is available -func (r Rule) Prefix() string { - if r.Filter.Prefix != "" { - return r.Filter.Prefix - } - return r.Filter.And.Prefix -} - -// Tags - a rule can either have tag under or under -// . This method returns all the tags from the -// rule in the format tag1=value1&tag2=value2 -func (r Rule) Tags() string { - ts := []Tag{r.Filter.Tag} - if len(r.Filter.And.Tags) != 0 { - ts = r.Filter.And.Tags - } - - var buf bytes.Buffer - for _, t := range ts { - if buf.Len() > 0 { - buf.WriteString("&") - } - buf.WriteString(t.String()) - } - return buf.String() -} - -// Filter - a filter for a replication configuration Rule. -type Filter struct { - XMLName xml.Name `xml:"Filter" json:"-"` - Prefix string `json:"Prefix,omitempty"` - And And `xml:"And,omitempty" json:"And,omitempty"` - Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` -} - -// Validate - validates the filter element -func (f Filter) Validate() error { - // A Filter must have exactly one of Prefix, Tag, or And specified. - if !f.And.isEmpty() { - if f.Prefix != "" { - return errInvalidFilter - } - if !f.Tag.IsEmpty() { - return errInvalidFilter - } - } - if f.Prefix != "" { - if !f.Tag.IsEmpty() { - return errInvalidFilter - } - } - if !f.Tag.IsEmpty() { - if err := f.Tag.Validate(); err != nil { - return err - } - } - return nil -} - -// Tag - a tag for a replication configuration Rule filter. 
-type Tag struct { - XMLName xml.Name `json:"-"` - Key string `xml:"Key,omitempty" json:"Key,omitempty"` - Value string `xml:"Value,omitempty" json:"Value,omitempty"` -} - -func (tag Tag) String() string { - if tag.IsEmpty() { - return "" - } - return tag.Key + "=" + tag.Value -} - -// IsEmpty returns whether this tag is empty or not. -func (tag Tag) IsEmpty() bool { - return tag.Key == "" -} - -// Validate checks this tag. -func (tag Tag) Validate() error { - if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 { - return fmt.Errorf("invalid Tag Key") - } - - if utf8.RuneCountInString(tag.Value) > 256 { - return fmt.Errorf("invalid Tag Value") - } - return nil -} - -// Destination - destination in ReplicationConfiguration. -type Destination struct { - XMLName xml.Name `xml:"Destination" json:"-"` - Bucket string `xml:"Bucket" json:"Bucket"` - StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` -} - -// And - a tag to combine a prefix and multiple tags for replication configuration rule. -type And struct { - XMLName xml.Name `xml:"And,omitempty" json:"-"` - Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` - Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` -} - -// isEmpty returns true if Tags field is null -func (a And) isEmpty() bool { - return len(a.Tags) == 0 && a.Prefix == "" -} - -// Status represents Enabled/Disabled status -type Status string - -// Supported status types -const ( - Enabled Status = "Enabled" - Disabled Status = "Disabled" -) - -// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html -type DeleteMarkerReplication struct { - Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default -} - -// IsEmpty returns true if DeleteMarkerReplication is not set -func (d DeleteMarkerReplication) IsEmpty() bool { - return len(d.Status) == 0 -} - -// DeleteReplication - whether versioned deletes are replicated - this -// is a MinIO specific extension -type DeleteReplication struct { - Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default -} - -// IsEmpty returns true if DeleteReplication is not set -func (d DeleteReplication) IsEmpty() bool { - return len(d.Status) == 0 -} - -// ReplicaModifications specifies if replica modification sync is enabled -type ReplicaModifications struct { - Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default -} - -// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration. -type SourceSelectionCriteria struct { - ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"` -} - -// IsValid - checks whether SourceSelectionCriteria is valid or not. 
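Since these types exist to round-trip the S3 ReplicationConfiguration document, a short sketch of what they serialize to may help; the output shape is inferred from the struct tags above, so treat it as approximate:

package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	cfg := replication.Config{
		Rules: []replication.Rule{{
			ID:          "rule-1", // placeholder
			Status:      replication.Enabled,
			Priority:    1,
			Filter:      replication.Filter{Prefix: "logs/"},
			Destination: replication.Destination{Bucket: "arn:aws:s3:::backup-bucket"},
		}},
	}
	out, err := xml.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <ReplicationConfiguration>
	//   <Rule>
	//     <ID>rule-1</ID>
	//     <Status>Enabled</Status>
	//     <Priority>1</Priority>
	//     ...
	//   </Rule>
	//   <Role></Role>
	// </ReplicationConfiguration>
}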
-func (s SourceSelectionCriteria) IsValid() bool { - return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled -} - -// Validate source selection criteria -func (s SourceSelectionCriteria) Validate() error { - if (s == SourceSelectionCriteria{}) { - return nil - } - if !s.IsValid() { - return fmt.Errorf("invalid ReplicaModification status") - } - return nil -} - -// ExistingObjectReplication - whether existing object replication is enabled -type ExistingObjectReplication struct { - Status Status `xml:"Status"` // should be set to "Disabled" by default -} - -// IsEmpty returns true if DeleteMarkerReplication is not set -func (e ExistingObjectReplication) IsEmpty() bool { - return len(e.Status) == 0 -} - -// Validate validates whether the status is disabled. -func (e ExistingObjectReplication) Validate() error { - if e.IsEmpty() { - return nil - } - if e.Status != Disabled && e.Status != Enabled { - return fmt.Errorf("invalid ExistingObjectReplication status") - } - return nil -} - -// TargetMetrics represents inline replication metrics -// such as pending, failed and completed bytes in total for a bucket remote target -type TargetMetrics struct { - // Completed count - ReplicatedCount uint64 `json:"replicationCount,omitempty"` - // Completed size in bytes - ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` - // Bandwidth limit in bytes/sec for this target - BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"` - // Current bandwidth used in bytes/sec for this target - CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"` - // errors seen in replication in last minute, hour and total - Failed TimedErrStats `json:"failed,omitempty"` - // Deprecated fields - // Pending size in bytes - PendingSize uint64 `json:"pendingReplicationSize,omitempty"` - // Total Replica size in bytes - ReplicaSize uint64 `json:"replicaSize,omitempty"` - // Failed size in bytes - FailedSize uint64 `json:"failedReplicationSize,omitempty"` - // Total number of pending operations including metadata updates - PendingCount uint64 `json:"pendingReplicationCount,omitempty"` - // Total number of failed operations including metadata updates - FailedCount uint64 `json:"failedReplicationCount,omitempty"` -} - -// Metrics represents inline replication metrics for a bucket. 
-type Metrics struct { - Stats map[string]TargetMetrics - // Completed size in bytes across targets - ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` - // Total Replica size in bytes across targets - ReplicaSize uint64 `json:"replicaSize,omitempty"` - // Total Replica counts - ReplicaCount int64 `json:"replicaCount,omitempty"` - // Total Replicated count - ReplicatedCount int64 `json:"replicationCount,omitempty"` - // errors seen in replication in last minute, hour and total - Errors TimedErrStats `json:"failed,omitempty"` - // Total number of entries that are queued for replication - QStats InQueueMetric `json:"queued"` - // Deprecated fields - // Total Pending size in bytes across targets - PendingSize uint64 `json:"pendingReplicationSize,omitempty"` - // Failed size in bytes across targets - FailedSize uint64 `json:"failedReplicationSize,omitempty"` - // Total number of pending operations including metadata updates across targets - PendingCount uint64 `json:"pendingReplicationCount,omitempty"` - // Total number of failed operations including metadata updates across targets - FailedCount uint64 `json:"failedReplicationCount,omitempty"` -} - -// RStat - has count and bytes for replication metrics -type RStat struct { - Count float64 `json:"count"` - Bytes int64 `json:"bytes"` -} - -// Add two RStat -func (r RStat) Add(r1 RStat) RStat { - return RStat{ - Count: r.Count + r1.Count, - Bytes: r.Bytes + r1.Bytes, - } -} - -// TimedErrStats holds error stats for a time period -type TimedErrStats struct { - LastMinute RStat `json:"lastMinute"` - LastHour RStat `json:"lastHour"` - Totals RStat `json:"totals"` -} - -// Add two TimedErrStats -func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats { - return TimedErrStats{ - LastMinute: te.LastMinute.Add(o.LastMinute), - LastHour: te.LastHour.Add(o.LastHour), - Totals: te.Totals.Add(o.Totals), - } -} - -// ResyncTargetsInfo provides replication target information to resync replicated data. -type ResyncTargetsInfo struct { - Targets []ResyncTarget `json:"target,omitempty"` -} - -// ResyncTarget provides the replica resources and resetID to initiate resync replication. -type ResyncTarget struct { - Arn string `json:"arn"` - ResetID string `json:"resetid"` - StartTime time.Time `json:"startTime,omitempty"` - EndTime time.Time `json:"endTime,omitempty"` - // Status of resync operation - ResyncStatus string `json:"resyncStatus,omitempty"` - // Completed size in bytes - ReplicatedSize int64 `json:"completedReplicationSize,omitempty"` - // Failed size in bytes - FailedSize int64 `json:"failedReplicationSize,omitempty"` - // Total number of failed operations - FailedCount int64 `json:"failedReplicationCount,omitempty"` - // Total number of completed operations - ReplicatedCount int64 `json:"replicationCount,omitempty"` - // Last bucket/object replicated. 
- Bucket string `json:"bucket,omitempty"` - Object string `json:"object,omitempty"` -} - -// XferStats holds transfer rate info for uploads/sec -type XferStats struct { - AvgRate float64 `json:"avgRate"` - PeakRate float64 `json:"peakRate"` - CurrRate float64 `json:"currRate"` -} - -// Merge two XferStats -func (x *XferStats) Merge(x1 XferStats) { - x.AvgRate += x1.AvgRate - x.PeakRate += x1.PeakRate - x.CurrRate += x1.CurrRate -} - -// QStat holds count and bytes for objects in replication queue -type QStat struct { - Count float64 `json:"count"` - Bytes float64 `json:"bytes"` -} - -// Add 2 QStat entries -func (q *QStat) Add(q1 QStat) { - q.Count += q1.Count - q.Bytes += q1.Bytes -} - -// InQueueMetric holds stats for objects in replication queue -type InQueueMetric struct { - Curr QStat `json:"curr" msg:"cq"` - Avg QStat `json:"avg" msg:"aq"` - Max QStat `json:"peak" msg:"pq"` -} - -// MetricName name of replication metric -type MetricName string - -const ( - // Large is a metric name for large objects >=128MiB - Large MetricName = "Large" - // Small is a metric name for objects <128MiB size - Small MetricName = "Small" - // Total is a metric name for total objects - Total MetricName = "Total" -) - -// WorkerStat has stats on number of replication workers -type WorkerStat struct { - Curr int32 `json:"curr"` - Avg float32 `json:"avg"` - Max int32 `json:"max"` -} - -// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes -// and number of entries that failed replication after 3 retries -type ReplMRFStats struct { - LastFailedCount uint64 `json:"failedCount_last5min"` - // Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start. - TotalDroppedCount uint64 `json:"droppedCount_since_uptime"` - // Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start. 
- TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"` -} - -// ReplQNodeStats holds stats for a node in replication queue -type ReplQNodeStats struct { - NodeName string `json:"nodeName"` - Uptime int64 `json:"uptime"` - Workers WorkerStat `json:"activeWorkers"` - - XferStats map[MetricName]XferStats `json:"transferSummary"` - TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"` - - QStats InQueueMetric `json:"queueStats"` - MRFStats ReplMRFStats `json:"mrfStats"` -} - -// ReplQueueStats holds stats for replication queue across nodes -type ReplQueueStats struct { - Nodes []ReplQNodeStats `json:"nodes"` -} - -// Workers returns number of workers across all nodes -func (q ReplQueueStats) Workers() (tot WorkerStat) { - for _, node := range q.Nodes { - tot.Avg += node.Workers.Avg - tot.Curr += node.Workers.Curr - if tot.Max < node.Workers.Max { - tot.Max = node.Workers.Max - } - } - if len(q.Nodes) > 0 { - tot.Avg /= float32(len(q.Nodes)) - tot.Curr /= int32(len(q.Nodes)) - } - return tot -} - -// qStatSummary returns cluster level stats for objects in replication queue -func (q ReplQueueStats) qStatSummary() InQueueMetric { - m := InQueueMetric{} - for _, v := range q.Nodes { - m.Avg.Add(v.QStats.Avg) - m.Curr.Add(v.QStats.Curr) - if m.Max.Count < v.QStats.Max.Count { - m.Max.Add(v.QStats.Max) - } - } - return m -} - -// ReplQStats holds stats for objects in replication queue -type ReplQStats struct { - Uptime int64 `json:"uptime"` - Workers WorkerStat `json:"workers"` - - XferStats map[MetricName]XferStats `json:"xferStats"` - TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"` - - QStats InQueueMetric `json:"qStats"` - MRFStats ReplMRFStats `json:"mrfStats"` -} - -// QStats returns cluster level stats for objects in replication queue -func (q ReplQueueStats) QStats() (r ReplQStats) { - r.QStats = q.qStatSummary() - r.XferStats = make(map[MetricName]XferStats) - r.TgtXferStats = make(map[string]map[MetricName]XferStats) - r.Workers = q.Workers() - - for _, node := range q.Nodes { - for arn := range node.TgtXferStats { - xmap, ok := node.TgtXferStats[arn] - if !ok { - xmap = make(map[MetricName]XferStats) - } - for m, v := range xmap { - st, ok := r.XferStats[m] - if !ok { - st = XferStats{} - } - st.AvgRate += v.AvgRate - st.CurrRate += v.CurrRate - st.PeakRate = math.Max(st.PeakRate, v.PeakRate) - if _, ok := r.TgtXferStats[arn]; !ok { - r.TgtXferStats[arn] = make(map[MetricName]XferStats) - } - r.TgtXferStats[arn][m] = st - } - } - for k, v := range node.XferStats { - st, ok := r.XferStats[k] - if !ok { - st = XferStats{} - } - st.AvgRate += v.AvgRate - st.CurrRate += v.CurrRate - st.PeakRate = math.Max(st.PeakRate, v.PeakRate) - r.XferStats[k] = st - } - r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount - r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount - r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes - r.Uptime += node.Uptime - } - if len(q.Nodes) > 0 { - r.Uptime /= int64(len(q.Nodes)) // average uptime - } - return -} - -// MetricsV2 represents replication metrics for a bucket. 
-type MetricsV2 struct { - Uptime int64 `json:"uptime"` - CurrentStats Metrics `json:"currStats"` - QueueStats ReplQueueStats `json:"queueStats"` -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go deleted file mode 100644 index 056e78a..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go +++ /dev/null @@ -1,411 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3utils - -import ( - "bytes" - "encoding/hex" - "errors" - "net" - "net/url" - "regexp" - "sort" - "strings" - "unicode/utf8" -) - -// Sentinel URL is the default url value which is invalid. -var sentinelURL = url.URL{} - -// IsValidDomain validates if input string is a valid domain name. -func IsValidDomain(host string) bool { - // See RFC 1035, RFC 3696. - host = strings.TrimSpace(host) - if len(host) == 0 || len(host) > 255 { - return false - } - // host cannot start or end with "-" - if host[len(host)-1:] == "-" || host[:1] == "-" { - return false - } - // host cannot start or end with "_" - if host[len(host)-1:] == "_" || host[:1] == "_" { - return false - } - // host cannot start with a "." - if host[:1] == "." { - return false - } - // All non alphanumeric characters are invalid. - if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:> 1 { - return parts[1] - } - - parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - - parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - - parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - - parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - - parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - - parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - - parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - - return "" -} - -// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint. -func IsAliyunOSSEndpoint(endpointURL url.URL) bool { - return strings.HasSuffix(endpointURL.Host, "aliyuncs.com") -} - -// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint. -func IsAmazonEndpoint(endpointURL url.URL) bool { - if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" { - return true - } - return GetRegionFromURL(endpointURL) != "" -} - -// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint. 
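A quick sketch of the endpoint helpers in use; the dual-stack host below is an arbitrary example region, not something this patch references:

package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	u, _ := url.Parse("https://s3.dualstack.eu-west-1.amazonaws.com")
	fmt.Println(s3utils.GetRegionFromURL(*u)) // eu-west-1
	fmt.Println(s3utils.IsAmazonEndpoint(*u)) // true

	g, _ := url.Parse("https://storage.googleapis.com")
	fmt.Println(s3utils.IsGoogleEndpoint(*g)) // true
}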
-func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || - endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" || - IsAmazonFIPSGovCloudEndpoint(endpointURL)) -} - -// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud. -func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-") -} - -// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com") -} - -// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint -// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html. -func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return amazonS3HostPrivateLink.MatchString(endpointURL.Host) -} - -// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. -func IsGoogleEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return endpointURL.Host == "storage.googleapis.com" -} - -// Expects ascii encoded strings - from output of urlEncodePath -func percentEncodeSlash(s string) string { - return strings.ReplaceAll(s, "/", "%2F") -} - -// QueryEncode - encodes query values in their URL encoded form. In -// addition to the percent encoding performed by urlEncodePath() used -// here, it also percent encodes '/' (forward slash) -func QueryEncode(v url.Values) string { - if v == nil { - return "" - } - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := percentEncodeSlash(EncodePath(k)) + "=" - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(percentEncodeSlash(EncodePath(v))) - } - } - return buf.String() -} - -// TagDecode - decodes canonical tag into map of key and value. -func TagDecode(ctag string) map[string]string { - if ctag == "" { - return map[string]string{} - } - tags := strings.Split(ctag, "&") - tagMap := make(map[string]string, len(tags)) - var err error - for _, tag := range tags { - kvs := strings.SplitN(tag, "=", 2) - if len(kvs) == 0 { - return map[string]string{} - } - if len(kvs) == 1 { - return map[string]string{} - } - tagMap[kvs[0]], err = url.PathUnescape(kvs[1]) - if err != nil { - continue - } - } - return tagMap -} - -// TagEncode - encodes tag values in their URL encoded form. 
In -// addition to the percent encoding performed by urlEncodePath() used -// here, it also percent encodes '/' (forward slash) -func TagEncode(tags map[string]string) string { - if tags == nil { - return "" - } - values := url.Values{} - for k, v := range tags { - values[k] = []string{v} - } - return QueryEncode(values) -} - -// if object matches reserved string, no need to encode them -var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") - -// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences -// -// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 -// non english characters cannot be parsed due to the nature in which url.Encode() is written -// -// This function on the other hand is a direct replacement for url.Encode() technique to support -// pretty much every UTF-8 character. -func EncodePath(pathName string) string { - if reservedObjectNames.MatchString(pathName) { - return pathName - } - var encodedPathname strings.Builder - for _, s := range pathName { - if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) - encodedPathname.WriteRune(s) - continue - } - switch s { - case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) - encodedPathname.WriteRune(s) - continue - default: - l := utf8.RuneLen(s) - if l < 0 { - // if utf8 cannot convert return the same string as is - return pathName - } - u := make([]byte, l) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname.WriteString("%" + strings.ToUpper(hex)) - } - } - } - return encodedPathname.String() -} - -// We support '.' with bucket names but we fallback to using path -// style requests instead for such buckets. -var ( - validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) - validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) - ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) -) - -// Common checker for both stricter and basic validation. -func checkBucketNameCommon(bucketName string, strict bool) (err error) { - if strings.TrimSpace(bucketName) == "" { - return errors.New("Bucket name cannot be empty") - } - if len(bucketName) < 3 { - return errors.New("Bucket name cannot be shorter than 3 characters") - } - if len(bucketName) > 63 { - return errors.New("Bucket name cannot be longer than 63 characters") - } - if ipAddress.MatchString(bucketName) { - return errors.New("Bucket name cannot be an ip address") - } - if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { - return errors.New("Bucket name contains invalid characters") - } - if strict { - if !validBucketNameStrict.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") - } - return err - } - if !validBucketName.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") - } - return err -} - -// CheckValidBucketName - checks if we have a valid input bucket name. -func CheckValidBucketName(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, false) -} - -// CheckValidBucketNameStrict - checks if we have a valid input bucket name. -// This is a stricter version. 
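A minimal sketch of the encoding and validation helpers above; the object key and bucket names are made-up examples:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	// UTF-8 bytes are percent-encoded one by one; "/" and the RFC 3986
	// unreserved marks pass through untouched.
	fmt.Println(s3utils.EncodePath("docs/résumé v1.txt"))
	// docs/r%C3%A9sum%C3%A9%20v1.txt

	// The canonical tag form round-trips through TagEncode/TagDecode.
	fmt.Println(s3utils.TagDecode(s3utils.TagEncode(map[string]string{"env": "prod"})))
	// map[env:prod]

	// The lax checker admits names the strict (DNS-compatible) one rejects.
	fmt.Println(s3utils.CheckValidBucketName("My.Bucket"))       // <nil>
	fmt.Println(s3utils.CheckValidBucketNameStrict("My.Bucket")) // Bucket name contains invalid characters
}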
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html -func CheckValidBucketNameStrict(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, true) -} - -// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func CheckValidObjectNamePrefix(objectName string) error { - if len(objectName) > 1024 { - return errors.New("Object name cannot be longer than 1024 characters") - } - if !utf8.ValidString(objectName) { - return errors.New("Object name with non UTF-8 strings are not supported") - } - return nil -} - -// CheckValidObjectName - checks if we have a valid input object name. -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func CheckValidObjectName(objectName string) error { - if strings.TrimSpace(objectName) == "" { - return errors.New("Object name cannot be empty") - } - return CheckValidObjectNamePrefix(objectName) -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go deleted file mode 100644 index c35e58e..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go +++ /dev/null @@ -1,200 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package set - -import ( - "fmt" - "sort" - - jsoniter "github.com/json-iterator/go" -) - -// StringSet - uses map as set of strings. -type StringSet map[string]struct{} - -var json = jsoniter.ConfigCompatibleWithStandardLibrary - -// ToSlice - returns StringSet as string slice. -func (set StringSet) ToSlice() []string { - keys := make([]string, 0, len(set)) - for k := range set { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// IsEmpty - returns whether the set is empty or not. -func (set StringSet) IsEmpty() bool { - return len(set) == 0 -} - -// Add - adds string to the set. -func (set StringSet) Add(s string) { - set[s] = struct{}{} -} - -// Remove - removes string in the set. It does nothing if string does not exist in the set. -func (set StringSet) Remove(s string) { - delete(set, s) -} - -// Contains - checks if string is in the set. -func (set StringSet) Contains(s string) bool { - _, ok := set[s] - return ok -} - -// FuncMatch - returns new set containing each value who passes match function. -// A 'matchFn' should accept element in a set as first argument and -// 'matchString' as second argument. The function can do any logic to -// compare both the arguments and should return true to accept element in -// a set to include in output set else the element is ignored. 
-func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { - nset := NewStringSet() - for k := range set { - if matchFn(k, matchString) { - nset.Add(k) - } - } - return nset -} - -// ApplyFunc - returns new set containing each value processed by 'applyFn'. -// A 'applyFn' should accept element in a set as a argument and return -// a processed string. The function can do any logic to return a processed -// string. -func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { - nset := NewStringSet() - for k := range set { - nset.Add(applyFn(k)) - } - return nset -} - -// Equals - checks whether given set is equal to current set or not. -func (set StringSet) Equals(sset StringSet) bool { - // If length of set is not equal to length of given set, the - // set is not equal to given set. - if len(set) != len(sset) { - return false - } - - // As both sets are equal in length, check each elements are equal. - for k := range set { - if _, ok := sset[k]; !ok { - return false - } - } - - return true -} - -// Intersection - returns the intersection with given set as new set. -func (set StringSet) Intersection(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - if _, ok := sset[k]; ok { - nset.Add(k) - } - } - - return nset -} - -// Difference - returns the difference with given set as new set. -func (set StringSet) Difference(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - if _, ok := sset[k]; !ok { - nset.Add(k) - } - } - - return nset -} - -// Union - returns the union with given set as new set. -func (set StringSet) Union(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - nset.Add(k) - } - - for k := range sset { - nset.Add(k) - } - - return nset -} - -// MarshalJSON - converts to JSON data. -func (set StringSet) MarshalJSON() ([]byte, error) { - return json.Marshal(set.ToSlice()) -} - -// UnmarshalJSON - parses JSON data and creates new set with it. -// If 'data' contains JSON string array, the set contains each string. -// If 'data' contains JSON string, the set contains the string as one element. -// If 'data' contains Other JSON types, JSON parse error is returned. -func (set *StringSet) UnmarshalJSON(data []byte) error { - sl := []string{} - var err error - if err = json.Unmarshal(data, &sl); err == nil { - *set = make(StringSet) - for _, s := range sl { - set.Add(s) - } - } else { - var s string - if err = json.Unmarshal(data, &s); err == nil { - *set = make(StringSet) - set.Add(s) - } - } - - return err -} - -// String - returns printable string of the set. -func (set StringSet) String() string { - return fmt.Sprintf("%s", set.ToSlice()) -} - -// NewStringSet - creates new string set. -func NewStringSet() StringSet { - return make(StringSet) -} - -// CreateStringSet - creates new string set with given string values. -func CreateStringSet(sl ...string) StringSet { - set := make(StringSet) - for _, k := range sl { - set.Add(k) - } - return set -} - -// CopyStringSet - returns copy of given set. 
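The set operations above compose naturally; a small self-contained sketch (the element values are arbitrary):

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	a := set.CreateStringSet("read", "write", "delete")
	b := set.CreateStringSet("read", "list")

	fmt.Println(a.Intersection(b).ToSlice()) // [read]
	fmt.Println(a.Union(b).ToSlice())        // [delete list read write]
	fmt.Println(a.Difference(b).ToSlice())   // [delete write]

	// FuncMatch filters with an arbitrary predicate, e.g. prefix matching.
	hasPrefix := func(elem, pat string) bool {
		return len(elem) >= len(pat) && elem[:len(pat)] == pat
	}
	fmt.Println(a.FuncMatch(hasPrefix, "w").ToSlice()) // [write]
}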
-func CopyStringSet(set StringSet) StringSet { - nset := NewStringSet() - for k, v := range set { - nset[k] = v - } - return nset -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go deleted file mode 100644 index 77540e2..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go +++ /dev/null @@ -1,224 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2022 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package signer - -import ( - "bytes" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "time" -) - -// getUnsignedChunkLength - calculates the length of chunk metadata -func getUnsignedChunkLength(chunkDataSize int64) int64 { - return int64(len(fmt.Sprintf("%x", chunkDataSize))) + - crlfLen + - chunkDataSize + - crlfLen -} - -// getUSStreamLength - calculates the length of the overall stream (data + metadata) -func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { - if dataLen <= 0 { - return 0 - } - - chunksCount := int64(dataLen / chunkSize) - remainingBytes := int64(dataLen % chunkSize) - streamLen := int64(0) - streamLen += chunksCount * getUnsignedChunkLength(chunkSize) - if remainingBytes > 0 { - streamLen += getUnsignedChunkLength(remainingBytes) - } - streamLen += getUnsignedChunkLength(0) - if len(trailers) > 0 { - for name, placeholder := range trailers { - if len(placeholder) > 0 { - streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) - } - } - streamLen += crlfLen - } - - return streamLen -} - -// prepareStreamingRequest - prepares a request with appropriate -// headers before computing the seed signature. -func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { - req.TransferEncoding = []string{"aws-chunked"} - if sessionToken != "" { - req.Header.Set("X-Amz-Security-Token", sessionToken) - } - - req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) - // Set content length with streaming signature for each chunk included. - req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) -} - -// StreamingUSReader implements chunked upload signature as a reader on -// top of req.Body's ReaderCloser chunk header;data;... repeat -type StreamingUSReader struct { - contentLen int64 // Content-Length from req header - baseReadCloser io.ReadCloser // underlying io.Reader - bytesRead int64 // bytes read from underlying io.Reader - buf bytes.Buffer // holds signed chunk - chunkBuf []byte // holds raw data read from req Body - chunkBufLen int // no. 
of bytes read so far into chunkBuf - done bool // done reading the underlying reader to EOF - chunkNum int - totalChunks int - lastChunkSize int - trailer http.Header -} - -// writeChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) { - s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n") - - // Write chunk data into streaming buffer - s.buf.Write(s.chunkBuf[:chunkLen]) - - // Write the chunk trailer. - if addCrLf { - s.buf.Write([]byte("\r\n")) - } - - // Reset chunkBufLen for next chunk read. - s.chunkBufLen = 0 - s.chunkNum++ -} - -// addSignedTrailer - adds a trailer with the provided headers, -// then signs a chunk and adds it to output. -func (s *StreamingUSReader) addTrailer(h http.Header) { - olen := len(s.chunkBuf) - s.chunkBuf = s.chunkBuf[:0] - for k, v := range h { - s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) - } - - s.buf.Write(s.chunkBuf) - s.buf.WriteString("\r\n\r\n") - - // Reset chunkBufLen for next chunk read. - s.chunkBuf = s.chunkBuf[:olen] - s.chunkBufLen = 0 - s.chunkNum++ -} - -// StreamingUnsignedV4 - provides chunked upload -func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request { - // Set headers needed for streaming signature. - prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) - - if req.Body == nil { - req.Body = io.NopCloser(bytes.NewReader([]byte(""))) - } - - stReader := &StreamingUSReader{ - baseReadCloser: req.Body, - chunkBuf: make([]byte, payloadChunkSize), - contentLen: dataLen, - chunkNum: 1, - totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, - lastChunkSize: int(dataLen % payloadChunkSize), - } - if len(req.Trailer) > 0 { - stReader.trailer = req.Trailer - // Remove... - req.Trailer = nil - } - - req.Body = stReader - - return req -} - -// Read - this method performs chunk upload signature providing a -// io.Reader interface. -func (s *StreamingUSReader) Read(buf []byte) (int, error) { - switch { - // After the last chunk is read from underlying reader, we - // never re-fill s.buf. - case s.done: - - // s.buf will be (re-)filled with next chunk when has lesser - // bytes than asked for. - case s.buf.Len() < len(buf): - s.chunkBufLen = 0 - for { - n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) - // Usually we validate `err` first, but in this case - // we are validating n > 0 for the following reasons. - // - // 1. n > 0, err is one of io.EOF, nil (near end of stream) - // A Reader returning a non-zero number of bytes at the end - // of the input stream may return either err == EOF or err == nil - // - // 2. n == 0, err is io.EOF (actual end of stream) - // - // Callers should always process the n > 0 bytes returned - // before considering the error err. - if n1 > 0 { - s.chunkBufLen += n1 - s.bytesRead += int64(n1) - - if s.chunkBufLen == payloadChunkSize || - (s.chunkNum == s.totalChunks-1 && - s.chunkBufLen == s.lastChunkSize) { - // Sign the chunk and write it to s.buf. - s.writeChunk(s.chunkBufLen, true) - break - } - } - if err != nil { - if err == io.EOF { - // No more data left in baseReader - last chunk. - // Done reading the last chunk from baseReader. - s.done = true - - // bytes read from baseReader different than - // content length provided. 
- if s.bytesRead != s.contentLen { - return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) - } - - // Sign the chunk and write it to s.buf. - s.writeChunk(0, len(s.trailer) == 0) - if len(s.trailer) > 0 { - // Trailer must be set now. - s.addTrailer(s.trailer) - } - break - } - return 0, err - } - - } - } - return s.buf.Read(buf) -} - -// Close - this method makes underlying io.ReadCloser's Close method available. -func (s *StreamingUSReader) Close() error { - return s.baseReadCloser.Close() -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go deleted file mode 100644 index 1c2f1dc..0000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ /dev/null @@ -1,403 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package signer - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "time" - - md5simd "github.com/minio/md5-simd" -) - -// Reference for constants used below - -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming -const ( - streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" - streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" - streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER" - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - payloadChunkSize = 64 * 1024 - chunkSigConstLen = 17 // ";chunk-signature=" - signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" - crlfLen = 2 // CRLF - trailerKVSeparator = ":" - trailerSignature = "x-amz-trailer-signature" -) - -// Request headers to be ignored while calculating seed signature for -// a request. 
-var ignoredStreamingHeaders = map[string]bool{ - "Authorization": true, - "User-Agent": true, - "Content-Type": true, -} - -// getSignedChunkLength - calculates the length of chunk metadata -func getSignedChunkLength(chunkDataSize int64) int64 { - return int64(len(fmt.Sprintf("%x", chunkDataSize))) + - chunkSigConstLen + - signatureStrLen + - crlfLen + - chunkDataSize + - crlfLen -} - -// getStreamLength - calculates the length of the overall stream (data + metadata) -func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { - if dataLen <= 0 { - return 0 - } - - chunksCount := int64(dataLen / chunkSize) - remainingBytes := int64(dataLen % chunkSize) - streamLen := int64(0) - streamLen += chunksCount * getSignedChunkLength(chunkSize) - if remainingBytes > 0 { - streamLen += getSignedChunkLength(remainingBytes) - } - streamLen += getSignedChunkLength(0) - if len(trailers) > 0 { - for name, placeholder := range trailers { - if len(placeholder) > 0 { - streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) - } - } - streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen - } - - return streamLen -} - -// buildChunkStringToSign - returns the string to sign given chunk data -// and previous signature. -func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { - stringToSignParts := []string{ - streamingPayloadHdr, - t.Format(iso8601DateFormat), - getScope(region, t, ServiceTypeS3), - previousSig, - emptySHA256, - chunkChecksum, - } - - return strings.Join(stringToSignParts, "\n") -} - -// buildTrailerChunkStringToSign - returns the string to sign given chunk data -// and previous signature. -func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { - stringToSignParts := []string{ - streamingTrailerHdr, - t.Format(iso8601DateFormat), - getScope(region, t, ServiceTypeS3), - previousSig, - chunkChecksum, - } - - return strings.Join(stringToSignParts, "\n") -} - -// prepareStreamingRequest - prepares a request with appropriate -// headers before computing the seed signature. -func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { - // Set x-amz-content-sha256 header. - if len(req.Trailer) == 0 { - req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) - } else { - req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) - for k := range req.Trailer { - req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) - } - req.TransferEncoding = []string{"aws-chunked"} - } - - if sessionToken != "" { - req.Header.Set("X-Amz-Security-Token", sessionToken) - } - - req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) - // Set content length with streaming signature for each chunk included. - req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) - req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) -} - -// buildChunkHeader - returns the chunk header. -// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n -func buildChunkHeader(chunkLen int64, signature string) []byte { - return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") -} - -// buildChunkSignature - returns chunk signature for a given chunk and previous signature. 
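The Content-Length that prepareStreamingRequest sets can be reproduced by hand. This sketch mirrors the getSignedChunkLength/getStreamLength arithmetic above for a trailer-less stream; the constants are copied from this file, and the 150 KiB payload size is arbitrary:

package main

import "fmt"

const (
	payloadChunkSize = 64 * 1024
	chunkSigConstLen = 17 // ";chunk-signature="
	signatureStrLen  = 64 // hex-encoded SHA-256
	crlfLen          = 2
)

// signedChunkLen mirrors getSignedChunkLength: hex(size) + ";chunk-signature="
// + 64-char signature + CRLF + payload + CRLF.
func signedChunkLen(n int64) int64 {
	return int64(len(fmt.Sprintf("%x", n))) + chunkSigConstLen + signatureStrLen + crlfLen + n + crlfLen
}

func main() {
	dataLen := int64(150 * 1024) // two full 64 KiB chunks plus a 22 KiB tail

	streamLen := (dataLen / payloadChunkSize) * signedChunkLen(payloadChunkSize)
	if rem := dataLen % payloadChunkSize; rem > 0 {
		streamLen += signedChunkLen(rem)
	}
	streamLen += signedChunkLen(0) // zero-length terminating chunk

	fmt.Println(streamLen) // 153955: the wire Content-Length for this payload
}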
-func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region, - previousSignature, secretAccessKey string, -) string { - chunkStringToSign := buildChunkStringToSign(reqTime, region, - previousSignature, chunkCheckSum) - signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) - return getSignature(signingKey, chunkStringToSign) -} - -// buildChunkSignature - returns chunk signature for a given chunk and previous signature. -func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region, - previousSignature, secretAccessKey string, -) string { - chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, - previousSignature, chunkChecksum) - signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) - return getSignature(signingKey, chunkStringToSign) -} - -// getSeedSignature - returns the seed signature for a given request. -func (s *StreamingReader) setSeedSignature(req *http.Request) { - // Get canonical request - canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req)) - - // Get string to sign from canonical request. - stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3) - - signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3) - - // Calculate signature. - s.seedSignature = getSignature(signingKey, stringToSign) -} - -// StreamingReader implements chunked upload signature as a reader on -// top of req.Body's ReaderCloser chunk header;data;... repeat -type StreamingReader struct { - accessKeyID string - secretAccessKey string - sessionToken string - region string - prevSignature string - seedSignature string - contentLen int64 // Content-Length from req header - baseReadCloser io.ReadCloser // underlying io.Reader - bytesRead int64 // bytes read from underlying io.Reader - buf bytes.Buffer // holds signed chunk - chunkBuf []byte // holds raw data read from req Body - chunkBufLen int // no. of bytes read so far into chunkBuf - done bool // done reading the underlying reader to EOF - reqTime time.Time - chunkNum int - totalChunks int - lastChunkSize int - trailer http.Header - sh256 md5simd.Hasher -} - -// signChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { - // Compute chunk signature for next header - s.sh256.Reset() - s.sh256.Write(s.chunkBuf[:chunkLen]) - chunckChecksum := hex.EncodeToString(s.sh256.Sum(nil)) - - signature := buildChunkSignature(chunckChecksum, s.reqTime, - s.region, s.prevSignature, s.secretAccessKey) - - // For next chunk signature computation - s.prevSignature = signature - - // Write chunk header into streaming buffer - chunkHdr := buildChunkHeader(int64(chunkLen), signature) - s.buf.Write(chunkHdr) - - // Write chunk data into streaming buffer - s.buf.Write(s.chunkBuf[:chunkLen]) - - // Write the chunk trailer. - if addCrLf { - s.buf.Write([]byte("\r\n")) - } - - // Reset chunkBufLen for next chunk read. - s.chunkBufLen = 0 - s.chunkNum++ -} - -// addSignedTrailer - adds a trailer with the provided headers, -// then signs a chunk and adds it to output. -func (s *StreamingReader) addSignedTrailer(h http.Header) { - olen := len(s.chunkBuf) - s.chunkBuf = s.chunkBuf[:0] - for k, v := range h { - s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) 
- } - - s.sh256.Reset() - s.sh256.Write(s.chunkBuf) - chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil)) - // Compute chunk signature - signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime, - s.region, s.prevSignature, s.secretAccessKey) - - // For next chunk signature computation - s.prevSignature = signature - - s.buf.Write(s.chunkBuf) - s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n") - - // Reset chunkBufLen for next chunk read. - s.chunkBuf = s.chunkBuf[:olen] - s.chunkBufLen = 0 - s.chunkNum++ -} - -// setStreamingAuthHeader - builds and sets authorization header value -// for streaming signature. -func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { - credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3) - authParts := []string{ - signV4Algorithm + " Credential=" + credential, - "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), - "Signature=" + s.seedSignature, - } - - // Set authorization header. - auth := strings.Join(authParts, ",") - req.Header.Set("Authorization", auth) -} - -// StreamingSignV4 - provides chunked upload signatureV4 support by -// implementing io.Reader. -func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, - region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher, -) *http.Request { - // Set headers needed for streaming signature. - prepareStreamingRequest(req, sessionToken, dataLen, reqTime) - - if req.Body == nil { - req.Body = io.NopCloser(bytes.NewReader([]byte(""))) - } - - stReader := &StreamingReader{ - baseReadCloser: req.Body, - accessKeyID: accessKeyID, - secretAccessKey: secretAccessKey, - sessionToken: sessionToken, - region: region, - reqTime: reqTime, - chunkBuf: make([]byte, payloadChunkSize), - contentLen: dataLen, - chunkNum: 1, - totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, - lastChunkSize: int(dataLen % payloadChunkSize), - sh256: sh256, - } - if len(req.Trailer) > 0 { - stReader.trailer = req.Trailer - // Remove... - req.Trailer = nil - } - - // Add the request headers required for chunk upload signing. - - // Compute the seed signature. - stReader.setSeedSignature(req) - - // Set the authorization header with the seed signature. - stReader.setStreamingAuthHeader(req) - - // Set seed signature as prevSignature for subsequent - // streaming signing process. - stReader.prevSignature = stReader.seedSignature - req.Body = stReader - - return req -} - -// Read - this method performs chunk upload signature providing a -// io.Reader interface. -func (s *StreamingReader) Read(buf []byte) (int, error) { - switch { - // After the last chunk is read from underlying reader, we - // never re-fill s.buf. - case s.done: - - // s.buf will be (re-)filled with next chunk when has lesser - // bytes than asked for. - case s.buf.Len() < len(buf): - s.chunkBufLen = 0 - for { - n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) - // Usually we validate `err` first, but in this case - // we are validating n > 0 for the following reasons. - // - // 1. n > 0, err is one of io.EOF, nil (near end of stream) - // A Reader returning a non-zero number of bytes at the end - // of the input stream may return either err == EOF or err == nil - // - // 2. n == 0, err is io.EOF (actual end of stream) - // - // Callers should always process the n > 0 bytes returned - // before considering the error err. 
-
-// Read - this method performs chunk upload signature, providing an
-// io.Reader interface.
-func (s *StreamingReader) Read(buf []byte) (int, error) {
-	switch {
-	// After the last chunk is read from underlying reader, we
-	// never re-fill s.buf.
-	case s.done:
-
-	// s.buf will be (re-)filled with the next chunk when it has fewer
-	// bytes than asked for.
-	case s.buf.Len() < len(buf):
-		s.chunkBufLen = 0
-		for {
-			n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
-			// Usually we validate `err` first, but in this case
-			// we are validating n > 0 for the following reasons.
-			//
-			// 1. n > 0, err is one of io.EOF, nil (near end of stream)
-			// A Reader returning a non-zero number of bytes at the end
-			// of the input stream may return either err == EOF or err == nil
-			//
-			// 2. n == 0, err is io.EOF (actual end of stream)
-			//
-			// Callers should always process the n > 0 bytes returned
-			// before considering the error err.
-			if n1 > 0 {
-				s.chunkBufLen += n1
-				s.bytesRead += int64(n1)
-
-				if s.chunkBufLen == payloadChunkSize ||
-					(s.chunkNum == s.totalChunks-1 &&
-						s.chunkBufLen == s.lastChunkSize) {
-					// Sign the chunk and write it to s.buf.
-					s.signChunk(s.chunkBufLen, true)
-					break
-				}
-			}
-			if err != nil {
-				if err == io.EOF {
-					// No more data left in baseReader - last chunk.
-					// Done reading the last chunk from baseReader.
-					s.done = true
-
-					// Bytes read from baseReader differ from the
-					// content length provided.
-					if s.bytesRead != s.contentLen {
-						return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
-					}
-
-					// Sign the chunk and write it to s.buf.
-					s.signChunk(0, len(s.trailer) == 0)
-					if len(s.trailer) > 0 {
-						// Trailer must be set now.
-						s.addSignedTrailer(s.trailer)
-					}
-					break
-				}
-				return 0, err
-			}
-
-		}
-	}
-	return s.buf.Read(buf)
-}
-
-// Close - this method makes underlying io.ReadCloser's Close method available.
-func (s *StreamingReader) Close() error {
-	if s.sh256 != nil {
-		s.sh256.Close()
-		s.sh256 = nil
-	}
-	return s.baseReadCloser.Close()
-}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
deleted file mode 100644
index fa4f8c9..0000000
--- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package signer
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"crypto/sha1"
-	"encoding/base64"
-	"fmt"
-	"net/http"
-	"net/url"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/minio/minio-go/v7/pkg/s3utils"
-)
-
-// Signature and API related constants.
-const (
-	signV2Algorithm = "AWS"
-)
-
-// Encode input URL path to URL encoded path.
-func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
-	if virtualHost {
-		reqHost := getHostAddr(req)
-		dotPos := strings.Index(reqHost, ".")
-		if dotPos > -1 {
-			bucketName := reqHost[:dotPos]
-			path = "/" + bucketName
-			path += req.URL.Path
-			path = s3utils.EncodePath(path)
-			return
-		}
-	}
-	path = s3utils.EncodePath(req.URL.Path)
-	return
-}
-
-// PreSignV2 - presign the request in the following style.
-// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
-func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
-	// Presign is not needed for anonymous credentials.
-	if accessKeyID == "" || secretAccessKey == "" {
-		return &req
-	}
-
-	d := time.Now().UTC()
-	// Find epoch expires when the request will expire.
-	epochExpires := d.Unix() + expires
-
-	// Add expires header if not present.
-	if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
-		req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
-	}
-
-	// Get presigned string to sign.
-	stringToSign := preStringToSignV2(req, virtualHost)
-	hm := hmac.New(sha1.New, []byte(secretAccessKey))
-	hm.Write([]byte(stringToSign))
-
-	// Calculate signature.
-	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
-
-	query := req.URL.Query()
-	// Handle specially for Google Cloud Storage.
-	if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
-		query.Set("GoogleAccessId", accessKeyID)
-	} else {
-		query.Set("AWSAccessKeyId", accessKeyID)
-	}
-
-	// Fill in Expires for presigned query.
-	query.Set("Expires", strconv.FormatInt(epochExpires, 10))
-
-	// Encode query and save.
-	req.URL.RawQuery = s3utils.QueryEncode(query)
-
-	// Save signature finally.
-	req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
-
-	// Return.
-	return &req
-}
-
-// PostPresignSignatureV2 - presigned signature for PostPolicy
-// request.
-func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
-	hm := hmac.New(sha1.New, []byte(secretAccessKey))
-	hm.Write([]byte(policyBase64))
-	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
-	return signature
-}
-
-// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
-// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
-//
-// StringToSign = HTTP-Verb + "\n" +
-//	Content-Md5 + "\n" +
-//	Content-Type + "\n" +
-//	Date + "\n" +
-//	CanonicalizedProtocolHeaders +
-//	CanonicalizedResource;
-//
-// CanonicalizedResource = [ "/" + Bucket ] +
-//	<HTTP-Request-URI, from the protocol name up to the query string> +
-//	[ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
-//
-// CanonicalizedProtocolHeaders = <described below>

-// SignV2 sign the request before Do() (AWS Signature Version 2).
-func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
-	// Signature calculation is not needed for anonymous credentials.
-	if accessKeyID == "" || secretAccessKey == "" {
-		return &req
-	}
-
-	// Initial time.
-	d := time.Now().UTC()
-
-	// Add date if not present.
-	if date := req.Header.Get("Date"); date == "" {
-		req.Header.Set("Date", d.Format(http.TimeFormat))
-	}
-
-	// Calculate HMAC for secretAccessKey.
-	stringToSign := stringToSignV2(req, virtualHost)
-	hm := hmac.New(sha1.New, []byte(secretAccessKey))
-	hm.Write([]byte(stringToSign))
-
-	// Prepare auth header.
-	authHeader := new(bytes.Buffer)
-	authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
-	encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
-	encoder.Write(hm.Sum(nil))
-	encoder.Close()
-
-	// Set Authorization header.
-	req.Header.Set("Authorization", authHeader.String())
-
-	return &req
-}
-
-// From the Amazon docs:
-//
-// StringToSign = HTTP-Verb + "\n" +
-//
-//	Content-Md5 + "\n" +
-//	Content-Type + "\n" +
-//	Expires + "\n" +
-//	CanonicalizedProtocolHeaders +
-//	CanonicalizedResource;
-func preStringToSignV2(req http.Request, virtualHost bool) string {
-	buf := new(bytes.Buffer)
-	// Write standard headers.
-	writePreSignV2Headers(buf, req)
-	// Write canonicalized protocol headers if any.
-	writeCanonicalizedHeaders(buf, req)
-	// Write canonicalized Query resources if any.
-	writeCanonicalizedResource(buf, req, virtualHost)
-	return buf.String()
-}
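The V2 primitive used by both PreSignV2 and PostPresignSignatureV2 is just Base64(HMAC-SHA1(secret, stringToSign)). A standalone sketch with made-up inputs, laid out like the StringToSign documented above:

    package main

    import (
    	"crypto/hmac"
    	"crypto/sha1"
    	"encoding/base64"
    	"fmt"
    )

    // signV2 mirrors the primitive above: Base64(HMAC-SHA1(secret, stringToSign)).
    func signV2(secretAccessKey, stringToSign string) string {
    	hm := hmac.New(sha1.New, []byte(secretAccessKey))
    	hm.Write([]byte(stringToSign))
    	return base64.StdEncoding.EncodeToString(hm.Sum(nil))
    }

    func main() {
    	// Hypothetical StringToSign: HTTP-Verb, Content-Md5, Content-Type,
    	// Expires, then the canonicalized resource.
    	stringToSign := "GET\n\n\n1175139620\n/mybucket/photo.jpg"
    	fmt.Println(signV2("not-a-real-secret", stringToSign))
    }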
-
-// writePreSignV2Headers - write preSign v2 required headers.
-func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
-	buf.WriteString(req.Method + "\n")
-	buf.WriteString(req.Header.Get("Content-Md5") + "\n")
-	buf.WriteString(req.Header.Get("Content-Type") + "\n")
-	buf.WriteString(req.Header.Get("Expires") + "\n")
-}
-
-// From the Amazon docs:
-//
-// StringToSign = HTTP-Verb + "\n" +
-//
-//	Content-Md5 + "\n" +
-//	Content-Type + "\n" +
-//	Date + "\n" +
-//	CanonicalizedProtocolHeaders +
-//	CanonicalizedResource;
-func stringToSignV2(req http.Request, virtualHost bool) string {
-	buf := new(bytes.Buffer)
-	// Write standard headers.
-	writeSignV2Headers(buf, req)
-	// Write canonicalized protocol headers if any.
-	writeCanonicalizedHeaders(buf, req)
-	// Write canonicalized Query resources if any.
-	writeCanonicalizedResource(buf, req, virtualHost)
-	return buf.String()
-}
-
-// writeSignV2Headers - write signV2 required headers.
-func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
-	buf.WriteString(req.Method + "\n")
-	buf.WriteString(req.Header.Get("Content-Md5") + "\n")
-	buf.WriteString(req.Header.Get("Content-Type") + "\n")
-	buf.WriteString(req.Header.Get("Date") + "\n")
-}
-
-// writeCanonicalizedHeaders - write canonicalized headers.
-func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
-	var protoHeaders []string
-	vals := make(map[string][]string)
-	for k, vv := range req.Header {
-		// All the AMZ headers should be lowercase
-		lk := strings.ToLower(k)
-		if strings.HasPrefix(lk, "x-amz") {
-			protoHeaders = append(protoHeaders, lk)
-			vals[lk] = vv
-		}
-	}
-	sort.Strings(protoHeaders)
-	for _, k := range protoHeaders {
-		buf.WriteString(k)
-		buf.WriteByte(':')
-		for idx, v := range vals[k] {
-			if idx > 0 {
-				buf.WriteByte(',')
-			}
-			buf.WriteString(v)
-		}
-		buf.WriteByte('\n')
-	}
-}
-
-// The AWS S3 Signature V2 calculation rule is given here:
-// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign

-// Whitelist resource list that will be used in query string for signature-V2 calculation.
-//
-// This list should be kept alphabetically sorted, do not hastily edit.
-var resourceList = []string{
-	"acl",
-	"cors",
-	"delete",
-	"encryption",
-	"legal-hold",
-	"lifecycle",
-	"location",
-	"logging",
-	"notification",
-	"partNumber",
-	"policy",
-	"replication",
-	"requestPayment",
-	"response-cache-control",
-	"response-content-disposition",
-	"response-content-encoding",
-	"response-content-language",
-	"response-content-type",
-	"response-expires",
-	"retention",
-	"select",
-	"select-type",
-	"tagging",
-	"torrent",
-	"uploadId",
-	"uploads",
-	"versionId",
-	"versioning",
-	"versions",
-	"website",
-}
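Only the sub-resources in resourceList participate in the V2 canonical resource; every other query parameter is ignored for signing. A short sketch of that filtering step (signedSubresources is an assumed subset of the list above, for illustration):

    package main

    import (
    	"fmt"
    	"net/url"
    	"sort"
    )

    // Assumed subset of resourceList, for illustration only.
    var signedSubresources = map[string]bool{"acl": true, "uploads": true, "versionId": true}

    func main() {
    	vals, _ := url.ParseQuery("uploads=&prefix=logs&acl=&max-keys=10")
    	var keep []string
    	for k := range vals {
    		if signedSubresources[k] {
    			keep = append(keep, k) // only whitelisted sub-resources are signed
    		}
    	}
    	sort.Strings(keep)
    	fmt.Println(keep) // [acl uploads] -- prefix and max-keys do not affect the V2 signature
    }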
-
-// From the Amazon docs:
-//
-// CanonicalizedResource = [ "/" + Bucket ] +
-//
-//	<HTTP-Request-URI, from the protocol name up to the query string> +
-//	[ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
-func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
-	// Save request URL.
-	requestURL := req.URL
-	// Get encoded URL path.
-	buf.WriteString(encodeURL2Path(&req, virtualHost))
-	if requestURL.RawQuery != "" {
-		var n int
-		vals, _ := url.ParseQuery(requestURL.RawQuery)
-		// Verify if any sub resource queries are present, if yes
-		// canonicalize them.
-		for _, resource := range resourceList {
-			if vv, ok := vals[resource]; ok && len(vv) > 0 {
-				n++
-				// First element
-				switch n {
-				case 1:
-					buf.WriteByte('?')
-				// The rest
-				default:
-					buf.WriteByte('&')
-				}
-				buf.WriteString(resource)
-				// Request parameters
-				if len(vv[0]) > 0 {
-					buf.WriteByte('=')
-					buf.WriteString(vv[0])
-				}
-			}
-		}
-	}
-}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
deleted file mode 100644
index ffd2514..0000000
--- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package signer
-
-import (
-	"bytes"
-	"encoding/hex"
-	"net/http"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/minio/minio-go/v7/pkg/s3utils"
-)
-
-// Signature and API related constants.
-const (
-	signV4Algorithm   = "AWS4-HMAC-SHA256"
-	iso8601DateFormat = "20060102T150405Z"
-	yyyymmdd          = "20060102"
-)
-
-// Different service types
-const (
-	ServiceTypeS3  = "s3"
-	ServiceTypeSTS = "sts"
-)
-
-// Excerpts from @lsegal -
-// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
-//
-// * User-Agent
-// This is ignored from signing because signing this causes problems with generating pre-signed
-// URLs (that are executed by other agents) or when customers pass requests through proxies, which
-// may modify the user-agent.
-//
-// * Authorization
-// Is skipped for obvious reasons.
-//
-// * Accept-Encoding
-// Some S3 servers like Hitachi Content Platform do not honor this header for signature
-// calculation.
-var v4IgnoredHeaders = map[string]bool{
-	"Accept-Encoding": true,
-	"Authorization":   true,
-	"User-Agent":      true,
-}
-
-// getSigningKey returns the hmac seed key used to calculate the final signature.
-func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte {
-	date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
-	location := sumHMAC(date, []byte(loc))
-	service := sumHMAC(location, []byte(serviceType))
-	signingKey := sumHMAC(service, []byte("aws4_request"))
-	return signingKey
-}
-
-// getSignature returns the final signature in hexadecimal form.
-func getSignature(signingKey []byte, stringToSign string) string {
-	return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
-}
-
-// getScope generates a string of a specific date, an AWS region, and a
-// service.
-func getScope(location string, t time.Time, serviceType string) string {
-	scope := strings.Join([]string{
-		t.Format(yyyymmdd),
-		location,
-		serviceType,
-		"aws4_request",
-	}, "/")
-	return scope
-}
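Concretely, the scope built by getScope is <yyyymmdd>/<region>/<service>/aws4_request, and GetCredential (below) simply prefixes it with the access key ID. A small illustration with a made-up access key:

    package main

    import (
    	"fmt"
    	"strings"
    	"time"
    )

    func main() {
    	t := time.Date(2024, time.January, 2, 15, 4, 5, 0, time.UTC)
    	scope := strings.Join([]string{t.Format("20060102"), "us-east-1", "s3", "aws4_request"}, "/")
    	fmt.Println("EXAMPLEACCESSKEYID" + "/" + scope)
    	// EXAMPLEACCESSKEYID/20240102/us-east-1/s3/aws4_request
    }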
-
-// GetCredential generates a credential string.
-func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string {
-	scope := getScope(location, t, serviceType)
-	return accessKeyID + "/" + scope
-}
-
-// getHashedPayload gets the hexadecimal value of the SHA256 hash of
-// the request payload.
-func getHashedPayload(req http.Request) string {
-	hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
-	if hashedPayload == "" {
-		// Presign does not have a payload, use S3 recommended value.
-		hashedPayload = unsignedPayload
-	}
-	return hashedPayload
-}
-
-// getCanonicalHeaders generates a list of request headers for
-// signature.
-func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
-	var headers []string
-	vals := make(map[string][]string)
-	for k, vv := range req.Header {
-		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
-			continue // ignored header
-		}
-		headers = append(headers, strings.ToLower(k))
-		vals[strings.ToLower(k)] = vv
-	}
-	if !headerExists("host", headers) {
-		headers = append(headers, "host")
-	}
-	sort.Strings(headers)
-
-	var buf bytes.Buffer
-	// Save all the headers in canonical form <header>:<value> newline
-	// separated for each header.
-	for _, k := range headers {
-		buf.WriteString(k)
-		buf.WriteByte(':')
-		switch {
-		case k == "host":
-			buf.WriteString(getHostAddr(&req))
-			buf.WriteByte('\n')
-		default:
-			for idx, v := range vals[k] {
-				if idx > 0 {
-					buf.WriteByte(',')
-				}
-				buf.WriteString(signV4TrimAll(v))
-			}
-			buf.WriteByte('\n')
-		}
-	}
-	return buf.String()
-}
-
-func headerExists(key string, headers []string) bool {
-	for _, k := range headers {
-		if k == key {
-			return true
-		}
-	}
-	return false
-}
-
-// getSignedHeaders generates all signed request headers,
-// i.e. a lexically sorted, semicolon-separated list of lowercase
-// request header names.
-func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
-	var headers []string
-	for k := range req.Header {
-		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
-			continue // Ignored header found continue.
-		}
-		headers = append(headers, strings.ToLower(k))
-	}
-	if !headerExists("host", headers) {
-		headers = append(headers, "host")
-	}
-	sort.Strings(headers)
-	return strings.Join(headers, ";")
-}
-
-// getCanonicalRequest generates a canonical request of style.
-//
-// canonicalRequest =
-//
-//	<HTTPMethod>\n
-//	<CanonicalURI>\n
-//	<CanonicalQueryString>\n
-//	<CanonicalHeaders>\n
-//	<SignedHeaders>\n
-//	<HashedPayload>
-func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
-	req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
-	canonicalRequest := strings.Join([]string{
-		req.Method,
-		s3utils.EncodePath(req.URL.Path),
-		req.URL.RawQuery,
-		getCanonicalHeaders(req, ignoredHeaders),
-		getSignedHeaders(req, ignoredHeaders),
-		hashedPayload,
-	}, "\n")
-	return canonicalRequest
-}
-
-// getStringToSignV4 returns the string to sign based on selected query values.
-func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
-	stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
-	stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
-	stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest)))
-	return stringToSign
-}
-
-// PreSignV4 presign the request, in accordance with
-// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
-func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
-	// Presign is not needed for anonymous credentials.
-	if accessKeyID == "" || secretAccessKey == "" {
-		return &req
-	}
-
-	// Initial time.
-	t := time.Now().UTC()
-
-	// Get credential string.
-	credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)
-
-	// Get all signed headers.
-	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
-
-	// Set URL query.
-	query := req.URL.Query()
-	query.Set("X-Amz-Algorithm", signV4Algorithm)
-	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
-	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
-	query.Set("X-Amz-SignedHeaders", signedHeaders)
-	query.Set("X-Amz-Credential", credential)
-	// Set session token if available.
-	if sessionToken != "" {
-		query.Set("X-Amz-Security-Token", sessionToken)
-	}
-	req.URL.RawQuery = query.Encode()
-
-	// Get canonical request.
-	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))
-
-	// Get string to sign from canonical request.
-	stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)
-
-	// Get hmac signing key.
-	signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
-
-	// Calculate signature.
-	signature := getSignature(signingKey, stringToSign)
-
-	// Add signature header to RawQuery.
-	req.URL.RawQuery += "&X-Amz-Signature=" + signature
-
-	return &req
-}
-
-// PostPresignSignatureV4 - presigned signature for PostPolicy
-// requests.
-func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
-	// Get signing key.
-	signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
-	// Calculate signature.
-	signature := getSignature(signingkey, policyBase64)
-	return signature
-}
-
-// SignV4STS - signature v4 for STS request.
-func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
-	return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil)
-}
-
-// Internal function called for different service types.
-func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request {
-	// Signature calculation is not needed for anonymous credentials.
-	if accessKeyID == "" || secretAccessKey == "" {
-		return &req
-	}
-
-	// Initial time.
-	t := time.Now().UTC()
-
-	// Set x-amz-date.
-	req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
-
-	// Set session token if available.
-	if sessionToken != "" {
-		req.Header.Set("X-Amz-Security-Token", sessionToken)
-	}
-
-	if len(trailer) > 0 {
-		for k := range trailer {
-			req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
-		}
-
-		req.Header.Set("Content-Encoding", "aws-chunked")
-		req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
-	}
-
-	hashedPayload := getHashedPayload(req)
-	if serviceType == ServiceTypeSTS {
-		// Content sha256 header is not sent with the request
-		// but it is expected to have sha256 of payload for signature
-		// in STS service type request.
-		req.Header.Del("X-Amz-Content-Sha256")
-	}
-
-	// Get canonical request.
-	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload)
-
-	// Get string to sign from canonical request.
-	stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType)
-
-	// Get hmac signing key.
-	signingKey := getSigningKey(secretAccessKey, location, t, serviceType)
-
-	// Get credential string.
-	credential := GetCredential(accessKeyID, location, t, serviceType)
-
-	// Get all signed headers.
-	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
-
-	// Calculate signature.
-	signature := getSignature(signingKey, stringToSign)
-
-	// If regular request, construct the final authorization header.
-	parts := []string{
-		signV4Algorithm + " Credential=" + credential,
-		"SignedHeaders=" + signedHeaders,
-		"Signature=" + signature,
-	}
-
-	// Set authorization header.
-	auth := strings.Join(parts, ", ")
-	req.Header.Set("Authorization", auth)
-
-	if len(trailer) > 0 {
-		// Use custom chunked encoding.
-		req.Trailer = trailer
-		return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
-	}
-	return &req
-}
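The key derivation in getSigningKey is a fixed HMAC-SHA256 chain over the date, region, service, and the literal "aws4_request". The same chain can be reproduced with only the standard library; all inputs here are made up:

    package main

    import (
    	"crypto/hmac"
    	"crypto/sha256"
    	"encoding/hex"
    	"fmt"
    )

    func hmacSHA256(key, data []byte) []byte {
    	h := hmac.New(sha256.New, key)
    	h.Write(data)
    	return h.Sum(nil)
    }

    func main() {
    	secret, date, region, service := "not-a-real-secret", "20240102", "us-east-1", "s3"
    	k := hmacSHA256([]byte("AWS4"+secret), []byte(date)) // date key
    	k = hmacSHA256(k, []byte(region))                    // date-region key
    	k = hmacSHA256(k, []byte(service))                   // date-region-service key
    	k = hmacSHA256(k, []byte("aws4_request"))            // final signing key
    	fmt.Println(hex.EncodeToString(k))
    }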
-
-// SignV4 sign the request before Do(), in accordance with
-// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
-func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
-	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil)
-}
-
-// SignV4Trailer sign the request before Do(), in accordance with
-// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
-func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request {
-	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer)
-}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
deleted file mode 100644
index 87c9939..0000000
--- a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package signer
-
-import (
-	"crypto/hmac"
-	"crypto/sha256"
-	"net/http"
-	"strings"
-)
-
-// unsignedPayload - value to be set to the X-Amz-Content-Sha256 header when
-// the payload is not signed.
-const unsignedPayload = "UNSIGNED-PAYLOAD"
-
-// sum256 calculates the sha256 sum of an input byte array.
-func sum256(data []byte) []byte {
-	hash := sha256.New()
-	hash.Write(data)
-	return hash.Sum(nil)
-}
-
-// sumHMAC calculates the hmac of data using the given key.
-func sumHMAC(key, data []byte) []byte {
-	hash := hmac.New(sha256.New, key)
-	hash.Write(data)
-	return hash.Sum(nil)
-}
-
-// getHostAddr returns the host header if available, otherwise returns host from URL
-func getHostAddr(req *http.Request) string {
-	host := req.Header.Get("host")
-	if host != "" && req.Host != host {
-		return host
-	}
-	if req.Host != "" {
-		return req.Host
-	}
-	return req.URL.Host
-}
-
-// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
-// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
-func signV4TrimAll(input string) string {
-	// Compress adjacent spaces (a space is determined by
-	// unicode.IsSpace() internally here) to one space and return
-	return strings.Join(strings.Fields(input), " ")
-}
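The strings.Fields trick above both trims and collapses internal whitespace in one pass, which is the TrimAll behavior SigV4 canonicalization calls for:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// strings.Fields splits on any run of Unicode whitespace, so joining the
    	// fields with single spaces trims the ends and collapses interior runs.
    	in := "  Hello \t World  \n"
    	fmt.Printf("%q\n", strings.Join(strings.Fields(in), " ")) // "Hello World"
    }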
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
deleted file mode 100644
index b5fb956..0000000
--- a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2020 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package sse
-
-import "encoding/xml"
-
-// ApplySSEByDefault defines default encryption configuration, KMS or SSE. To activate
-// KMS, SSEAlgorithm needs to be set to "aws:kms".
-// MinIO currently does not support KMS.
-type ApplySSEByDefault struct {
-	KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
-	SSEAlgorithm   string `xml:"SSEAlgorithm"`
-}
-
-// Rule layer encapsulates default encryption configuration
-type Rule struct {
-	Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
-}
-
-// Configuration is the default encryption configuration structure
-type Configuration struct {
-	XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
-	Rules   []Rule   `xml:"Rule"`
-}
-
-// NewConfigurationSSES3 initializes a new SSE-S3 configuration
-func NewConfigurationSSES3() *Configuration {
-	return &Configuration{
-		Rules: []Rule{
-			{
-				Apply: ApplySSEByDefault{
-					SSEAlgorithm: "AES256",
-				},
-			},
-		},
-	}
-}
-
-// NewConfigurationSSEKMS initializes a new SSE-KMS configuration
-func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration {
-	return &Configuration{
-		Rules: []Rule{
-			{
-				Apply: ApplySSEByDefault{
-					KmsMasterKeyID: kmsMasterKey,
-					SSEAlgorithm:   "aws:kms",
-				},
-			},
-		},
-	}
-}
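Marshaling one of these configurations shows the XML the bucket-encryption API expects. A self-contained sketch using local copies of the structures above, so it compiles outside the package:

    package main

    import (
    	"encoding/xml"
    	"fmt"
    )

    // Local copies of the sse package structures, for a standalone demo.
    type applySSEByDefault struct {
    	KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
    	SSEAlgorithm   string `xml:"SSEAlgorithm"`
    }

    type rule struct {
    	Apply applySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
    }

    type configuration struct {
    	XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
    	Rules   []rule   `xml:"Rule"`
    }

    func main() {
    	cfg := configuration{Rules: []rule{{Apply: applySSEByDefault{SSEAlgorithm: "AES256"}}}}
    	out, err := xml.MarshalIndent(cfg, "", "  ")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out))
    	// <ServerSideEncryptionConfiguration>
    	//   <Rule>
    	//     <ApplyServerSideEncryptionByDefault>
    	//       <SSEAlgorithm>AES256</SSEAlgorithm>
    	//     </ApplyServerSideEncryptionByDefault>
    	//   </Rule>
    	// </ServerSideEncryptionConfiguration>
    }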
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
deleted file mode 100644
index 7a84a6f..0000000
--- a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
+++ /dev/null
@@ -1,413 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2020-2022 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package tags
-
-import (
-	"encoding/xml"
-	"io"
-	"net/url"
-	"regexp"
-	"sort"
-	"strings"
-	"unicode/utf8"
-)
-
-// Error contains tag specific error.
-type Error interface {
-	error
-	Code() string
-}
-
-type errTag struct {
-	code    string
-	message string
-}
-
-// Code contains error code.
-func (err errTag) Code() string {
-	return err.code
-}
-
-// Error contains error message.
-func (err errTag) Error() string {
-	return err.message
-}
-
-var (
-	errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"}
-	errTooManyTags       = &errTag{"BadRequest", "Tags cannot be more than 50"}
-	errInvalidTagKey     = &errTag{"InvalidTag", "The TagKey you have provided is invalid"}
-	errInvalidTagValue   = &errTag{"InvalidTag", "The TagValue you have provided is invalid"}
-	errDuplicateTagKey   = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"}
-)
-
-// Tags come with limitations as per
-// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and
-// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
-const (
-	maxKeyLength      = 128
-	maxValueLength    = 256
-	maxObjectTagCount = 10
-	maxTagCount       = 50
-)
-
-// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
-// Based on the article above and testing of various ASCII characters, the following
-// regex is supported by AWS S3 for both tag keys and values.
-var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
-
-func checkKey(key string) error {
-	if len(key) == 0 {
-		return errInvalidTagKey
-	}
-
-	if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) {
-		return errInvalidTagKey
-	}
-
-	return nil
-}
-
-func checkValue(value string) error {
-	if value != "" {
-		if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) {
-			return errInvalidTagValue
-		}
-	}
-
-	return nil
-}
-
-// Tag denotes key and value.
-type Tag struct {
-	Key   string `xml:"Key"`
-	Value string `xml:"Value"`
-}
-
-func (tag Tag) String() string {
-	return tag.Key + "=" + tag.Value
-}
-
-// IsEmpty returns whether this tag is empty or not.
-func (tag Tag) IsEmpty() bool {
-	return tag.Key == ""
-}
-
-// Validate checks this tag.
-func (tag Tag) Validate() error {
-	if err := checkKey(tag.Key); err != nil {
-		return err
-	}
-
-	return checkValue(tag.Value)
-}
-
-// MarshalXML encodes to XML data.
-func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
-	if err := tag.Validate(); err != nil {
-		return err
-	}
-
-	type subTag Tag // to avoid recursively calling MarshalXML()
-	return e.EncodeElement(subTag(tag), start)
-}
-
-// UnmarshalXML decodes XML data to tag.
-func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
-	type subTag Tag // to avoid recursively calling UnmarshalXML()
-	var st subTag
-	if err := d.DecodeElement(&st, &start); err != nil {
-		return err
-	}
-
-	if err := Tag(st).Validate(); err != nil {
-		return err
-	}
-
-	*tag = Tag(st)
-	return nil
-}
-
-// tagSet represents list of unique tags.
-type tagSet struct { - tagMap map[string]string - isObject bool -} - -func (tags tagSet) String() string { - if len(tags.tagMap) == 0 { - return "" - } - var buf strings.Builder - keys := make([]string, 0, len(tags.tagMap)) - for k := range tags.tagMap { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - keyEscaped := url.QueryEscape(k) - valueEscaped := url.QueryEscape(tags.tagMap[k]) - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(keyEscaped) - buf.WriteByte('=') - buf.WriteString(valueEscaped) - } - return buf.String() -} - -func (tags *tagSet) remove(key string) { - delete(tags.tagMap, key) -} - -func (tags *tagSet) set(key, value string, failOnExist bool) error { - if failOnExist { - if _, found := tags.tagMap[key]; found { - return errDuplicateTagKey - } - } - - if err := checkKey(key); err != nil { - return err - } - - if err := checkValue(value); err != nil { - return err - } - - if tags.isObject { - if len(tags.tagMap) == maxObjectTagCount { - return errTooManyObjectTags - } - } else if len(tags.tagMap) == maxTagCount { - return errTooManyTags - } - - tags.tagMap[key] = value - return nil -} - -func (tags tagSet) count() int { - return len(tags.tagMap) -} - -func (tags tagSet) toMap() map[string]string { - m := make(map[string]string, len(tags.tagMap)) - for key, value := range tags.tagMap { - m[key] = value - } - return m -} - -// MarshalXML encodes to XML data. -func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - tagList := struct { - Tags []Tag `xml:"Tag"` - }{} - - tagList.Tags = make([]Tag, 0, len(tags.tagMap)) - for key, value := range tags.tagMap { - tagList.Tags = append(tagList.Tags, Tag{key, value}) - } - - return e.EncodeElement(tagList, start) -} - -// UnmarshalXML decodes XML data to tag list. -func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - tagList := struct { - Tags []Tag `xml:"Tag"` - }{} - - if err := d.DecodeElement(&tagList, &start); err != nil { - return err - } - - if tags.isObject { - if len(tagList.Tags) > maxObjectTagCount { - return errTooManyObjectTags - } - } else if len(tagList.Tags) > maxTagCount { - return errTooManyTags - } - - m := make(map[string]string, len(tagList.Tags)) - for _, tag := range tagList.Tags { - if _, found := m[tag.Key]; found { - return errDuplicateTagKey - } - - m[tag.Key] = tag.Value - } - - tags.tagMap = m - return nil -} - -type tagging struct { - XMLName xml.Name `xml:"Tagging"` - TagSet *tagSet `xml:"TagSet"` -} - -// Tags is list of tags of XML request/response as per -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody -type Tags tagging - -func (tags Tags) String() string { - return tags.TagSet.String() -} - -// Remove removes a tag by its key. -func (tags *Tags) Remove(key string) { - tags.TagSet.remove(key) -} - -// Set sets new tag. -func (tags *Tags) Set(key, value string) error { - return tags.TagSet.set(key, value, false) -} - -// Count - return number of tags accounted for -func (tags Tags) Count() int { - return tags.TagSet.count() -} - -// ToMap returns copy of tags. -func (tags Tags) ToMap() map[string]string { - return tags.TagSet.toMap() -} - -// MapToObjectTags converts an input map of key and value into -// *Tags data structure with validation. 
-func MapToObjectTags(tagMap map[string]string) (*Tags, error) { - return NewTags(tagMap, true) -} - -// MapToBucketTags converts an input map of key and value into -// *Tags data structure with validation. -func MapToBucketTags(tagMap map[string]string) (*Tags, error) { - return NewTags(tagMap, false) -} - -// NewTags creates Tags from tagMap, If isObject is set, it validates for object tags. -func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) { - tagging := &Tags{ - TagSet: &tagSet{ - tagMap: make(map[string]string), - isObject: isObject, - }, - } - - for key, value := range tagMap { - if err := tagging.TagSet.set(key, value, true); err != nil { - return nil, err - } - } - - return tagging, nil -} - -func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) { - tagging := &Tags{ - TagSet: &tagSet{ - tagMap: make(map[string]string), - isObject: isObject, - }, - } - - if err := xml.NewDecoder(reader).Decode(tagging); err != nil { - return nil, err - } - - return tagging, nil -} - -// ParseBucketXML decodes XML data of tags in reader specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax. -func ParseBucketXML(reader io.Reader) (*Tags, error) { - return unmarshalXML(reader, false) -} - -// ParseObjectXML decodes XML data of tags in reader specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax -func ParseObjectXML(reader io.Reader) (*Tags, error) { - return unmarshalXML(reader, true) -} - -// stringsCut slices s around the first instance of sep, -// returning the text before and after sep. -// The found result reports whether sep appears in s. -// If sep does not appear in s, cut returns s, "", false. -func stringsCut(s, sep string) (before, after string, found bool) { - if i := strings.Index(s, sep); i >= 0 { - return s[:i], s[i+len(sep):], true - } - return s, "", false -} - -func (tags *tagSet) parseTags(tgs string) (err error) { - for tgs != "" { - var key string - key, tgs, _ = stringsCut(tgs, "&") - if key == "" { - continue - } - key, value, _ := stringsCut(key, "=") - key, err1 := url.QueryUnescape(key) - if err1 != nil { - if err == nil { - err = err1 - } - continue - } - value, err1 = url.QueryUnescape(value) - if err1 != nil { - if err == nil { - err = err1 - } - continue - } - if err = tags.set(key, value, true); err != nil { - return err - } - } - return err -} - -// Parse decodes HTTP query formatted string into tags which is limited by isObject. -// A query formatted string is like "key1=value1&key2=value2". -func Parse(s string, isObject bool) (*Tags, error) { - tagging := &Tags{ - TagSet: &tagSet{ - tagMap: make(map[string]string), - isObject: isObject, - }, - } - - if err := tagging.TagSet.parseTags(s); err != nil { - return nil, err - } - - return tagging, nil -} - -// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2". -func ParseObjectTags(s string) (*Tags, error) { - return Parse(s, true) -} diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go deleted file mode 100644 index 3f4881e..0000000 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ /dev/null @@ -1,349 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2023 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/base64" - "fmt" - "net/http" - "strings" - "time" - - "github.com/minio/minio-go/v7/pkg/encrypt" -) - -// expirationDateFormat date format for expiration key in json policy. -const expirationDateFormat = "2006-01-02T15:04:05.000Z" - -// policyCondition explanation: -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html -// -// Example: -// -// policyCondition { -// matchType: "$eq", -// key: "$Content-Type", -// value: "image/png", -// } -type policyCondition struct { - matchType string - condition string - value string -} - -// PostPolicy - Provides strict static type conversion and validation -// for Amazon S3's POST policy JSON string. -type PostPolicy struct { - // Expiration date and time of the POST policy. - expiration time.Time - // Collection of different policy conditions. - conditions []policyCondition - // ContentLengthRange minimum and maximum allowable size for the - // uploaded content. - contentLengthRange struct { - min int64 - max int64 - } - - // Post form data. - formData map[string]string -} - -// NewPostPolicy - Instantiate new post policy. -func NewPostPolicy() *PostPolicy { - p := &PostPolicy{} - p.conditions = make([]policyCondition, 0) - p.formData = make(map[string]string) - return p -} - -// SetExpires - Sets expiration time for the new policy. -func (p *PostPolicy) SetExpires(t time.Time) error { - if t.IsZero() { - return errInvalidArgument("No expiry time set.") - } - p.expiration = t - return nil -} - -// SetKey - Sets an object name for the policy based upload. -func (p *PostPolicy) SetKey(key string) error { - if strings.TrimSpace(key) == "" || key == "" { - return errInvalidArgument("Object name is empty.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$key", - value: key, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["key"] = key - return nil -} - -// SetKeyStartsWith - Sets an object name that an policy based upload -// can start with. -// Can use an empty value ("") to allow any key. -func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { - policyCond := policyCondition{ - matchType: "starts-with", - condition: "$key", - value: keyStartsWith, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["key"] = keyStartsWith - return nil -} - -// SetBucket - Sets bucket at which objects will be uploaded to. 
-func (p *PostPolicy) SetBucket(bucketName string) error { - if strings.TrimSpace(bucketName) == "" || bucketName == "" { - return errInvalidArgument("Bucket name is empty.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$bucket", - value: bucketName, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["bucket"] = bucketName - return nil -} - -// SetCondition - Sets condition for credentials, date and algorithm -func (p *PostPolicy) SetCondition(matchType, condition, value string) error { - if strings.TrimSpace(value) == "" || value == "" { - return errInvalidArgument("No value specified for condition") - } - - policyCond := policyCondition{ - matchType: matchType, - condition: "$" + condition, - value: value, - } - if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" { - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData[condition] = value - return nil - } - return errInvalidArgument("Invalid condition in policy") -} - -// SetContentType - Sets content-type of the object for this policy -// based upload. -func (p *PostPolicy) SetContentType(contentType string) error { - if strings.TrimSpace(contentType) == "" || contentType == "" { - return errInvalidArgument("No content type specified.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$Content-Type", - value: contentType, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["Content-Type"] = contentType - return nil -} - -// SetContentTypeStartsWith - Sets what content-type of the object for this policy -// based upload can start with. -// Can use an empty value ("") to allow any content-type. -func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error { - policyCond := policyCondition{ - matchType: "starts-with", - condition: "$Content-Type", - value: contentTypeStartsWith, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["Content-Type"] = contentTypeStartsWith - return nil -} - -// SetContentLengthRange - Set new min and max content length -// condition for all incoming uploads. -func (p *PostPolicy) SetContentLengthRange(min, max int64) error { - if min > max { - return errInvalidArgument("Minimum limit is larger than maximum limit.") - } - if min < 0 { - return errInvalidArgument("Minimum limit cannot be negative.") - } - if max <= 0 { - return errInvalidArgument("Maximum limit cannot be non-positive.") - } - p.contentLengthRange.min = min - p.contentLengthRange.max = max - return nil -} - -// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy -// based upload. -func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { - if strings.TrimSpace(redirect) == "" || redirect == "" { - return errInvalidArgument("Redirect is empty") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$success_action_redirect", - value: redirect, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["success_action_redirect"] = redirect - return nil -} - -// SetSuccessStatusAction - Sets the status success code of the object for this policy -// based upload. 
-func (p *PostPolicy) SetSuccessStatusAction(status string) error {
-	if strings.TrimSpace(status) == "" || status == "" {
-		return errInvalidArgument("Status is empty")
-	}
-	policyCond := policyCondition{
-		matchType: "eq",
-		condition: "$success_action_status",
-		value:     status,
-	}
-	if err := p.addNewPolicy(policyCond); err != nil {
-		return err
-	}
-	p.formData["success_action_status"] = status
-	return nil
-}
-
-// SetUserMetadata - Sets user metadata as a key/value pair.
-// Can be retrieved through a HEAD request or an event.
-func (p *PostPolicy) SetUserMetadata(key, value string) error {
-	if strings.TrimSpace(key) == "" || key == "" {
-		return errInvalidArgument("Key is empty")
-	}
-	if strings.TrimSpace(value) == "" || value == "" {
-		return errInvalidArgument("Value is empty")
-	}
-	headerName := fmt.Sprintf("x-amz-meta-%s", key)
-	policyCond := policyCondition{
-		matchType: "eq",
-		condition: fmt.Sprintf("$%s", headerName),
-		value:     value,
-	}
-	if err := p.addNewPolicy(policyCond); err != nil {
-		return err
-	}
-	p.formData[headerName] = value
-	return nil
-}
-
-// SetChecksum sets the checksum of the request.
-func (p *PostPolicy) SetChecksum(c Checksum) {
-	if c.IsSet() {
-		p.formData[amzChecksumAlgo] = c.Type.String()
-		p.formData[c.Type.Key()] = c.Encoded()
-	}
-}
-
-// SetEncryption - sets encryption headers for POST API
-func (p *PostPolicy) SetEncryption(sse encrypt.ServerSide) {
-	if sse == nil {
-		return
-	}
-	h := http.Header{}
-	sse.Marshal(h)
-	for k, v := range h {
-		p.formData[k] = v[0]
-	}
-}
-
-// SetUserData - Sets user data as a key/value pair.
-// Can be retrieved through a HEAD request or an event.
-func (p *PostPolicy) SetUserData(key, value string) error {
-	if key == "" {
-		return errInvalidArgument("Key is empty")
-	}
-	if value == "" {
-		return errInvalidArgument("Value is empty")
-	}
-	headerName := fmt.Sprintf("x-amz-%s", key)
-	policyCond := policyCondition{
-		matchType: "eq",
-		condition: fmt.Sprintf("$%s", headerName),
-		value:     value,
-	}
-	if err := p.addNewPolicy(policyCond); err != nil {
-		return err
-	}
-	p.formData[headerName] = value
-	return nil
-}
-
-// addNewPolicy - internal helper to validate adding new policies.
-// Can use starts-with with an empty value ("") to allow any content within a form field.
-func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
-	if policyCond.matchType == "" || policyCond.condition == "" {
-		return errInvalidArgument("Policy fields are empty.")
-	}
-	if policyCond.matchType != "starts-with" && policyCond.value == "" {
-		return errInvalidArgument("Policy value is empty.")
-	}
-	p.conditions = append(p.conditions, policyCond)
-	return nil
-}
-
-// String function for printing policy in json formatted string.
-func (p PostPolicy) String() string {
-	return string(p.marshalJSON())
-}
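Putting the setters together, the JSON that marshalJSON (below) produces has an "expiration" key plus a "conditions" array; the base64 of that JSON is what goes into the POST form's "policy" field. A hand-assembled sketch of the same shape, with made-up bucket and key names:

    package main

    import (
    	"encoding/base64"
    	"fmt"
    )

    func main() {
    	// Hypothetical policy document mirroring the layout built by marshalJSON.
    	policy := `{"expiration":"2024-01-02T15:04:05.000Z",` +
    		`"conditions":[["eq","$bucket","mybucket"],["eq","$key","photos/cat.png"],` +
    		`["content-length-range", 1, 1048576]]}`
    	fmt.Println(base64.StdEncoding.EncodeToString([]byte(policy)))
    }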
-
-// marshalJSON - Provides Marshaled JSON in bytes.
-func (p PostPolicy) marshalJSON() []byte {
-	expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
-	var conditionsStr string
-	conditions := []string{}
-	for _, po := range p.conditions {
-		conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
-	}
-	if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
-		conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
-			p.contentLengthRange.min, p.contentLengthRange.max))
-	}
-	if len(conditions) > 0 {
-		conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
-	}
-	retStr := "{"
-	retStr = retStr + expirationStr + ","
-	retStr += conditionsStr
-	retStr += "}"
-	return []byte(retStr)
-}
-
-// base64 - Produces base64 of PostPolicy's Marshaled json.
-func (p PostPolicy) base64() string {
-	return base64.StdEncoding.EncodeToString(p.marshalJSON())
-}
diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go
deleted file mode 100644
index bfeea95..0000000
--- a/vendor/github.com/minio/minio-go/v7/retry-continous.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import "time"
-
-// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
-func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
-	attemptCh := make(chan int)
-
-	// normalize jitter to the range [0, 1.0]
-	if jitter < NoJitter {
-		jitter = NoJitter
-	}
-	if jitter > MaxJitter {
-		jitter = MaxJitter
-	}
-
-	// computes the exponential backoff duration according to
-	// https://www.awsarchitectureblog.com/2015/03/backoff.html
-	exponentialBackoffWait := func(attempt int) time.Duration {
-		// 1<<uint(attempt) below could overflow, so limit the value of attempt
-		maxAttempt := 30
-		if attempt > maxAttempt {
-			attempt = maxAttempt
-		}
-		// sleep = random_between(0, min(cap, base * 2 ** attempt))
-		sleep := unit * time.Duration(1<<uint(attempt))
-		if sleep > cap {
-			sleep = cap
-		}
-		if jitter != NoJitter {
-			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
-		}
-		return sleep
-	}
-
-	go func() {
-		defer close(attemptCh)
-		var nextBackoff int
-		for {
-			select {
-			// Attempts starts.
-			case attemptCh <- nextBackoff:
-				nextBackoff++
-			case <-doneCh:
-				// Stop the routine.
-				return
-			}
-			time.Sleep(exponentialBackoffWait(nextBackoff))
-		}
-	}()
-	return attemptCh
-}
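The backoff computation above is easy to sanity-check in isolation: exponential growth from the base unit, capped, with a random jitter fraction subtracted. A standalone sketch (parameter values are arbitrary):

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    func main() {
    	unit, cap, jitter := 200*time.Millisecond, 2*time.Second, 0.5
    	r := rand.New(rand.NewSource(1)) // fixed seed so the run is reproducible
    	for attempt := 0; attempt < 6; attempt++ {
    		sleep := unit * time.Duration(1<<uint(attempt)) // base * 2^attempt
    		if sleep > cap {
    			sleep = cap // never exceed the cap
    		}
    		sleep -= time.Duration(r.Float64() * float64(sleep) * jitter) // subtract jitter
    		fmt.Printf("attempt %d: sleep %v\n", attempt, sleep)
    	}
    }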
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
deleted file mode 100644
index 1c6105e..0000000
--- a/vendor/github.com/minio/minio-go/v7/retry.go
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"crypto/x509"
-	"errors"
-	"net/http"
-	"net/url"
-	"time"
-)
-
-// MaxRetry is the maximum number of retries before stopping.
-var MaxRetry = 10
-
-// MaxJitter will randomize over the full exponential backoff time
-const MaxJitter = 1.0
-
-// NoJitter disables the use of jitter for randomizing the exponential backoff time
-const NoJitter = 0.0
-
-// DefaultRetryUnit - default unit multiplicative per retry.
-// defaults to 200 * time.Millisecond
-var DefaultRetryUnit = 200 * time.Millisecond
-
-// DefaultRetryCap - Each retry attempt never waits longer than
-// this maximum time duration.
-var DefaultRetryCap = time.Second
-
-// newRetryTimer creates a timer with exponentially increasing
-// delays until the maximum retry attempts are reached.
-func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int {
-	attemptCh := make(chan int)
-
-	// computes the exponential backoff duration according to
-	// https://www.awsarchitectureblog.com/2015/03/backoff.html
-	exponentialBackoffWait := func(attempt int) time.Duration {
-		// normalize jitter to the range [0, 1.0]
-		if jitter < NoJitter {
-			jitter = NoJitter
-		}
-		if jitter > MaxJitter {
-			jitter = MaxJitter
-		}
-
-		// sleep = random_between(0, min(cap, base * 2 ** attempt))
-		sleep := unit * time.Duration(1<<uint(attempt))
-		if sleep > cap {
-			sleep = cap
-		}
-		if jitter != NoJitter {
-			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
-		}
-		return sleep
-	}
-
-	go func() {
-		defer close(attemptCh)
-		for i := 0; i < maxRetry; i++ {
-			select {
-			case attemptCh <- i + 1:
-			case <-ctx.Done():
-				return
-			}
-
-			select {
-			case <-time.After(exponentialBackoffWait(i)):
-			case <-ctx.Done():
-				return
-			}
-		}
-	}()
-	return attemptCh
-}
-
-// List of AWS S3 error codes which are retryable.
-var retryableS3Codes = map[string]struct{}{
-	"RequestError":          {},
-	"RequestTimeout":        {},
-	"Throttling":            {},
-	"ThrottlingException":   {},
-	"RequestLimitExceeded":  {},
-	"RequestThrottled":      {},
-	"InternalError":         {},
-	"ExpiredToken":          {},
-	"ExpiredTokenException": {},
-	"SlowDown":              {},
-	// Add more AWS S3 codes here.
-}
-
-// isS3CodeRetryable - is s3 error code retryable.
-func isS3CodeRetryable(s3Code string) (ok bool) {
-	_, ok = retryableS3Codes[s3Code]
-	return ok
-}
-
-// List of HTTP status codes which are retryable.
-var retryableHTTPStatusCodes = map[int]struct{}{
-	429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
-	499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
-	http.StatusInternalServerError: {},
-	http.StatusBadGateway:          {},
-	http.StatusServiceUnavailable:  {},
-	http.StatusGatewayTimeout:      {},
-	// Add more HTTP status codes here.
-}
-
-// isHTTPStatusRetryable - is HTTP error code retryable.
-func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { - _, ok = retryableHTTPStatusCodes[httpStatusCode] - return ok -} - -// For now, all http Do() requests are retriable except some well defined errors -func isRequestErrorRetryable(err error) bool { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return false - } - if ue, ok := err.(*url.Error); ok { - e := ue.Unwrap() - switch e.(type) { - // x509: certificate signed by unknown authority - case x509.UnknownAuthorityError: - return false - } - switch e.Error() { - case "http: server gave HTTP response to HTTPS client": - return false - } - } - return true -} diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go deleted file mode 100644 index b1de7b6..0000000 --- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// awsS3EndpointMap Amazon S3 endpoint map. -var awsS3EndpointMap = map[string]string{ - "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", - "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", - "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", - "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", - "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", - "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", - "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", - "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", - "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", - "eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com", - "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", - "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", - "eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com", - "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", - "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", - "ap-south-2": "s3.dualstack.ap-south-2.amazonaws.com", - "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", - "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", - "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", - "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", - "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", - "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", - "me-central-1": "s3.dualstack.me-central-1.amazonaws.com", - "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", - "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", - "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", - "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", - "cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn", - "cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn", - "ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com", - "ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com", - 
"il-central-1": "s3.dualstack.il-central-1.amazonaws.com", -} - -// getS3Endpoint get Amazon S3 endpoint based on the bucket location. -func getS3Endpoint(bucketLocation string) (s3Endpoint string) { - s3Endpoint, ok := awsS3EndpointMap[bucketLocation] - if !ok { - // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. - s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" - } - return s3Endpoint -} diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go deleted file mode 100644 index f365157..0000000 --- a/vendor/github.com/minio/minio-go/v7/s3-error.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// Non exhaustive list of AWS S3 standard error responses - -// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -var s3ErrorResponseMap = map[string]string{ - "AccessDenied": "Access Denied.", - "BadDigest": "The Content-Md5 you specified did not match what we received.", - "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", - "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", - "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", - "InternalError": "We encountered an internal error, please try again.", - "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", - "InvalidBucketName": "The specified bucket is not valid.", - "InvalidDigest": "The Content-Md5 you specified is not valid.", - "InvalidRange": "The requested range is not satisfiable", - "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", - "MissingContentLength": "You must provide the Content-Length HTTP header.", - "MissingContentMD5": "Missing required header for this request: Content-Md5.", - "MissingRequestBodyError": "Request body is empty.", - "NoSuchBucket": "The specified bucket does not exist.", - "NoSuchBucketPolicy": "The bucket policy does not exist", - "NoSuchKey": "The specified key does not exist.", - "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", - "NotImplemented": "A header you provided implies functionality that is not implemented", - "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", - "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", - "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. 
diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go
deleted file mode 100644
index f365157..0000000
--- a/vendor/github.com/minio/minio-go/v7/s3-error.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-// Non-exhaustive list of AWS S3 standard error responses
-
-// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
-var s3ErrorResponseMap = map[string]string{
-	"AccessDenied": "Access Denied.",
-	"BadDigest": "The Content-Md5 you specified did not match what we received.",
-	"EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
-	"EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
-	"IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
-	"InternalError": "We encountered an internal error, please try again.",
-	"InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
-	"InvalidBucketName": "The specified bucket is not valid.",
-	"InvalidDigest": "The Content-Md5 you specified is not valid.",
-	"InvalidRange": "The requested range is not satisfiable",
-	"MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
-	"MissingContentLength": "You must provide the Content-Length HTTP header.",
-	"MissingContentMD5": "Missing required header for this request: Content-Md5.",
-	"MissingRequestBodyError": "Request body is empty.",
-	"NoSuchBucket": "The specified bucket does not exist.",
-	"NoSuchBucketPolicy": "The bucket policy does not exist",
-	"NoSuchKey": "The specified key does not exist.",
-	"NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
-	"NotImplemented": "A header you provided implies functionality that is not implemented",
-	"PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
-	"RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
-	"SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
-	"MethodNotAllowed": "The specified method is not allowed against this resource.",
-	"InvalidPart": "One or more of the specified parts could not be found.",
-	"InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
-	"InvalidObjectState": "The operation is not valid for the current state of the object.",
-	"AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
-	"MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
-	"BucketNotEmpty": "The bucket you tried to delete is not empty",
-	"AllAccessDisabled": "All access to this bucket has been disabled.",
-	"MalformedPolicy": "Policy has invalid resource.",
-	"MissingFields": "Missing fields in request.",
-	"AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
-	"MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
-	"BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
-	"InvalidDuration": "Duration provided in the request is invalid.",
-	"XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
-	// Add new API errors here.
-}
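
A sketch of how such a map is typically consumed, falling back to the bare code for entries it does not cover (errorDescription is a hypothetical helper, not part of this file):

	func errorDescription(code string) string {
		if msg, ok := s3ErrorResponseMap[code]; ok {
			return msg
		}
		return code // not in the map: return the code itself
	}
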
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
deleted file mode 100644
index 1bff664..0000000
--- a/vendor/github.com/minio/minio-go/v7/transport.go
+++ /dev/null
@@ -1,83 +0,0 @@
-//go:build go1.7 || go1.8
-// +build go1.7 go1.8
-
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017-2018 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"net"
-	"net/http"
-	"os"
-	"time"
-)
-
-// mustGetSystemCertPool - returns system CAs or an empty pool in case of error (or on Windows)
-func mustGetSystemCertPool() *x509.CertPool {
-	pool, err := x509.SystemCertPool()
-	if err != nil {
-		return x509.NewCertPool()
-	}
-	return pool
-}
-
-// DefaultTransport - this default transport is similar to
-// http.DefaultTransport but with the additional param DisableCompression
-// set to true to avoid decompressing content with 'gzip' encoding.
-var DefaultTransport = func(secure bool) (*http.Transport, error) {
-	tr := &http.Transport{
-		Proxy: http.ProxyFromEnvironment,
-		DialContext: (&net.Dialer{
-			Timeout: 30 * time.Second,
-			KeepAlive: 30 * time.Second,
-		}).DialContext,
-		MaxIdleConns: 256,
-		MaxIdleConnsPerHost: 16,
-		ResponseHeaderTimeout: time.Minute,
-		IdleConnTimeout: time.Minute,
-		TLSHandshakeTimeout: 10 * time.Second,
-		ExpectContinueTimeout: 10 * time.Second,
-		// Set this value so that the underlying transport round-tripper
-		// doesn't try to auto decode the body of objects with
-		// content-encoding set to `gzip`.
-		//
-		// Refer:
-		// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
-		DisableCompression: true,
-	}
-
-	if secure {
-		tr.TLSClientConfig = &tls.Config{
-			// Can't use SSLv3 because of POODLE and BEAST
-			// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
-			// Can't use TLSv1.1 because of RC4 cipher usage
-			MinVersion: tls.VersionTLS12,
-		}
-		if f := os.Getenv("SSL_CERT_FILE"); f != "" {
-			rootCAs := mustGetSystemCertPool()
-			data, err := os.ReadFile(f)
-			if err == nil {
-				rootCAs.AppendCertsFromPEM(data)
-			}
-			tr.TLSClientConfig.RootCAs = rootCAs
-		}
-	}
-	return tr, nil
-}
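
DefaultTransport is exported, so callers can hand it to the client constructor. A sketch against the minio-go v7 API, assuming imports of "log", "github.com/minio/minio-go/v7" and "github.com/minio/minio-go/v7/pkg/credentials" (endpoint and credentials are placeholders):

	tr, err := minio.DefaultTransport(true) // secure: TLS 1.2+, honors SSL_CERT_FILE
	if err != nil {
		log.Fatalln(err)
	}
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:     credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:    true,
		Transport: tr,
	})
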
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
deleted file mode 100644
index e39eba0..0000000
--- a/vendor/github.com/minio/minio-go/v7/utils.go
+++ /dev/null
@@ -1,693 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"crypto/md5"
-	fipssha256 "crypto/sha256"
-	"encoding/base64"
-	"encoding/hex"
-	"encoding/xml"
-	"errors"
-	"fmt"
-	"hash"
-	"io"
-	"math/rand"
-	"net"
-	"net/http"
-	"net/url"
-	"regexp"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	md5simd "github.com/minio/md5-simd"
-	"github.com/minio/minio-go/v7/pkg/encrypt"
-	"github.com/minio/minio-go/v7/pkg/s3utils"
-	"github.com/minio/sha256-simd"
-)
-
-func trimEtag(etag string) string {
-	etag = strings.TrimPrefix(etag, "\"")
-	return strings.TrimSuffix(etag, "\"")
-}
-
-var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)
-
-func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
-	if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
-		expTime, err := parseRFC7231Time(matches[1])
-		if err != nil {
-			return time.Time{}, ""
-		}
-		return expTime, matches[2]
-	}
-	return time.Time{}, ""
-}
-
-var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)
-
-func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) {
-	matches := restoreRegex.FindStringSubmatch(restore)
-	if len(matches) != 4 {
-		return false, time.Time{}, errors.New("unexpected restore header")
-	}
-	ongoing, err = strconv.ParseBool(matches[1])
-	if err != nil {
-		return false, time.Time{}, err
-	}
-	if matches[3] != "" {
-		expTime, err = parseRFC7231Time(matches[3])
-		if err != nil {
-			return false, time.Time{}, err
-		}
-	}
-	return
-}
-
-// xmlDecoder provides the decoded value from an XML body.
-func xmlDecoder(body io.Reader, v interface{}) error {
-	d := xml.NewDecoder(body)
-	return d.Decode(v)
-}
-
-// sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded.
-func sum256Hex(data []byte) string {
-	hash := newSHA256Hasher()
-	defer hash.Close()
-	hash.Write(data)
-	return hex.EncodeToString(hash.Sum(nil))
-}
-
-// sumMD5Base64 calculates the md5sum of an input byte array and returns it base64 encoded.
-func sumMD5Base64(data []byte) string {
-	hash := newMd5Hasher()
-	defer hash.Close()
-	hash.Write(data)
-	return base64.StdEncoding.EncodeToString(hash.Sum(nil))
-}
-
-// getEndpointURL - construct a new endpoint.
-func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
-	// If secure is false, use 'http' scheme.
-	scheme := "https"
-	if !secure {
-		scheme = "http"
-	}
-
-	// Construct a secured endpoint URL.
-	endpointURLStr := scheme + "://" + endpoint
-	endpointURL, err := url.Parse(endpointURLStr)
-	if err != nil {
-		return nil, err
-	}
-
-	// Validate incoming endpoint URL.
-	if err := isValidEndpointURL(*endpointURL); err != nil {
-		return nil, err
-	}
-	return endpointURL, nil
-}
-
-// closeResponse closes a non-nil response with any response Body.
-// Convenient wrapper to drain any remaining data on the response body.
-//
-// Subsequently this allows the golang http RoundTripper
-// to re-use the same connection for future requests.
-func closeResponse(resp *http.Response) {
-	// Callers should close resp.Body when done reading from it.
-	// If resp.Body is not closed, the Client's underlying RoundTripper
-	// (typically Transport) may not be able to re-use a persistent TCP
-	// connection to the server for a subsequent "keep-alive" request.
-	if resp != nil && resp.Body != nil {
-		// Drain any remaining Body and then close the connection.
-		// Without this, closing the connection would disallow re-using
-		// the same connection for future uses.
-		// - http://stackoverflow.com/a/17961593/4465767
-		io.Copy(io.Discard, resp.Body)
-		resp.Body.Close()
-	}
-}
-
-var (
-	// Hex encoded string of nil sha256sum bytes.
-	emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-
-	// Sentinel URL is the default url value which is invalid.
-	sentinelURL = url.URL{}
-)
-
-// Verify if input endpoint URL is valid.
-func isValidEndpointURL(endpointURL url.URL) error {
-	if endpointURL == sentinelURL {
-		return errInvalidArgument("Endpoint url cannot be empty.")
-	}
-	if endpointURL.Path != "/" && endpointURL.Path != "" {
-		return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
-	}
-	host := endpointURL.Hostname()
-	if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
-		msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards."
-		return errInvalidArgument(msg)
-	}
-
-	if strings.Contains(host, ".s3.amazonaws.com") {
-		if !s3utils.IsAmazonEndpoint(endpointURL) {
-			return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
-		}
-	}
-	if strings.Contains(host, ".googleapis.com") {
-		if !s3utils.IsGoogleEndpoint(endpointURL) {
-			return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
-		}
-	}
-	return nil
-}
-
-// Verify if input expires value is valid.
-func isValidExpiry(expires time.Duration) error {
-	expireSeconds := int64(expires / time.Second)
-	if expireSeconds < 1 {
-		return errInvalidArgument("Expires cannot be less than 1 second.")
-	}
-	if expireSeconds > 604800 {
-		return errInvalidArgument("Expires cannot be greater than 7 days.")
-	}
-	return nil
-}
-
-// Extract only the necessary metadata header key/values by
-// filtering against a list of header keys to preserve.
-func extractObjMetadata(header http.Header) http.Header {
-	preserveKeys := []string{
-		"Content-Type",
-		"Cache-Control",
-		"Content-Encoding",
-		"Content-Language",
-		"Content-Disposition",
-		"X-Amz-Storage-Class",
-		"X-Amz-Object-Lock-Mode",
-		"X-Amz-Object-Lock-Retain-Until-Date",
-		"X-Amz-Object-Lock-Legal-Hold",
-		"X-Amz-Website-Redirect-Location",
-		"X-Amz-Server-Side-Encryption",
-		"X-Amz-Tagging-Count",
-		"X-Amz-Meta-",
-		// Add new headers to be preserved.
-		// if you add new headers here, please extend
-		// PutObjectOptions{} to preserve them
-		// upon upload as well.
-	}
-	filteredHeader := make(http.Header)
-	for k, v := range header {
-		var found bool
-		for _, prefix := range preserveKeys {
-			if !strings.HasPrefix(k, prefix) {
-				continue
-			}
-			found = true
-			break
-		}
-		if found {
-			filteredHeader[k] = v
-		}
-	}
-	return filteredHeader
-}
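
isValidExpiry enforces S3's presigned-URL bounds: at least 1 second, at most 7 days (604800 seconds). Illustrative calls:

	isValidExpiry(15 * time.Minute)       // nil
	isValidExpiry(500 * time.Millisecond) // error: less than 1 second
	isValidExpiry(8 * 24 * time.Hour)     // error: greater than 7 days
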
-
-const (
-	// RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
-	rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
-	rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
-	rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
-)
-
-func parseTime(t string, formats ...string) (time.Time, error) {
-	for _, format := range formats {
-		tt, err := time.Parse(format, t)
-		if err == nil {
-			return tt, nil
-		}
-	}
-	return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats)
-}
-
-func parseRFC7231Time(lastModified string) (time.Time, error) {
-	return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear)
-}
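
parseTime tries each layout in order, so all three header variants accepted by parseRFC7231Time parse successfully (illustrative values):

	parseRFC7231Time("Tue, 29 Apr 2014 18:30:38 GMT") // rfc822TimeFormat
	parseRFC7231Time("Tue,  8 Apr 2014 18:30:38 GMT") // space-padded single-digit day
	parseRFC7231Time("Tue,  8 Apr 14 18:30:38 GMT")   // two-digit year variant
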
-
-// ToObjectInfo converts http header values into ObjectInfo type,
-// extracts metadata and fills in all the necessary fields in ObjectInfo.
-func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, error) {
-	var err error
-	// Trim off the odd double quotes from ETag in the beginning and end.
-	etag := trimEtag(h.Get("ETag"))
-
-	// Parse Content-Length if it exists.
-	var size int64 = -1
-	contentLengthStr := h.Get("Content-Length")
-	if contentLengthStr != "" {
-		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
-		if err != nil {
-			// Content-Length is not valid
-			return ObjectInfo{}, ErrorResponse{
-				Code: "InternalError",
-				Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
-				BucketName: bucketName,
-				Key: objectName,
-				RequestID: h.Get("x-amz-request-id"),
-				HostID: h.Get("x-amz-id-2"),
-				Region: h.Get("x-amz-bucket-region"),
-			}
-		}
-	}
-
-	// Parse Last-Modified as http time format.
-	mtime, err := parseRFC7231Time(h.Get("Last-Modified"))
-	if err != nil {
-		return ObjectInfo{}, ErrorResponse{
-			Code: "InternalError",
-			Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err),
-			BucketName: bucketName,
-			Key: objectName,
-			RequestID: h.Get("x-amz-request-id"),
-			HostID: h.Get("x-amz-id-2"),
-			Region: h.Get("x-amz-bucket-region"),
-		}
-	}
-
-	// Fetch content type if any present.
-	contentType := strings.TrimSpace(h.Get("Content-Type"))
-	if contentType == "" {
-		contentType = "application/octet-stream"
-	}
-
-	expiryStr := h.Get("Expires")
-	var expiry time.Time
-	if expiryStr != "" {
-		expiry, err = parseRFC7231Time(expiryStr)
-		if err != nil {
-			return ObjectInfo{}, ErrorResponse{
-				Code: "InternalError",
-				Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err),
-				BucketName: bucketName,
-				Key: objectName,
-				RequestID: h.Get("x-amz-request-id"),
-				HostID: h.Get("x-amz-id-2"),
-				Region: h.Get("x-amz-bucket-region"),
-			}
-		}
-	}
-
-	metadata := extractObjMetadata(h)
-	userMetadata := make(map[string]string)
-	for k, v := range metadata {
-		if strings.HasPrefix(k, "X-Amz-Meta-") {
-			userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
-		}
-	}
-	userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))
-
-	var tagCount int
-	if count := h.Get(amzTaggingCount); count != "" {
-		tagCount, err = strconv.Atoi(count)
-		if err != nil {
-			return ObjectInfo{}, ErrorResponse{
-				Code: "InternalError",
-				Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
-				BucketName: bucketName,
-				Key: objectName,
-				RequestID: h.Get("x-amz-request-id"),
-				HostID: h.Get("x-amz-id-2"),
-				Region: h.Get("x-amz-bucket-region"),
-			}
-		}
-	}
-
-	// Nil if not found
-	var restore *RestoreInfo
-	if restoreHdr := h.Get(amzRestore); restoreHdr != "" {
-		ongoing, expTime, err := amzRestoreToStruct(restoreHdr)
-		if err != nil {
-			return ObjectInfo{}, err
-		}
-		restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime}
-	}
-
-	// extract lifecycle expiry date and rule ID
-	expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))
-
-	deleteMarker := h.Get(amzDeleteMarker) == "true"
-
-	// Save object metadata info.
-	return ObjectInfo{
-		ETag: etag,
-		Key: objectName,
-		Size: size,
-		LastModified: mtime,
-		ContentType: contentType,
-		Expires: expiry,
-		VersionID: h.Get(amzVersionID),
-		IsDeleteMarker: deleteMarker,
-		ReplicationStatus: h.Get(amzReplicationStatus),
-		Expiration: expTime,
-		ExpirationRuleID: ruleID,
-		// Extract only the relevant header keys describing the object.
-		// The extractObjMetadata call above filters out a standard set
-		// of keys which are not part of object metadata.
-		Metadata: metadata,
-		UserMetadata: userMetadata,
-		UserTags: userTags,
-		UserTagCount: tagCount,
-		Restore: restore,
-
-		// Checksum values
-		ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
-		ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
-		ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
-		ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
-	}, nil
-}
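
A sketch of the intended call site, assuming resp is the *http.Response of a HEAD-object request (bucket and object names are placeholders):

	info, err := ToObjectInfo("my-bucket", "my-object", resp.Header)
	if err != nil {
		return err
	}
	fmt.Println(info.ETag, info.Size, info.LastModified, info.ContentType)
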
-
-var readFull = func(r io.Reader, buf []byte) (n int, err error) {
-	// ReadFull reads exactly len(buf) bytes from r into buf.
-	// It returns the number of bytes copied and an error if
-	// fewer bytes were read. The error is EOF only if no bytes
-	// were read. If an EOF happens after reading some but not
-	// all the bytes, ReadFull returns ErrUnexpectedEOF.
-	// On return, n == len(buf) if and only if err == nil.
-	// If r returns an error having read at least len(buf) bytes,
-	// the error is dropped.
-	for n < len(buf) && err == nil {
-		var nn int
-		nn, err = r.Read(buf[n:])
-		// Some spurious io.Readers return io.ErrUnexpectedEOF
-		// when nn == 0. This behavior is undocumented, so rather
-		// than use the io.ReadFull implementation and be forced
-		// into custom handling, we deliberately modify the original
-		// io.ReadFull logic to avoid the issue:
-		// io.ErrUnexpectedEOF with nn == 0 really means io.EOF.
-		if err == io.ErrUnexpectedEOF && nn == 0 {
-			err = io.EOF
-		}
-		n += nn
-	}
-	if n >= len(buf) {
-		err = nil
-	} else if n > 0 && err == io.EOF {
-		err = io.ErrUnexpectedEOF
-	}
-	return
-}
-
-// regCred matches the credential string in an HTTP header
-var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
-
-// regSign matches the signature string in an HTTP header
-var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
-
-// Redact out signature value from authorization string.
-func redactSignature(origAuth string) string {
-	if !strings.HasPrefix(origAuth, signV4Algorithm) {
-		// Set a temporary redacted auth
-		return "AWS **REDACTED**:**REDACTED**"
-	}
-
-	// Signature V4 authorization header.
-
-	// Strip out accessKeyID from:
-	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
-	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
-
-	// Strip out 256-bit signature from: Signature=<256-bit signature>
-	return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
-}
-
-// getDefaultLocation returns the location based on the input
-// URL `u`; if a region override is provided then the location
-// defaults to regionOverride.
-//
-// If no other cases match then the location is set to `us-east-1`
-// as a last resort.
-func getDefaultLocation(u url.URL, regionOverride string) (location string) {
-	if regionOverride != "" {
-		return regionOverride
-	}
-	region := s3utils.GetRegionFromURL(u)
-	if region == "" {
-		region = "us-east-1"
-	}
-	return region
-}
-
-var supportedHeaders = map[string]bool{
-	"content-type": true,
-	"cache-control": true,
-	"content-encoding": true,
-	"content-disposition": true,
-	"content-language": true,
-	"x-amz-website-redirect-location": true,
-	"x-amz-object-lock-mode": true,
-	"x-amz-metadata-directive": true,
-	"x-amz-object-lock-retain-until-date": true,
-	"expires": true,
-	"x-amz-replication-status": true,
-	// Add more supported headers here.
-	// Must be lower case.
-}
-
-// isStorageClassHeader returns true if the header is a supported storage class header
-func isStorageClassHeader(headerKey string) bool {
-	return strings.EqualFold(amzStorageClass, headerKey)
-}
-
-// isStandardHeader returns true if header is a supported header and not a custom header
-func isStandardHeader(headerKey string) bool {
-	return supportedHeaders[strings.ToLower(headerKey)]
-}
-
-// sseHeaders is list of server side encryption headers
-var sseHeaders = map[string]bool{
-	"x-amz-server-side-encryption": true,
-	"x-amz-server-side-encryption-aws-kms-key-id": true,
-	"x-amz-server-side-encryption-context": true,
-	"x-amz-server-side-encryption-customer-algorithm": true,
-	"x-amz-server-side-encryption-customer-key": true,
-	"x-amz-server-side-encryption-customer-key-md5": true,
-	// Add more supported headers here.
-	// Must be lower case.
-}
-
-// isSSEHeader returns true if header is a server side encryption header.
-func isSSEHeader(headerKey string) bool {
-	return sseHeaders[strings.ToLower(headerKey)]
-}
-
-// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
-func isAmzHeader(headerKey string) bool {
-	key := strings.ToLower(headerKey)
-
-	return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
-}
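
What redactSignature does to a well-formed V4 header before it is logged (illustrative values only):

	in := "AWS4-HMAC-SHA256 Credential=AKIAEXAMPLE/20240102/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=0a1b2c3d4e5f"
	out := redactSignature(in)
	// "AWS4-HMAC-SHA256 Credential=**REDACTED**/20240102/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=**REDACTED**"
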
-
-// supportedQueryValues is a list of query strings that can be passed in when using GetObject.
-var supportedQueryValues = map[string]bool{
-	"partNumber": true,
-	"versionId": true,
-	"response-cache-control": true,
-	"response-content-disposition": true,
-	"response-content-encoding": true,
-	"response-content-language": true,
-	"response-content-type": true,
-	"response-expires": true,
-}
-
-// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized.
-func isStandardQueryValue(qsKey string) bool {
-	return supportedQueryValues[qsKey]
-}
-
-// Per documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom, the
-// set of query params starting with "x-" are ignored by S3.
-const allowedCustomQueryPrefix = "x-"
-
-func isCustomQueryValue(qsKey string) bool {
-	return strings.HasPrefix(qsKey, allowedCustomQueryPrefix)
-}
-
-var (
-	md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
-	sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
-)
-
-func newMd5Hasher() md5simd.Hasher {
-	return &hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true}
-}
-
-func newSHA256Hasher() md5simd.Hasher {
-	if encrypt.FIPS {
-		return &hashWrapper{Hash: fipssha256.New(), isSHA256: true}
-	}
-	return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true}
-}
-
-// hashWrapper implements the md5simd.Hasher interface.
-type hashWrapper struct {
-	hash.Hash
-	isMD5 bool
-	isSHA256 bool
-}
-
-// Close will put the hasher back into the pool.
-func (m *hashWrapper) Close() {
-	if m.isMD5 && m.Hash != nil {
-		m.Reset()
-		md5Pool.Put(m.Hash)
-	}
-	if m.isSHA256 && m.Hash != nil {
-		m.Reset()
-		sha256Pool.Put(m.Hash)
-	}
-	m.Hash = nil
-}
-
-const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
-const (
-	letterIdxBits = 6 // 6 bits to represent a letter index
-	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
-	letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
-)
-
-// randString generates random names and prepends them with a known prefix.
-func randString(n int, src rand.Source, prefix string) string {
-	b := make([]byte, n)
-	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
-	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
-		if remain == 0 {
-			cache, remain = src.Int63(), letterIdxMax
-		}
-		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
-			b[i] = letterBytes[idx]
-			i--
-		}
-		cache >>= letterIdxBits
-		remain--
-	}
-	return prefix + string(b[0:30-len(prefix)])
-}
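
randString is the classic 63-random-bits, 6-bits-per-letter trick; note the result is always trimmed to 30 characters including the prefix. A plausible call, mirroring how the library names test objects (the prefix is illustrative):

	src := rand.NewSource(time.Now().UnixNano())
	name := randString(60, src, "minio-go-test-") // "minio-go-test-" + 16 random characters
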
-
-// IsNetworkOrHostDown - if there was a network error or if the host is down.
-// expectTimeouts indicates that *context* timeouts are expected and do not
-// indicate a downed host. Other timeouts still count as down.
-func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
-	if err == nil {
-		return false
-	}
-
-	if errors.Is(err, context.Canceled) {
-		return false
-	}
-
-	if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
-		return false
-	}
-
-	if errors.Is(err, context.DeadlineExceeded) {
-		return true
-	}
-
-	// We need to figure out if the error is either a timeout
-	// or a non-temporary error.
-	urlErr := &url.Error{}
-	if errors.As(err, &urlErr) {
-		switch urlErr.Err.(type) {
-		case *net.DNSError, *net.OpError, net.UnknownNetworkError:
-			return true
-		}
-	}
-	var e net.Error
-	if errors.As(err, &e) {
-		if e.Timeout() {
-			return true
-		}
-	}
-
-	// Fallback to other mechanisms.
-	switch {
-	case strings.Contains(err.Error(), "Connection closed by foreign host"):
-		return true
-	case strings.Contains(err.Error(), "TLS handshake timeout"):
-		// If error is - tlsHandshakeTimeoutError.
-		return true
-	case strings.Contains(err.Error(), "i/o timeout"):
-		// If error is - tcp timeoutError.
-		return true
-	case strings.Contains(err.Error(), "connection timed out"):
-		// If err is a net.Dial timeout.
-		return true
-	case strings.Contains(err.Error(), "connection refused"):
-		// If err is connection refused
-		return true
-
-	case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
-		// Denial errors
-		return true
-	}
-	return false
-}
-
-// newHashReaderWrapper will hash all reads done through r.
-// When r returns io.EOF the done function will be called with the sum.
-func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper {
-	return &hashReaderWrapper{
-		r: r,
-		h: h,
-		done: done,
-	}
-}
-
-type hashReaderWrapper struct {
-	r io.Reader
-	h hash.Hash
-	done func(hash []byte)
-}
-
-// Read implements the io.Reader interface.
-func (h *hashReaderWrapper) Read(p []byte) (n int, err error) {
-	n, err = h.r.Read(p)
-	if n > 0 {
-		n2, err := h.h.Write(p[:n])
-		if err != nil {
-			return 0, err
-		}
-		if n2 != n {
-			return 0, io.ErrShortWrite
-		}
-	}
-	if err == io.EOF {
-		// Call back
-		h.done(h.h.Sum(nil))
-	}
-	return n, err
-}
--
cgit v1.2.3