From 404aeae4545d2426c089a5f8d5e82dae56f5212b Mon Sep 17 00:00:00 2001
From: Rutger Broekhoff
Date: Fri, 29 Dec 2023 21:31:53 +0100
Subject: Make Nix builds work
---
.../minio-go/v7/pkg/credentials/assume_role.go | 242 +++++
.../minio/minio-go/v7/pkg/credentials/chain.go | 88 ++
.../minio-go/v7/pkg/credentials/config.json.sample | 17 +
.../minio-go/v7/pkg/credentials/credentials.go | 193 ++++
.../minio-go/v7/pkg/credentials/credentials.json | 7 +
.../minio-go/v7/pkg/credentials/credentials.sample | 15 +
.../minio/minio-go/v7/pkg/credentials/doc.go | 60 ++
.../minio/minio-go/v7/pkg/credentials/env_aws.go | 71 ++
.../minio/minio-go/v7/pkg/credentials/env_minio.go | 68 ++
.../minio-go/v7/pkg/credentials/error_response.go | 95 ++
.../v7/pkg/credentials/file_aws_credentials.go | 157 ++++
.../v7/pkg/credentials/file_minio_client.go | 139 +++
.../minio/minio-go/v7/pkg/credentials/iam_aws.go | 433 +++++++++
.../minio-go/v7/pkg/credentials/signature_type.go | 77 ++
.../minio/minio-go/v7/pkg/credentials/static.go | 67 ++
.../v7/pkg/credentials/sts_client_grants.go | 182 ++++
.../v7/pkg/credentials/sts_custom_identity.go | 146 ++++
.../v7/pkg/credentials/sts_ldap_identity.go | 189 ++++
.../v7/pkg/credentials/sts_tls_identity.go | 211 +++++
.../v7/pkg/credentials/sts_web_identity.go | 205 +++++
.../minio/minio-go/v7/pkg/encrypt/fips_disabled.go | 24 +
.../minio/minio-go/v7/pkg/encrypt/fips_enabled.go | 24 +
.../minio/minio-go/v7/pkg/encrypt/server-side.go | 198 +++++
.../minio/minio-go/v7/pkg/lifecycle/lifecycle.go | 491 +++++++++++
.../minio/minio-go/v7/pkg/notification/info.go | 78 ++
.../minio-go/v7/pkg/notification/notification.go | 440 ++++++++++
.../minio-go/v7/pkg/replication/replication.go | 971 +++++++++++++++++++++
.../minio/minio-go/v7/pkg/s3utils/utils.go | 411 +++++++++
.../minio/minio-go/v7/pkg/set/stringset.go | 200 +++++
...request-signature-streaming-unsigned-trailer.go | 224 +++++
.../v7/pkg/signer/request-signature-streaming.go | 403 +++++++++
.../minio-go/v7/pkg/signer/request-signature-v2.go | 319 +++++++
.../minio-go/v7/pkg/signer/request-signature-v4.go | 351 ++++++++
.../minio/minio-go/v7/pkg/signer/utils.go | 62 ++
vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go | 66 ++
.../github.com/minio/minio-go/v7/pkg/tags/tags.go | 413 +++++++++
36 files changed, 7337 insertions(+)
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
new file mode 100644
index 0000000..800c4a2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -0,0 +1,242 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/xml"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/signer"
+)
+
+// AssumeRoleResponse contains the result of a successful AssumeRole request.
+type AssumeRoleResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`
+
+ Result AssumeRoleResult `xml:"AssumeRoleResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// AssumeRoleResult - Contains the response to a successful AssumeRole
+// request, including temporary credentials that can be used to make
+// MinIO API requests.
+type AssumeRoleResult struct {
+ // The identifiers for the temporary security credentials that the operation
+ // returns.
+ AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:",omitempty"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize int `xml:",omitempty"`
+}
+
+// An STSAssumeRole retrieves credentials from the MinIO service, and keeps
+// track of whether those credentials have expired.
+type STSAssumeRole struct {
+ Expiry
+
+ // Required http Client to use when connecting to the MinIO STS service.
+ Client *http.Client
+
+ // STS endpoint to fetch STS credentials.
+ STSEndpoint string
+
+ // various options for this request.
+ Options STSAssumeRoleOptions
+}
+
+// STSAssumeRoleOptions collection of various input options
+// to obtain AssumeRole credentials.
+type STSAssumeRoleOptions struct {
+ // Mandatory inputs.
+ AccessKey string
+ SecretKey string
+
+ SessionToken string // Optional if the first request is made with temporary credentials.
+ Policy string // Optional to assign a policy to the assumed role
+
+ Location string // Optional, commonly needed with AWS STS.
+ DurationSeconds int // Optional, defaults to 1 hour.
+
+ // Optional, only valid when used with AWS STS.
+ RoleARN string
+ RoleSessionName string
+ ExternalID string
+}
+
+// NewSTSAssumeRole returns a pointer to a new
+// Credentials object wrapping the STSAssumeRole.
+func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
+ if stsEndpoint == "" {
+ return nil, errors.New("STS endpoint cannot be empty")
+ }
+ if opts.AccessKey == "" || opts.SecretKey == "" {
+ return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
+ }
+ return New(&STSAssumeRole{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ STSEndpoint: stsEndpoint,
+ Options: opts,
+ }), nil
+}
+
+const defaultDurationSeconds = 3600
+
+// closeResponse closes a non-nil response and drains any remaining
+// data from the response Body.
+//
+// This allows the Go http RoundTripper to re-use the same
+// connection for future requests.
+func closeResponse(resp *http.Response) {
+ // Callers should close resp.Body when done reading from it.
+ // If resp.Body is not closed, the Client's underlying RoundTripper
+ // (typically Transport) may not be able to re-use a persistent TCP
+ // connection to the server for a subsequent "keep-alive" request.
+ if resp != nil && resp.Body != nil {
+ // Drain any remaining Body and then close the connection.
+ // Without draining first, closing the connection would prevent
+ // re-using it for future requests.
+ // - http://stackoverflow.com/a/17961593/4465767
+ io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+ }
+}
+
+func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) {
+ v := url.Values{}
+ v.Set("Action", "AssumeRole")
+ v.Set("Version", STSVersion)
+ if opts.RoleARN != "" {
+ v.Set("RoleArn", opts.RoleARN)
+ }
+ if opts.RoleSessionName != "" {
+ v.Set("RoleSessionName", opts.RoleSessionName)
+ }
+ if opts.DurationSeconds > defaultDurationSeconds {
+ v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds))
+ } else {
+ v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds))
+ }
+ if opts.Policy != "" {
+ v.Set("Policy", opts.Policy)
+ }
+ if opts.ExternalID != "" {
+ v.Set("ExternalId", opts.ExternalID)
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ u.Path = "/"
+
+ postBody := strings.NewReader(v.Encode())
+ hash := sha256.New()
+ if _, err = io.Copy(hash, postBody); err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ postBody.Seek(0, 0)
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), postBody)
+ if err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
+ if opts.SessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", opts.SessionToken)
+ }
+ req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)
+
+ resp, err := clnt.Do(req)
+ if err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ defer closeResponse(resp)
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return AssumeRoleResponse{}, errResp
+ }
+
+ a := AssumeRoleResponse{}
+ if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ return a, nil
+}
+
+// Retrieve retrieves credentials from the MinIO service.
+// Error will be returned if the request fails.
+func (m *STSAssumeRole) Retrieve() (Value, error) {
+ a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // A negative expiry window (DefaultExpiryWindow) triggers a refresh
+ // once 80% of the credentials' lifetime has elapsed.
+ m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: a.Result.Credentials.AccessKey,
+ SecretAccessKey: a.Result.Credentials.SecretKey,
+ SessionToken: a.Result.Credentials.SessionToken,
+ SignerType: SignatureV4,
+ }, nil
+}
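For orientation, here is a minimal sketch of how this provider might be wired up from application code; the endpoint and keys are placeholders, not values from this patch:

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
        // Hypothetical STS endpoint and keys; substitute real values.
        creds, err := credentials.NewSTSAssumeRole("https://sts.example.com", credentials.STSAssumeRoleOptions{
            AccessKey: "EXAMPLE-ACCESS-KEY",
            SecretKey: "EXAMPLE-SECRET-KEY",
            Location:  "us-east-1",
        })
        if err != nil {
            log.Fatalln(err)
        }
        v, err := creds.Get() // the first Get() calls Retrieve() and caches the result
        if err != nil {
            log.Fatalln(err)
        }
        fmt.Println(v.AccessKeyID, v.SessionToken)
    }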
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
new file mode 100644
index 0000000..ddccfb1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
@@ -0,0 +1,88 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Chain will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The Chain provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve a valid credentials Value, Chain's
+// Retrieve() will return the no-credentials (anonymous) value.
+//
+// If a Provider is found which returns a valid credentials Value, Chain
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again after IsExpired() is true.
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvAWSS3{},
+// &credentials.EnvMinio{},
+// })
+//
+// // Usage of ChainCredentials.
+// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
+// if err != nil {
+// log.Fatalln(err)
+// }
+type Chain struct {
+ Providers []Provider
+ curr Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return New(&Chain{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or no credentials (anonymous)
+// if no credentials provider returned any value.
+//
+// If a provider is found with credentials, it will be cached and any calls
+// to IsExpired() will return the expired state of the cached provider.
+func (c *Chain) Retrieve() (Value, error) {
+ for _, p := range c.Providers {
+ creds, _ := p.Retrieve()
+ // Always prioritize non-anonymous providers, if any.
+ if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
+ continue
+ }
+ c.curr = p
+ return creds, nil
+ }
+ // At this point we have exhausted all the providers and are
+ // left without any credentials; return anonymous.
+ return Value{
+ SignerType: SignatureAnonymous,
+ }, nil
+}
+
+// IsExpired returns the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *Chain) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
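As a sketch of the chaining behavior described above: providers are consulted in order, the first one yielding non-anonymous credentials is cached, and an exhausted chain degrades to anonymous access (the provider list here is illustrative):

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
        creds := credentials.NewChainCredentials([]credentials.Provider{
            &credentials.EnvAWS{},   // consulted first
            &credentials.EnvMinio{}, // consulted if EnvAWS yields nothing
        })

        v, err := creds.Get() // walks the chain and caches the first hit
        if err != nil {
            log.Fatalln(err)
        }
        if v.SignerType.IsAnonymous() {
            fmt.Println("no provider returned credentials; requests will be unsigned")
        }
    }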
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
new file mode 100644
index 0000000..d793c9e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
@@ -0,0 +1,17 @@
+{
+ "version": "8",
+ "hosts": {
+ "play": {
+ "url": "https://play.min.io",
+ "accessKey": "Q3AM3UQ867SPQQA43P2F",
+ "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+ "api": "S3v2"
+ },
+ "s3": {
+ "url": "https://s3.amazonaws.com",
+ "accessKey": "accessKey",
+ "secretKey": "secret",
+ "api": "S3v4"
+ }
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
new file mode 100644
index 0000000..af61049
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
@@ -0,0 +1,193 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+const (
+ // STSVersion sts version string
+ STSVersion = "2011-06-15"
+
+ // Fraction of the credentials' lifetime after which they are treated
+ // as expired (see SetExpiration).
+ defaultExpiryWindow = 0.8
+)
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Signature Type.
+ SignerType SignatureType
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+ // An error is returned if the value was not obtainable, or was empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//
+// type IAMCredentialProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time at which the credentials expire.
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ cut := window
+ if cut < 0 {
+ expireIn := expiration.Sub(e.CurrentTime())
+ cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow))
+ }
+ e.expiration = expiration.Add(-cut)
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ return e.expiration.Before(e.CurrentTime())
+}
+
+// Credentials - A container for concurrency-safe retrieval of credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ sync.Mutex
+
+ creds Value
+ forceRefresh bool
+ provider Provider
+}
+
+// New returns a pointer to a new Credentials with the provider set.
+func New(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ if c == nil {
+ return Value{}, nil
+ }
+
+ c.Lock()
+ defer c.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.Lock()
+ defer c.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be refreshed.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
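To show how Provider and Expiry compose, here is a hypothetical provider that embeds Expiry (gaining IsExpired for free) and hands out fixed values with a one-hour lifetime; the key material is made up:

    package main

    import (
        "fmt"
        "time"

        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    // tokenProvider is a hypothetical Provider. Embedding Expiry supplies
    // IsExpired(); Retrieve marks each set of values as valid for one hour.
    type tokenProvider struct {
        credentials.Expiry
    }

    func (p *tokenProvider) Retrieve() (credentials.Value, error) {
        // DefaultExpiryWindow (-1) asks for a refresh after 80% of the lifetime.
        p.SetExpiration(time.Now().Add(time.Hour), credentials.DefaultExpiryWindow)
        return credentials.Value{
            AccessKeyID:     "hypothetical-access-key",
            SecretAccessKey: "hypothetical-secret-key",
            SignerType:      credentials.SignatureV4,
        }, nil
    }

    func main() {
        creds := credentials.New(&tokenProvider{})
        v, err := creds.Get() // Retrieve() runs here; later Gets use the cache
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(v.AccessKeyID)
    }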
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
new file mode 100644
index 0000000..afbfad5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
@@ -0,0 +1,7 @@
+{
+ "Version": 1,
+ "SessionToken": "token",
+ "AccessKeyId": "accessKey",
+ "SecretAccessKey": "secret",
+ "Expiration": "9999-04-27T16:02:25.000Z"
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
new file mode 100644
index 0000000..e2dc1bf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
@@ -0,0 +1,15 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
+
+[with_process]
+credential_process = /bin/cat credentials.json
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
new file mode 100644
index 0000000..fbfb105
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
@@ -0,0 +1,60 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package credentials provides credential retrieval and management
+// for S3 compatible object storage.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when credentials have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := NewFromEnv()
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := NewFromIAM("")
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+// # Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+package credentials
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
new file mode 100644
index 0000000..b6e60d0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Session Token: AWS_SESSION_TOKEN.
+type EnvAWS struct {
+ retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+ return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvAWS) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvAWS) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
new file mode 100644
index 0000000..5bfeab1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
@@ -0,0 +1,68 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_SECRET_KEY.
+// * Access Key ID: MINIO_ROOT_USER.
+// * Secret Access Key: MINIO_ROOT_PASSWORD.
+type EnvMinio struct {
+ retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+ return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("MINIO_ROOT_USER")
+ secret := os.Getenv("MINIO_ROOT_PASSWORD")
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ id = os.Getenv("MINIO_ACCESS_KEY")
+ secret = os.Getenv("MINIO_SECRET_KEY")
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvMinio) IsExpired() bool {
+ return !e.retrieved
+}
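A minimal sketch of the environment providers in use; the variables are set here only to make the example self-contained:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
        // Example values only; normally these already exist in the environment.
        os.Setenv("MINIO_ROOT_USER", "example-user")
        os.Setenv("MINIO_ROOT_PASSWORD", "example-password")

        creds := credentials.NewEnvMinio()
        v, err := creds.Get()
        if err != nil {
            log.Fatalln(err)
        }
        fmt.Println(v.AccessKeyID, v.SignerType) // "example-user S3v4"
    }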
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
new file mode 100644
index 0000000..07a9c2f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -0,0 +1,95 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+)
+
+// ErrorResponse - Is the typed error returned.
+// ErrorResponse struct should be comparable since it is compared inside
+// the Go http API (https://github.com/golang/go/issues/29768)
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"`
+ STSError struct {
+ Type string `xml:"Type"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+ } `xml:"Error"`
+ RequestID string `xml:"RequestId"`
+}
+
+// Error - Is the typed error returned by all API operations.
+type Error struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string
+ Message string
+ BucketName string
+ Key string
+ Resource string
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+
+ // Region where the bucket is located. This header is returned
+ // only in HEAD bucket and ListObjects response.
+ Region string
+
+ // Captures the server string returned in response header.
+ Server string
+
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+}
+
+// Error - Returns S3 error string.
+func (e Error) Error() string {
+ if e.Message == "" {
+ return fmt.Sprintf("Error response code %s.", e.Code)
+ }
+ return e.Message
+}
+
+// Error - Returns STS error string.
+func (e ErrorResponse) Error() string {
+ if e.STSError.Message == "" {
+ return fmt.Sprintf("Error response code %s.", e.STSError.Code)
+ }
+ return e.STSError.Message
+}
+
+// xmlDecoder decodes an XML body into the provided value.
+func xmlDecoder(body io.Reader, v interface{}) error {
+ d := xml.NewDecoder(body)
+ return d.Decode(v)
+}
+
+// xmlDecodeAndBody reads the whole body up to 1MB and
+// tries to XML decode it into v.
+// The body that was read and any error from reading or decoding are returned.
+func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
+ // read the whole body (up to 1MB)
+ const maxBodyLength = 1 << 20
+ body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+ if err != nil {
+ return nil, err
+ }
+ return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
+}
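Since ErrorResponse implements the error interface, a caller can unwrap STS failures with errors.As; a sketch, using placeholder credentials that an STS endpoint would reject:

    package main

    import (
        "errors"
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
        creds, err := credentials.NewSTSAssumeRole("https://sts.example.com", credentials.STSAssumeRoleOptions{
            AccessKey: "bad-key", // placeholder values the endpoint would reject
            SecretKey: "bad-secret",
        })
        if err != nil {
            log.Fatalln(err)
        }

        if _, err := creds.Get(); err != nil {
            var stsErr credentials.ErrorResponse
            if errors.As(err, &stsErr) {
                fmt.Println("STS error:", stsErr.STSError.Code, stsErr.STSError.Message)
            }
        }
    }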
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
new file mode 100644
index 0000000..5b07376
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,157 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "encoding/json"
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ ini "gopkg.in/ini.v1"
+)
+
+// An externalProcessCredentials stores the output of a credential_process.
+type externalProcessCredentials struct {
+ Version int
+ SessionToken string
+ AccessKeyID string `json:"AccessKeyId"`
+ SecretAccessKey string
+ Expiration time.Time
+}
+
+// A FileAWSCredentials retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials have expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type FileAWSCredentials struct {
+ Expiry
+
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileAWSCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewFileAWSCredentials(filename, profile string) *Credentials {
+ return New(&FileAWSCredentials{
+ Filename: filename,
+ Profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileAWSCredentials) Retrieve() (Value, error) {
+ if p.Filename == "" {
+ p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
+ if p.Filename == "" {
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+ }
+ }
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+ }
+
+ p.retrieved = false
+
+ iniProfile, err := loadProfile(p.Filename, p.Profile)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // Default to empty string if not found.
+ id := iniProfile.Key("aws_access_key_id")
+ // Default to empty string if not found.
+ secret := iniProfile.Key("aws_secret_access_key")
+ // Default to empty string if not found.
+ token := iniProfile.Key("aws_session_token")
+
+ // If credential_process is defined, obtain credentials by executing
+ // the external process
+ credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String())
+ if credentialProcess != "" {
+ args := strings.Fields(credentialProcess)
+ if len(args) <= 1 {
+ return Value{}, errors.New("invalid credential process args")
+ }
+ cmd := exec.Command(args[0], args[1:]...)
+ out, err := cmd.Output()
+ if err != nil {
+ return Value{}, err
+ }
+ var externalProcessCredentials externalProcessCredentials
+ err = json.Unmarshal([]byte(out), &externalProcessCredentials)
+ if err != nil {
+ return Value{}, err
+ }
+ p.retrieved = true
+ p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow)
+ return Value{
+ AccessKeyID: externalProcessCredentials.AccessKeyID,
+ SecretAccessKey: externalProcessCredentials.SecretAccessKey,
+ SessionToken: externalProcessCredentials.SessionToken,
+ SignerType: SignatureV4,
+ }, nil
+ }
+ p.retrieved = true
+ return Value{
+ AccessKeyID: id.String(),
+ SecretAccessKey: secret.String(),
+ SessionToken: token.String(),
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// loadProfile loads the named profile from the shared credentials file.
+// The credentials retrieved from the profile are returned, or an error if
+// reading the file fails or the data is invalid.
+func loadProfile(filename, profile string) (*ini.Section, error) {
+ config, err := ini.Load(filename)
+ if err != nil {
+ return nil, err
+ }
+ iniProfile, err := config.GetSection(profile)
+ if err != nil {
+ return nil, err
+ }
+ return iniProfile, nil
+}
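A sketch of reading a specific profile; the path and profile name are placeholders, and empty arguments fall back to the environment variables and defaults described above:

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
        // Empty arguments would fall back to AWS_SHARED_CREDENTIALS_FILE /
        // AWS_PROFILE, then to ~/.aws/credentials and the "default" profile.
        creds := credentials.NewFileAWSCredentials("/path/to/credentials", "my-profile")
        v, err := creds.Get()
        if err != nil {
            log.Fatalln(err)
        }
        fmt.Println(v.AccessKeyID)
    }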
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
new file mode 100644
index 0000000..eb77767
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -0,0 +1,139 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+// A FileMinioClient retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials have expired.
+//
+// Configuration file example: $HOME/.mc/config.json
+type FileMinioClient struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.mc/config.json"
+ // Windows: "%USERPROFILE%\mc\config.json"
+ Filename string
+
+ // MinIO Alias to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "MINIO_ALIAS" or "default" if
+ // environment variable is also not set.
+ Alias string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileMinioClient returns a pointer to a new Credentials object
+// wrapping the Alias file provider.
+func NewFileMinioClient(filename, alias string) *Credentials {
+ return New(&FileMinioClient{
+ Filename: filename,
+ Alias: alias,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileMinioClient) Retrieve() (Value, error) {
+ if p.Filename == "" {
+ if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
+ p.Filename = value
+ } else {
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.Filename = filepath.Join(homeDir, ".mc", "config.json")
+ if runtime.GOOS == "windows" {
+ p.Filename = filepath.Join(homeDir, "mc", "config.json")
+ }
+ }
+ }
+
+ if p.Alias == "" {
+ p.Alias = os.Getenv("MINIO_ALIAS")
+ if p.Alias == "" {
+ p.Alias = "s3"
+ }
+ }
+
+ p.retrieved = false
+
+ hostCfg, err := loadAlias(p.Filename, p.Alias)
+ if err != nil {
+ return Value{}, err
+ }
+
+ p.retrieved = true
+ return Value{
+ AccessKeyID: hostCfg.AccessKey,
+ SecretAccessKey: hostCfg.SecretKey,
+ SignerType: parseSignatureType(hostCfg.API),
+ }, nil
+}
+
+// IsExpired returns true if the shared credentials have not yet been retrieved.
+func (p *FileMinioClient) IsExpired() bool {
+ return !p.retrieved
+}
+
+// hostConfig configuration of a host.
+type hostConfig struct {
+ URL string `json:"url"`
+ AccessKey string `json:"accessKey"`
+ SecretKey string `json:"secretKey"`
+ API string `json:"api"`
+}
+
+// config config version.
+type config struct {
+ Version string `json:"version"`
+ Hosts map[string]hostConfig `json:"hosts"`
+ Aliases map[string]hostConfig `json:"aliases"`
+}
+
+// loadAlias loads the named alias from the shared config file.
+// The credentials retrieved from the alias are returned, or an error if
+// reading the file fails.
+func loadAlias(filename, alias string) (hostConfig, error) {
+ cfg := &config{}
+ json := jsoniter.ConfigCompatibleWithStandardLibrary
+
+ configBytes, err := os.ReadFile(filename)
+ if err != nil {
+ return hostConfig{}, err
+ }
+ if err = json.Unmarshal(configBytes, cfg); err != nil {
+ return hostConfig{}, err
+ }
+
+ if cfg.Version == "10" {
+ return cfg.Aliases[alias], nil
+ }
+
+ return cfg.Hosts[alias], nil
+}
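And similarly for the mc config file; "play" is a placeholder alias that would need to exist in the loaded config:

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
        // Empty arguments would fall back to MINIO_SHARED_CREDENTIALS_FILE /
        // MINIO_ALIAS, then to ~/.mc/config.json and the "s3" alias.
        creds := credentials.NewFileMinioClient("", "play")
        v, err := creds.Get()
        if err != nil {
            log.Fatalln(err)
        }
        fmt.Println(v.AccessKeyID, v.SignerType)
    }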
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
new file mode 100644
index 0000000..c5153c4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -0,0 +1,433 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+// DefaultExpiryWindow - Default expiry window.
+// ExpiryWindow will allow the credentials to trigger refreshing
+// prior to the credentials actually expiring. This is beneficial
+// so race conditions with expiring credentials do not cause
+// request to fail unexpectedly due to ExpiredTokenException exceptions.
+// DefaultExpiryWindow can be used as a parameter to (*Expiry).SetExpiration.
+// When used, the token refresh will be triggered once 80% of the time until
+// the actual expiration has elapsed.
+const DefaultExpiryWindow = -1
+
+// An IAM retrieves credentials from the EC2 service, and keeps track of
+// whether those credentials have expired.
+type IAM struct {
+ Expiry
+
+ // Required http Client to use when connecting to the IAM metadata service.
+ Client *http.Client
+
+ // Custom endpoint to fetch IAM role credentials.
+ Endpoint string
+
+ // Region configurable custom region for STS
+ Region string
+
+ // Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
+ Container struct {
+ AuthorizationToken string
+ CredentialsFullURI string
+ CredentialsRelativeURI string
+ }
+
+ // EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html
+ EKSIdentity struct {
+ TokenFile string
+ RoleARN string
+ RoleSessionName string
+ }
+}
+
+// IAM Roles for Amazon EC2
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+const (
+ DefaultIAMRoleEndpoint = "http://169.254.169.254"
+ DefaultECSRoleEndpoint = "http://169.254.170.2"
+ DefaultSTSRoleEndpoint = "https://sts.amazonaws.com"
+ DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
+ TokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
+ TokenPath = "/latest/api/token"
+ TokenTTL = "21600"
+ TokenRequestHeader = "X-aws-ec2-metadata-token"
+)
+
+// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
+func NewIAM(endpoint string) *Credentials {
+ return New(&IAM{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ Endpoint: endpoint,
+ })
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error is returned if the request fails, or if the desired
+// credentials cannot be extracted.
+func (m *IAM) Retrieve() (Value, error) {
+ token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
+ if token == "" {
+ token = m.Container.AuthorizationToken
+ }
+
+ relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
+ if relativeURI == "" {
+ relativeURI = m.Container.CredentialsRelativeURI
+ }
+
+ fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
+ if fullURI == "" {
+ fullURI = m.Container.CredentialsFullURI
+ }
+
+ identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
+ if identityFile == "" {
+ identityFile = m.EKSIdentity.TokenFile
+ }
+
+ roleArn := os.Getenv("AWS_ROLE_ARN")
+ if roleArn == "" {
+ roleArn = m.EKSIdentity.RoleARN
+ }
+
+ roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME")
+ if roleSessionName == "" {
+ roleSessionName = m.EKSIdentity.RoleSessionName
+ }
+
+ region := os.Getenv("AWS_REGION")
+ if region == "" {
+ region = m.Region
+ }
+
+ var roleCreds ec2RoleCredRespBody
+ var err error
+
+ endpoint := m.Endpoint
+ switch {
+ case identityFile != "":
+ if len(endpoint) == 0 {
+ if region != "" {
+ if strings.HasPrefix(region, "cn-") {
+ endpoint = "https://sts." + region + ".amazonaws.com.cn"
+ } else {
+ endpoint = "https://sts." + region + ".amazonaws.com"
+ }
+ } else {
+ endpoint = DefaultSTSRoleEndpoint
+ }
+ }
+
+ creds := &STSWebIdentity{
+ Client: m.Client,
+ STSEndpoint: endpoint,
+ GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
+ token, err := os.ReadFile(identityFile)
+ if err != nil {
+ return nil, err
+ }
+
+ return &WebIdentityToken{Token: string(token)}, nil
+ },
+ RoleARN: roleArn,
+ roleSessionName: roleSessionName,
+ }
+
+ stsWebIdentityCreds, err := creds.Retrieve()
+ if err == nil {
+ m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
+ }
+ return stsWebIdentityCreds, err
+
+ case relativeURI != "":
+ if len(endpoint) == 0 {
+ endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
+ }
+
+ roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+ case fullURI != "":
+ if len(endpoint) == 0 {
+ endpoint = fullURI
+ var ok bool
+ if ok, err = isLoopback(endpoint); !ok {
+ if err == nil {
+ err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
+ }
+ break
+ }
+ }
+
+ roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+ default:
+ roleCreds, err = getCredentials(m.Client, endpoint)
+ }
+
+ if err != nil {
+ return Value{}, err
+ }
+ // A negative expiry window (DefaultExpiryWindow) triggers a refresh
+ // once 80% of the credentials' lifetime has elapsed.
+ m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+
+ // Unused params.
+ LastUpdated time.Time
+ Type string
+}
+
+// Get the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = DefaultIAMSecurityCredsPath
+ return u, nil
+}
+
+// listRoleNames lists the credential role names associated
+// with the current EC2 service. An error is returned if there
+// are no credentials, or if making or receiving the request fails.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) {
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ if token != "" {
+ req.Header.Add(TokenRequestHeader, token)
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return nil, errors.New(resp.Status)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(resp.Body)
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return credsList, nil
+}
+
+func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) {
+ req, err := http.NewRequest(http.MethodGet, endpoint, nil)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if token != "" {
+ req.Header.Set("Authorization", token)
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return ec2RoleCredRespBody{}, errors.New(resp.Status)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ return respCreds, nil
+}
+
+func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil)
+ if err != nil {
+ return "", err
+ }
+ req.Header.Add(TokenRequestTTLHeader, TokenTTL)
+ resp, err := client.Do(req)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return "", errors.New(resp.Status)
+ }
+ return string(data), nil
+}
+
+// getCredentials - obtains the credentials from the IAM role name associated with
+// the current EC2 service.
+//
+// If the credentials cannot be found, or there is an error
+// reading the response an error will be returned.
+func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
+ if endpoint == "" {
+ endpoint = DefaultIAMRoleEndpoint
+ }
+
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+ token, err := fetchIMDSToken(client, endpoint)
+ if err != nil {
+ // Return only errors for valid situations, if the IMDSv2 is not enabled
+ // we will not be able to get the token, in such a situation we have
+ // to rely on IMDSv1 behavior as a fallback, this check ensures that.
+ // Refer https://github.com/minio/minio-go/issues/1866
+ if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
+ return ec2RoleCredRespBody{}, err
+ }
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ u, err := getIAMRoleURL(endpoint)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ roleNames, err := listRoleNames(client, u, token)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if len(roleNames) == 0 {
+ return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // - An instance profile can contain only one IAM role. This limit cannot be increased.
+ roleName := roleNames[0]
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // The following command retrieves the security credentials for an
+ // IAM role named `s3access`.
+ //
+ // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
+ //
+ u.Path = path.Join(u.Path, roleName)
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+ if token != "" {
+ req.Header.Add(TokenRequestHeader, token)
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return ec2RoleCredRespBody{}, errors.New(resp.Status)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
+ }
+
+ return respCreds, nil
+}
+
+// isLoopback reports whether a URI's host resolves only to loopback addresses.
+func isLoopback(uri string) (bool, error) {
+ u, err := url.Parse(uri)
+ if err != nil {
+ return false, err
+ }
+
+ host := u.Hostname()
+ if len(host) == 0 {
+ return false, fmt.Errorf("can't parse host from uri: %s", uri)
+ }
+
+ ips, err := net.LookupHost(host)
+ if err != nil {
+ return false, err
+ }
+ for _, ip := range ips {
+ if !net.ParseIP(ip).IsLoopback() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
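On EC2, ECS or EKS this provider typically needs no configuration; passing an empty endpoint selects the appropriate default, as a sketch:

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
        // With an empty endpoint the provider consults, in order, the
        // web-identity, container-credentials and EC2 instance-metadata
        // environment settings, then falls back to the IMDS defaults.
        creds := credentials.NewIAM("")
        v, err := creds.Get()
        if err != nil {
            log.Fatalln(err)
        }
        fmt.Println(v.AccessKeyID)
    }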
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
new file mode 100644
index 0000000..b794333
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
@@ -0,0 +1,77 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "strings"
+
+// SignatureType is type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
+const (
+ // SignatureDefault is always set to v4.
+ SignatureDefault SignatureType = iota
+ SignatureV4
+ SignatureV2
+ SignatureV4Streaming
+	SignatureAnonymous // Anonymous signature signifies no signature.
+)
+
+// IsV2 - is signature SignatureV2?
+func (s SignatureType) IsV2() bool {
+ return s == SignatureV2
+}
+
+// IsV4 - is signature SignatureV4?
+func (s SignatureType) IsV4() bool {
+ return s == SignatureV4 || s == SignatureDefault
+}
+
+// IsStreamingV4 - is signature SignatureV4Streaming?
+func (s SignatureType) IsStreamingV4() bool {
+ return s == SignatureV4Streaming
+}
+
+// IsAnonymous - is signature empty?
+func (s SignatureType) IsAnonymous() bool {
+ return s == SignatureAnonymous
+}
+
+// String returns a humanized version of the signature type;
+// strings returned here are case insensitive.
+func (s SignatureType) String() string {
+ if s.IsV2() {
+ return "S3v2"
+ } else if s.IsV4() {
+ return "S3v4"
+ } else if s.IsStreamingV4() {
+ return "S3v4Streaming"
+ }
+ return "Anonymous"
+}
+
+func parseSignatureType(str string) SignatureType {
+ if strings.EqualFold(str, "S3v4") {
+ return SignatureV4
+ } else if strings.EqualFold(str, "S3v2") {
+ return SignatureV2
+ } else if strings.EqualFold(str, "S3v4Streaming") {
+ return SignatureV4Streaming
+ }
+ return SignatureAnonymous
+}
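+
+// Illustrative round trip (a sketch, not part of the upstream API): the
+// parser accepts any casing of the strings produced by String:
+//
+//	s := parseSignatureType("s3v4streaming") // SignatureV4Streaming
+//	_ = s.String()                           // "S3v4Streaming"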
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
new file mode 100644
index 0000000..7dde00b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
@@ -0,0 +1,67 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Static is a set of credentials which are set programmatically,
+// and will never expire.
+type Static struct {
+ Value
+}
+
+// NewStaticV2 returns a pointer to a new Credentials object
+// wrapping a static credentials value provider; the signature is
+// set to v2. If access and secret are not specified, then
+// regardless of the signature type set, Retrieve will return an
+// anonymous Value.
+func NewStaticV2(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV2)
+}
+
+// NewStaticV4 is similar to NewStaticV2, but the signature is set to v4.
+func NewStaticV4(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV4)
+}
+
+// NewStatic returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
+ return New(&Static{
+ Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ SignerType: signerType,
+ },
+ })
+}
+
+// Retrieve returns the static credentials.
+func (s *Static) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ // Anonymous is not an error
+ return Value{SignerType: SignatureAnonymous}, nil
+ }
+ return s.Value, nil
+}
+
+// IsExpired reports whether the credentials are expired.
+//
+// For Static, the credentials never expire.
+func (s *Static) IsExpired() bool {
+ return false
+}
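+
+// Illustrative usage sketch (the key pair below is a placeholder):
+//
+//	creds := NewStaticV4("minioadmin", "minioadmin", "")
+//	v, err := creds.Get() // always returns the same static Value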
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
new file mode 100644
index 0000000..9e92c1e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -0,0 +1,182 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// AssumedRoleUser - The identifiers for the temporary security credentials that
+// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
+type AssumedRoleUser struct {
+ Arn string
+ AssumedRoleID string `xml:"AssumeRoleId"`
+}
+
+// AssumeRoleWithClientGrantsResponse contains the result of a successful AssumeRoleWithClientGrants request.
+type AssumeRoleWithClientGrantsResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"`
+ Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
+// request, including temporary credentials that can be used to make MinIO API requests.
+type ClientGrantsResult struct {
+ AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+ Audience string `xml:",omitempty"`
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:",omitempty"`
+ PackedPolicySize int `xml:",omitempty"`
+ Provider string `xml:",omitempty"`
+ SubjectFromClientGrantsToken string `xml:",omitempty"`
+}
+
+// ClientGrantsToken - client grants token with expiry.
+type ClientGrantsToken struct {
+ Token string
+ Expiry int
+}
+
+// An STSClientGrants retrieves credentials from the MinIO service, and keeps
+// track of whether those credentials are expired.
+type STSClientGrants struct {
+ Expiry
+
+ // Required http Client to use when connecting to MinIO STS service.
+ Client *http.Client
+
+ // MinIO endpoint to fetch STS credentials.
+ STSEndpoint string
+
+	// GetClientGrantsTokenExpiry is a function to retrieve tokens
+	// from the IDP. It should return two values: an access token,
+	// which is a self-contained access token (JWT), and the expiry
+	// associated with that token. This is a customer-provided
+	// function and is mandatory.
+ GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error)
+}
+
+// NewSTSClientGrants returns a pointer to a new
+// Credentials object wrapping the STSClientGrants.
+func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
+ if stsEndpoint == "" {
+ return nil, errors.New("STS endpoint cannot be empty")
+ }
+ if getClientGrantsTokenExpiry == nil {
+ return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
+ }
+ return New(&STSClientGrants{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ STSEndpoint: stsEndpoint,
+ GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
+ }), nil
+}
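+
+// Illustrative sketch of a customer-provided token function (fetchTokenFromIDP
+// is a hypothetical helper; any source of a JWT access token and its expiry
+// in seconds works):
+//
+//	getToken := func() (*ClientGrantsToken, error) {
+//		tok, expirySecs, err := fetchTokenFromIDP() // hypothetical
+//		if err != nil {
+//			return nil, err
+//		}
+//		return &ClientGrantsToken{Token: tok, Expiry: expirySecs}, nil
+//	}
+//	creds, err := NewSTSClientGrants("https://minio.local:9000", getToken)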
+
+func getClientGrantsCredentials(clnt *http.Client, endpoint string,
+ getClientGrantsTokenExpiry func() (*ClientGrantsToken, error),
+) (AssumeRoleWithClientGrantsResponse, error) {
+ accessToken, err := getClientGrantsTokenExpiry()
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+
+ v := url.Values{}
+ v.Set("Action", "AssumeRoleWithClientGrants")
+ v.Set("Token", accessToken.Token)
+ v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry))
+ v.Set("Version", STSVersion)
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ resp, err := clnt.Do(req)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return AssumeRoleWithClientGrantsResponse{}, errResp
+ }
+
+ a := AssumeRoleWithClientGrantsResponse{}
+ if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ return a, nil
+}
+
+// Retrieve retrieves credentials from the MinIO service.
+// An error is returned if the request fails.
+func (m *STSClientGrants) Retrieve() (Value, error) {
+ a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry)
+ if err != nil {
+ return Value{}, err
+ }
+
+	// Set the expiration using the default expiry window.
+ m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: a.Result.Credentials.AccessKey,
+ SecretAccessKey: a.Result.Credentials.SecretKey,
+ SessionToken: a.Result.Credentials.SessionToken,
+ SignerType: SignatureV4,
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
new file mode 100644
index 0000000..e1f9ce4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
@@ -0,0 +1,146 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// CustomTokenResult - contains temporary credentials and user metadata.
+type CustomTokenResult struct {
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId"`
+ SecretKey string `xml:"SecretAccessKey"`
+ Expiration time.Time `xml:"Expiration"`
+ SessionToken string `xml:"SessionToken"`
+ } `xml:",omitempty"`
+
+ AssumedUser string `xml:",omitempty"`
+}
+
+// AssumeRoleWithCustomTokenResponse contains the result of a successful
+// AssumeRoleWithCustomToken request.
+type AssumeRoleWithCustomTokenResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCustomTokenResponse" json:"-"`
+ Result CustomTokenResult `xml:"AssumeRoleWithCustomTokenResult"`
+ Metadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// CustomTokenIdentity - satisfies the Provider interface, and retrieves
+// credentials from MinIO using the AssumeRoleWithCustomToken STS API.
+type CustomTokenIdentity struct {
+ Expiry
+
+ Client *http.Client
+
+ // MinIO server STS endpoint to fetch STS credentials.
+ STSEndpoint string
+
+ // The custom token to use with the request.
+ Token string
+
+ // RoleArn associated with the identity
+ RoleArn string
+
+	// RequestedExpiry sets the validity of the generated credentials
+	// (this value is bounded by the server).
+ RequestedExpiry time.Duration
+}
+
+// Retrieve - to satisfy Provider interface; fetches credentials from MinIO.
+func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
+ u, err := url.Parse(c.STSEndpoint)
+ if err != nil {
+ return value, err
+ }
+
+ v := url.Values{}
+ v.Set("Action", "AssumeRoleWithCustomToken")
+ v.Set("Version", STSVersion)
+ v.Set("RoleArn", c.RoleArn)
+ v.Set("Token", c.Token)
+ if c.RequestedExpiry != 0 {
+ v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds())))
+ }
+
+ u.RawQuery = v.Encode()
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), nil)
+ if err != nil {
+ return value, err
+ }
+
+ resp, err := c.Client.Do(req)
+ if err != nil {
+ return value, err
+ }
+
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return value, errors.New(resp.Status)
+ }
+
+ r := AssumeRoleWithCustomTokenResponse{}
+ if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
+ return
+ }
+
+ cr := r.Result.Credentials
+ c.SetExpiration(cr.Expiration, DefaultExpiryWindow)
+ return Value{
+ AccessKeyID: cr.AccessKey,
+ SecretAccessKey: cr.SecretKey,
+ SessionToken: cr.SessionToken,
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// NewCustomTokenCredentials - returns credentials using the
+// AssumeRoleWithCustomToken STS API.
+func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) {
+ c := CustomTokenIdentity{
+ Client: &http.Client{Transport: http.DefaultTransport},
+ STSEndpoint: stsEndpoint,
+ Token: token,
+ RoleArn: roleArn,
+ }
+ for _, optFunc := range optFuncs {
+ optFunc(&c)
+ }
+ return New(&c), nil
+}
+
+// CustomTokenOpt is a function type to configure the custom-token based
+// credentials using NewCustomTokenCredentials.
+type CustomTokenOpt func(*CustomTokenIdentity)
+
+// CustomTokenValidityOpt sets the validity duration of the requested
+// credentials. This value is ignored if the server enforces a lower validity
+// period.
+func CustomTokenValidityOpt(d time.Duration) CustomTokenOpt {
+ return func(c *CustomTokenIdentity) {
+ c.RequestedExpiry = d
+ }
+}
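+
+// Illustrative usage sketch (endpoint, token and role ARN are placeholders):
+//
+//	creds, err := NewCustomTokenCredentials(
+//		"https://minio.local:9000", "my-opaque-token", "arn:minio:iam:::role/idp-role",
+//		CustomTokenValidityOpt(time.Hour))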
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
new file mode 100644
index 0000000..ec5f3f0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
@@ -0,0 +1,189 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// AssumeRoleWithLDAPResponse contains the result of a successful
+// AssumeRoleWithLDAPIdentity request.
+type AssumeRoleWithLDAPResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"`
+ Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// LDAPIdentityResult - contains credentials for a successful
+// AssumeRoleWithLDAPIdentity request.
+type LDAPIdentityResult struct {
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:",omitempty"`
+
+ SubjectFromToken string `xml:",omitempty"`
+}
+
+// LDAPIdentity retrieves credentials from MinIO.
+type LDAPIdentity struct {
+ Expiry
+
+ // Required http Client to use when connecting to MinIO STS service.
+ Client *http.Client
+
+ // Exported STS endpoint to fetch STS credentials.
+ STSEndpoint string
+
+ // LDAP username/password used to fetch LDAP STS credentials.
+ LDAPUsername, LDAPPassword string
+
+ // Session policy to apply to the generated credentials. Leave empty to
+ // use the full access policy available to the user.
+ Policy string
+
+ // RequestedExpiry is the configured expiry duration for credentials
+ // requested from LDAP.
+ RequestedExpiry time.Duration
+}
+
+// NewLDAPIdentity returns a new credentials object that uses LDAP
+// Identity.
+func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
+ l := LDAPIdentity{
+ Client: &http.Client{Transport: http.DefaultTransport},
+ STSEndpoint: stsEndpoint,
+ LDAPUsername: ldapUsername,
+ LDAPPassword: ldapPassword,
+ }
+ for _, optFunc := range optFuncs {
+ optFunc(&l)
+ }
+ return New(&l), nil
+}
+
+// LDAPIdentityOpt is a function type used to configure the LDAPIdentity
+// instance.
+type LDAPIdentityOpt func(*LDAPIdentity)
+
+// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
+func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
+ return func(k *LDAPIdentity) {
+ k.Policy = policy
+ }
+}
+
+// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
+func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
+ return func(k *LDAPIdentity) {
+ k.RequestedExpiry = d
+ }
+}
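+
+// Illustrative usage sketch (endpoint and bind credentials are placeholders):
+//
+//	creds, err := NewLDAPIdentity("https://minio.local:9000",
+//		"uid=app,ou=users,dc=example,dc=org", "secret",
+//		LDAPIdentityExpiryOpt(time.Hour))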
+
+// NewLDAPIdentityWithSessionPolicy returns a new credentials object that uses
+// LDAP Identity with a specified session policy. The `policy` parameter must be
+// a JSON string specifying the policy document.
+//
+// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
+func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
+ return New(&LDAPIdentity{
+ Client: &http.Client{Transport: http.DefaultTransport},
+ STSEndpoint: stsEndpoint,
+ LDAPUsername: ldapUsername,
+ LDAPPassword: ldapPassword,
+ Policy: policy,
+ }), nil
+}
+
+// Retrieve gets the credential by calling the MinIO STS API for
+// LDAP on the configured stsEndpoint.
+func (k *LDAPIdentity) Retrieve() (value Value, err error) {
+ u, err := url.Parse(k.STSEndpoint)
+ if err != nil {
+ return value, err
+ }
+
+ v := url.Values{}
+ v.Set("Action", "AssumeRoleWithLDAPIdentity")
+ v.Set("Version", STSVersion)
+ v.Set("LDAPUsername", k.LDAPUsername)
+ v.Set("LDAPPassword", k.LDAPPassword)
+ if k.Policy != "" {
+ v.Set("Policy", k.Policy)
+ }
+ if k.RequestedExpiry != 0 {
+ v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
+ }
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
+ if err != nil {
+ return value, err
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ resp, err := k.Client.Do(req)
+ if err != nil {
+ return value, err
+ }
+
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return value, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return value, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return value, errResp
+ }
+
+ r := AssumeRoleWithLDAPResponse{}
+ if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
+ return
+ }
+
+ cr := r.Result.Credentials
+ k.SetExpiration(cr.Expiration, DefaultExpiryWindow)
+ return Value{
+ AccessKeyID: cr.AccessKey,
+ SecretAccessKey: cr.SecretKey,
+ SessionToken: cr.SessionToken,
+ SignerType: SignatureV4,
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
new file mode 100644
index 0000000..dee0a8c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -0,0 +1,211 @@
+// MinIO Go Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2021 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/xml"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+)
+
+// CertificateIdentityOption is an optional AssumeRoleWithCertificate
+// parameter - e.g. a custom HTTP transport configuration or S3 credential
+// lifetime.
+type CertificateIdentityOption func(*STSCertificateIdentity)
+
+// CertificateIdentityWithTransport returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given http.RoundTripper.
+func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
+ return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
+}
+
+// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given lifetime.
+//
+// Fetched S3 credentials will have the given lifetime if the STS server
+// allows such credentials.
+func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption {
+ return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime })
+}
+
+// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and
+// rotates those credentials once they expire.
+type STSCertificateIdentity struct {
+ Expiry
+
+ // STSEndpoint is the base URL endpoint of the STS API.
+ // For example, https://minio.local:9000
+ STSEndpoint string
+
+	// S3CredentialLivetime is the duration for which temporary S3 access
+	// credentials should be valid.
+	//
+	// It represents the access credential lifetime requested
+	// by the client. The STS server may choose to issue
+	// temporary S3 credentials that have a different - usually
+	// shorter - lifetime.
+	//
+	// The default lifetime is one hour.
+ S3CredentialLivetime time.Duration
+
+ // Client is the HTTP client used to authenticate and fetch
+ // S3 credentials.
+ //
+ // A custom TLS client configuration can be specified by
+ // using a custom http.Transport:
+ // Client: http.Client {
+ // Transport: &http.Transport{
+ // TLSClientConfig: &tls.Config{},
+ // },
+ // }
+ Client http.Client
+}
+
+var _ Provider = (*STSCertificateIdentity)(nil) // compiler check
+
+// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
+// to the given STS endpoint with the given TLS certificate and retrieves and
+// rotates S3 credentials.
+func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
+ if endpoint == "" {
+ return nil, errors.New("STS endpoint cannot be empty")
+ }
+ if _, err := url.Parse(endpoint); err != nil {
+ return nil, err
+ }
+ identity := &STSCertificateIdentity{
+ STSEndpoint: endpoint,
+ Client: http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 5 * time.Second,
+ TLSClientConfig: &tls.Config{
+ Certificates: []tls.Certificate{certificate},
+ },
+ },
+ },
+ }
+ for _, option := range options {
+ option(identity)
+ }
+ return New(identity), nil
+}
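+
+// Illustrative usage sketch (certificate paths and endpoint are placeholders):
+//
+//	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
+//	if err != nil {
+//		// handle error
+//	}
+//	creds, err := NewSTSCertificateIdentity("https://minio.local:9000", cert,
+//		CertificateIdentityWithExpiry(time.Hour))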
+
+// Retrieve fetches a new set of S3 credentials from the configured
+// STS API endpoint.
+func (i *STSCertificateIdentity) Retrieve() (Value, error) {
+ endpointURL, err := url.Parse(i.STSEndpoint)
+ if err != nil {
+ return Value{}, err
+ }
+ livetime := i.S3CredentialLivetime
+ if livetime == 0 {
+ livetime = 1 * time.Hour
+ }
+
+ queryValues := url.Values{}
+ queryValues.Set("Action", "AssumeRoleWithCertificate")
+ queryValues.Set("Version", STSVersion)
+ endpointURL.RawQuery = queryValues.Encode()
+
+ req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil)
+ if err != nil {
+ return Value{}, err
+ }
+ if req.Form == nil {
+ req.Form = url.Values{}
+ }
+ req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
+
+ resp, err := i.Client.Do(req)
+ if err != nil {
+ return Value{}, err
+ }
+ if resp.Body != nil {
+ defer resp.Body.Close()
+ }
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return Value{}, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return Value{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return Value{}, errResp
+ }
+
+ const MaxSize = 10 * 1 << 20
+ var body io.Reader = resp.Body
+ if resp.ContentLength > 0 && resp.ContentLength < MaxSize {
+ body = io.LimitReader(body, resp.ContentLength)
+ } else {
+ body = io.LimitReader(body, MaxSize)
+ }
+
+ var response assumeRoleWithCertificateResponse
+ if err = xml.NewDecoder(body).Decode(&response); err != nil {
+ return Value{}, err
+ }
+ i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow)
+ return Value{
+ AccessKeyID: response.Result.Credentials.AccessKey,
+ SecretAccessKey: response.Result.Credentials.SecretKey,
+ SessionToken: response.Result.Credentials.SessionToken,
+ SignerType: SignatureDefault,
+ }, nil
+}
+
+// Expiration returns the expiration time of the current S3 credentials.
+func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration }
+
+type assumeRoleWithCertificateResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"`
+ Result struct {
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:"Credentials" json:"credentials,omitempty"`
+ } `xml:"AssumeRoleWithCertificateResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
new file mode 100644
index 0000000..2e2af50
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -0,0 +1,205 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// AssumeRoleWithWebIdentityResponse contains the result of a successful AssumeRoleWithWebIdentity request.
+type AssumeRoleWithWebIdentityResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"`
+ Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity
+// request, including temporary credentials that can be used to make MinIO API requests.
+type WebIdentityResult struct {
+ AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+ Audience string `xml:",omitempty"`
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:",omitempty"`
+ PackedPolicySize int `xml:",omitempty"`
+ Provider string `xml:",omitempty"`
+ SubjectFromWebIdentityToken string `xml:",omitempty"`
+}
+
+// WebIdentityToken - web identity token with expiry.
+type WebIdentityToken struct {
+ Token string
+ AccessToken string
+ Expiry int
+}
+
+// An STSWebIdentity retrieves credentials from the MinIO service, and keeps
+// track of whether those credentials are expired.
+type STSWebIdentity struct {
+ Expiry
+
+ // Required http Client to use when connecting to MinIO STS service.
+ Client *http.Client
+
+ // Exported STS endpoint to fetch STS credentials.
+ STSEndpoint string
+
+	// Exported GetWebIDTokenExpiry function which returns ID
+	// tokens from the IDP. It should return two values: an ID
+	// token, which is a self-contained ID token (JWT), and the
+	// expiry associated with that token.
+	// This is a customer-provided function and is mandatory.
+ GetWebIDTokenExpiry func() (*WebIdentityToken, error)
+
+ // RoleARN is the Amazon Resource Name (ARN) of the role that the caller is
+ // assuming.
+ RoleARN string
+
+ // roleSessionName is the identifier for the assumed role session.
+ roleSessionName string
+}
+
+// NewSTSWebIdentity returns a pointer to a new
+// Credentials object wrapping the STSWebIdentity.
+func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) {
+ if stsEndpoint == "" {
+ return nil, errors.New("STS endpoint cannot be empty")
+ }
+ if getWebIDTokenExpiry == nil {
+ return nil, errors.New("Web ID token and expiry retrieval function should be defined")
+ }
+ return New(&STSWebIdentity{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ STSEndpoint: stsEndpoint,
+ GetWebIDTokenExpiry: getWebIDTokenExpiry,
+ }), nil
+}
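+
+// Illustrative sketch of a customer-provided token function (rawJWT is a
+// placeholder; any source of an OIDC ID token and its expiry in seconds
+// works):
+//
+//	getToken := func() (*WebIdentityToken, error) {
+//		return &WebIdentityToken{Token: rawJWT, Expiry: 3600}, nil
+//	}
+//	creds, err := NewSTSWebIdentity("https://minio.local:9000", getToken)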
+
+func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
+ getWebIDTokenExpiry func() (*WebIdentityToken, error),
+) (AssumeRoleWithWebIdentityResponse, error) {
+ idToken, err := getWebIDTokenExpiry()
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ v := url.Values{}
+ v.Set("Action", "AssumeRoleWithWebIdentity")
+ if len(roleARN) > 0 {
+ v.Set("RoleArn", roleARN)
+
+ if len(roleSessionName) == 0 {
+ roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10)
+ }
+ v.Set("RoleSessionName", roleSessionName)
+ }
+ v.Set("WebIdentityToken", idToken.Token)
+ if idToken.AccessToken != "" {
+		// Usually set when the server is using the extended userInfo endpoint.
+ v.Set("WebIdentityAccessToken", idToken.AccessToken)
+ }
+ if idToken.Expiry > 0 {
+ v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
+ }
+ v.Set("Version", STSVersion)
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ resp, err := clnt.Do(req)
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return AssumeRoleWithWebIdentityResponse{}, errResp
+ }
+
+ a := AssumeRoleWithWebIdentityResponse{}
+ if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ return a, nil
+}
+
+// Retrieve retrieves credentials from the MinIO service.
+// Error will be returned if the request fails.
+func (m *STSWebIdentity) Retrieve() (Value, error) {
+ a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
+ if err != nil {
+ return Value{}, err
+ }
+
+	// Set the expiration using the default expiry window.
+ m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: a.Result.Credentials.AccessKey,
+ SecretAccessKey: a.Result.Credentials.SecretKey,
+ SessionToken: a.Result.Credentials.SessionToken,
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// Expiration returns the expiration time of the credentials.
+func (m *STSWebIdentity) Expiration() time.Time {
+ return m.expiration
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
new file mode 100644
index 0000000..6db26c0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
@@ -0,0 +1,24 @@
+//go:build !fips
+// +build !fips
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+// FIPS is true if 'fips' build tag was specified.
+const FIPS = false
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
new file mode 100644
index 0000000..6402582
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
@@ -0,0 +1,24 @@
+//go:build fips
+// +build fips
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+// FIPS is true if 'fips' build tag was specified.
+const FIPS = true
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
new file mode 100644
index 0000000..a7081c5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
@@ -0,0 +1,198 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "errors"
+ "net/http"
+
+ jsoniter "github.com/json-iterator/go"
+ "golang.org/x/crypto/argon2"
+)
+
+const (
+ // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
+ SseGenericHeader = "X-Amz-Server-Side-Encryption"
+
+ // SseKmsKeyID is the AWS SSE-KMS key id.
+ SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id"
+ // SseEncryptionContext is the AWS SSE-KMS Encryption Context data.
+ SseEncryptionContext = SseGenericHeader + "-Context"
+
+ // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
+ SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm"
+ // SseCustomerKey is the AWS SSE-C encryption key HTTP header key.
+ SseCustomerKey = SseGenericHeader + "-Customer-Key"
+ // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
+ SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5"
+
+ // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
+ SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
+ SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
+ SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
+)
+
+// PBKDF creates an SSE-C key from the provided password and salt.
+// PBKDF is a password-based key derivation function
+// which can be used to derive a high-entropy cryptographic
+// key from a low-entropy password and a salt.
+type PBKDF func(password, salt []byte) ServerSide
+
+// DefaultPBKDF is the default PBKDF. It uses Argon2id with the
+// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads).
+var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide {
+ sse := ssec{}
+ copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32))
+ return sse
+}
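+
+// Illustrative usage sketch: a common convention is to use the bucket and
+// object path as the salt so every object gets a distinct key (values below
+// are placeholders):
+//
+//	sse := DefaultPBKDF([]byte("my-password"), []byte("my-bucket/my-object"))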
+
+// Type is the server-side-encryption method. It represents one of
+// the following encryption methods:
+// - SSE-C: server-side-encryption with customer provided keys
+// - KMS: server-side-encryption with managed keys
+// - S3: server-side-encryption using S3 storage encryption
+type Type string
+
+const (
+ // SSEC represents server-side-encryption with customer provided keys
+ SSEC Type = "SSE-C"
+ // KMS represents server-side-encryption with managed keys
+ KMS Type = "KMS"
+ // S3 represents server-side-encryption using S3 storage encryption
+ S3 Type = "S3"
+)
+
+// ServerSide is a form of S3 server-side-encryption.
+type ServerSide interface {
+ // Type returns the server-side-encryption method.
+ Type() Type
+
+ // Marshal adds encryption headers to the provided HTTP headers.
+ // It marks an HTTP request as server-side-encryption request
+ // and inserts the required data into the headers.
+ Marshal(h http.Header)
+}
+
+// NewSSE returns a server-side-encryption using S3 storage encryption.
+// Using SSE-S3 the server will encrypt the object with server-managed keys.
+func NewSSE() ServerSide { return s3{} }
+
+// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context.
+func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
+ if context == nil {
+ return kms{key: keyID, hasContext: false}, nil
+ }
+ json := jsoniter.ConfigCompatibleWithStandardLibrary
+ serializedContext, err := json.Marshal(context)
+ if err != nil {
+ return nil, err
+ }
+ return kms{key: keyID, context: serializedContext, hasContext: true}, nil
+}
+
+// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
+// The key must be 32 bytes long.
+func NewSSEC(key []byte) (ServerSide, error) {
+ if len(key) != 32 {
+ return nil, errors.New("encrypt: SSE-C key must be 256 bit long")
+ }
+ sse := ssec{}
+ copy(sse[:], key)
+ return sse, nil
+}
+
+// SSE transforms a SSE-C copy encryption into a SSE-C encryption.
+// It is the inverse of SSECopy(...).
+//
+// If the provided sse is not an SSE-C copy encryption, SSE returns
+// sse unmodified.
+func SSE(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssecCopy); ok {
+ return ssec(sse)
+ }
+ return sse
+}
+
+// SSECopy transforms a SSE-C encryption into a SSE-C copy
+// encryption. This is required for SSE-C key rotation or a SSE-C
+// copy where the source and the destination should be encrypted.
+//
+// If the provided sse is not an SSE-C encryption, SSECopy returns
+// sse unmodified.
+func SSECopy(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssec); ok {
+ return ssecCopy(sse)
+ }
+ return sse
+}
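+
+// Illustrative key-rotation sketch: when copying an object onto itself with a
+// new SSE-C key, the source side uses SSECopy and the destination side uses a
+// fresh SSE-C key (oldKey32 and newKey32 are placeholder 32-byte keys):
+//
+//	src, _ := NewSSEC(oldKey32)
+//	dst, _ := NewSSEC(newKey32)
+//	SSECopy(src).Marshal(req.Header) // X-Amz-Copy-Source-...-Customer-* headers
+//	dst.Marshal(req.Header)          // X-Amz-Server-Side-...-Customer-* headers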
+
+type ssec [32]byte
+
+func (s ssec) Type() Type { return SSEC }
+
+func (s ssec) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(SseCustomerAlgorithm, "AES256")
+ h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type ssecCopy [32]byte
+
+func (s ssecCopy) Type() Type { return SSEC }
+
+func (s ssecCopy) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(SseCopyCustomerAlgorithm, "AES256")
+ h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type s3 struct{}
+
+func (s s3) Type() Type { return S3 }
+
+func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") }
+
+type kms struct {
+ key string
+ context []byte
+ hasContext bool
+}
+
+func (s kms) Type() Type { return KMS }
+
+func (s kms) Marshal(h http.Header) {
+ h.Set(SseGenericHeader, "aws:kms")
+ if s.key != "" {
+ h.Set(SseKmsKeyID, s.key)
+ }
+ if s.hasContext {
+ h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
new file mode 100644
index 0000000..c52f78c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -0,0 +1,491 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package lifecycle contains all the lifecycle related data types and marshallers.
+package lifecycle
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "time"
+)
+
+var errMissingStorageClass = errors.New("storage-class cannot be empty")
+
+// AbortIncompleteMultipartUpload structure, not supported yet on MinIO
+type AbortIncompleteMultipartUpload struct {
+ XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"`
+ DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"`
+}
+
+// IsDaysNull returns true if days field is null
+func (n AbortIncompleteMultipartUpload) IsDaysNull() bool {
+ return n.DaysAfterInitiation == ExpirationDays(0)
+}
+
+// MarshalXML encodes the element only if DaysAfterInitiation is set to a non-zero value.
+func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if n.IsDaysNull() {
+ return nil
+ }
+ type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload
+ return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start)
+}
+
+// NoncurrentVersionExpiration - specifies when noncurrent object versions expire.
+// Upon expiration, the server permanently deletes the noncurrent object versions.
+// Set this lifecycle configuration action on a bucket that has versioning enabled
+// (or suspended) to request that the server delete noncurrent object versions at a
+// specific period in the object's lifetime.
+type NoncurrentVersionExpiration struct {
+ XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
+ NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
+ NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
+}
+
+// MarshalXML encodes n only if it is non-empty, i.e. it has a non-zero NoncurrentDays or NewerNoncurrentVersions.
+func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if n.isNull() {
+ return nil
+ }
+ type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
+ return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start)
+}
+
+// IsDaysNull returns true if days field is null
+func (n NoncurrentVersionExpiration) IsDaysNull() bool {
+ return n.NoncurrentDays == ExpirationDays(0)
+}
+
+func (n NoncurrentVersionExpiration) isNull() bool {
+ return n.IsDaysNull() && n.NewerNoncurrentVersions == 0
+}
+
+// NoncurrentVersionTransition structure - set this action to request that the
+// server transition noncurrent object versions to a different storage class
+// at a specific period in the object's lifetime.
+type NoncurrentVersionTransition struct {
+ XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
+ StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+ NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"`
+ NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
+}
+
+// IsDaysNull returns true if days field is null
+func (n NoncurrentVersionTransition) IsDaysNull() bool {
+ return n.NoncurrentDays == ExpirationDays(0)
+}
+
+// IsStorageClassEmpty returns true if storage class field is empty
+func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool {
+ return n.StorageClass == ""
+}
+
+func (n NoncurrentVersionTransition) isNull() bool {
+ return n.StorageClass == ""
+}
+
+// UnmarshalJSON implements json.Unmarshaler for NoncurrentVersionTransition.
+func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error {
+ type noncurrentVersionTransition NoncurrentVersionTransition
+ var nt noncurrentVersionTransition
+ err := json.Unmarshal(b, &nt)
+ if err != nil {
+ return err
+ }
+
+ if nt.StorageClass == "" {
+ return errMissingStorageClass
+ }
+ *n = NoncurrentVersionTransition(nt)
+ return nil
+}
+
+// MarshalXML is extended to leave out
+// <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
+func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if n.isNull() {
+ return nil
+ }
+ type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
+ return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start)
+}
+
+// Tag structure - a key/value pair representing an object tag to which a lifecycle configuration applies.
+type Tag struct {
+ XMLName xml.Name `xml:"Tag,omitempty" json:"-"`
+ Key string `xml:"Key,omitempty" json:"Key,omitempty"`
+ Value string `xml:"Value,omitempty" json:"Value,omitempty"`
+}
+
+// IsEmpty returns whether this tag is empty or not.
+func (tag Tag) IsEmpty() bool {
+ return tag.Key == ""
+}
+
+// Transition structure - transition details of lifecycle configuration
+type Transition struct {
+ XMLName xml.Name `xml:"Transition" json:"-"`
+ Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
+ StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+ Days ExpirationDays `xml:"Days" json:"Days"`
+}
+
+// UnmarshalJSON returns an error if storage-class is empty.
+func (t *Transition) UnmarshalJSON(b []byte) error {
+ type transition Transition
+ var tr transition
+ err := json.Unmarshal(b, &tr)
+ if err != nil {
+ return err
+ }
+
+ if tr.StorageClass == "" {
+ return errMissingStorageClass
+ }
+ *t = Transition(tr)
+ return nil
+}
+
+// MarshalJSON customizes json encoding by omitting empty values
+func (t Transition) MarshalJSON() ([]byte, error) {
+ if t.IsNull() {
+ return nil, nil
+ }
+ type transition struct {
+ Date *ExpirationDate `json:"Date,omitempty"`
+ StorageClass string `json:"StorageClass,omitempty"`
+ Days *ExpirationDays `json:"Days"`
+ }
+
+ newt := transition{
+ StorageClass: t.StorageClass,
+ }
+
+ if !t.IsDateNull() {
+ newt.Date = &t.Date
+ } else {
+ newt.Days = &t.Days
+ }
+ return json.Marshal(newt)
+}
+
+// IsDaysNull returns true if days field is null
+func (t Transition) IsDaysNull() bool {
+ return t.Days == ExpirationDays(0)
+}
+
+// IsDateNull returns true if date field is null
+func (t Transition) IsDateNull() bool {
+ return t.Date.Time.IsZero()
+}
+
+// IsNull returns true if no storage-class is set.
+func (t Transition) IsNull() bool {
+ return t.StorageClass == ""
+}
+
+// MarshalXML encodes the transition only if it is non-null.
+func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
+ if t.IsNull() {
+ return nil
+ }
+ type transitionWrapper Transition
+ return en.EncodeElement(transitionWrapper(t), startElement)
+}
+
+// And - the And rule for lifecycle tags, to be used in a lifecycle rule Filter.
+type And struct {
+ XMLName xml.Name `xml:"And" json:"-"`
+ Prefix string `xml:"Prefix" json:"Prefix,omitempty"`
+ Tags []Tag `xml:"Tag" json:"Tags,omitempty"`
+ ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
+ ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
+}
+
+// IsEmpty returns true if all And fields are empty.
+func (a And) IsEmpty() bool {
+ return len(a.Tags) == 0 && a.Prefix == "" &&
+ a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0
+}
+
+// Filter will be used in selecting rule(s) for lifecycle configuration
+type Filter struct {
+ XMLName xml.Name `xml:"Filter" json:"-"`
+ And And `xml:"And,omitempty" json:"And,omitempty"`
+ Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+ Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
+ ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
+ ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
+}
+
+// IsNull returns true if all Filter fields are empty.
+func (f Filter) IsNull() bool {
+ return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" &&
+ f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0
+}
+
+// MarshalJSON customizes json encoding by removing empty values.
+func (f Filter) MarshalJSON() ([]byte, error) {
+ type filter struct {
+ And *And `json:"And,omitempty"`
+ Prefix string `json:"Prefix,omitempty"`
+ Tag *Tag `json:"Tag,omitempty"`
+ ObjectSizeLessThan int64 `json:"ObjectSizeLessThan,omitempty"`
+ ObjectSizeGreaterThan int64 `json:"ObjectSizeGreaterThan,omitempty"`
+ }
+
+ newf := filter{
+ Prefix: f.Prefix,
+ }
+ if !f.Tag.IsEmpty() {
+ newf.Tag = &f.Tag
+ }
+ if !f.And.IsEmpty() {
+ newf.And = &f.And
+ }
+ newf.ObjectSizeLessThan = f.ObjectSizeLessThan
+ newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan
+ return json.Marshal(newf)
+}
+
+// MarshalXML - produces the XML representation of the Filter struct;
+// only one of Prefix, And and Tag should be present in the output.
+func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+
+ switch {
+ case !f.And.IsEmpty():
+ if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil {
+ return err
+ }
+ case !f.Tag.IsEmpty():
+ if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
+ return err
+ }
+ default:
+ if f.ObjectSizeLessThan > 0 {
+ if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil {
+ return err
+ }
+ break
+ }
+ if f.ObjectSizeGreaterThan > 0 {
+ if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil {
+ return err
+ }
+ break
+ }
+ // Print empty Prefix field only when everything else is empty
+ if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
+ return err
+ }
+ }
+
+ return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+// ExpirationDays is an integer type used to marshal/unmarshal Days in Expiration.
+type ExpirationDays int
+
+// MarshalXML encodes the number of days to expire if it is non-zero, and
+// encodes nothing otherwise.
+func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+ if eDays == 0 {
+ return nil
+ }
+ return e.EncodeElement(int(eDays), startElement)
+}
+
+// ExpirationDate is an embedded type containing time.Time to unmarshal
+// Date in Expiration.
+type ExpirationDate struct {
+ time.Time
+}
+
+// MarshalXML encodes the expiration date if it is non-zero, and encodes
+// nothing otherwise.
+func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+ if eDate.Time.IsZero() {
+ return nil
+ }
+ return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
+}
+
+// ExpireDeleteMarker represents the value of the ExpiredObjectDeleteMarker field in the Expiration XML element.
+type ExpireDeleteMarker ExpirationBoolean
+
+// IsEnabled returns true if the auto delete-marker expiration is enabled
+func (e ExpireDeleteMarker) IsEnabled() bool {
+ return bool(e)
+}
+
+// ExpirationBoolean represents an XML version of 'bool' type
+type ExpirationBoolean bool
+
+// MarshalXML encodes the boolean into XML form, omitting the element when false.
+func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+ if !b {
+ return nil
+ }
+ type booleanWrapper ExpirationBoolean
+ return e.EncodeElement(booleanWrapper(b), startElement)
+}
+
+// IsEnabled returns true if the expiration boolean is enabled
+func (b ExpirationBoolean) IsEnabled() bool {
+ return bool(b)
+}
+
+// Expiration structure - expiration details of lifecycle configuration
+type Expiration struct {
+ XMLName xml.Name `xml:"Expiration,omitempty" json:"-"`
+ Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
+ Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
+ DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"`
+ DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"`
+}
+
+// MarshalJSON customizes json encoding by removing empty day/date specification.
+func (e Expiration) MarshalJSON() ([]byte, error) {
+ type expiration struct {
+ Date *ExpirationDate `json:"Date,omitempty"`
+ Days *ExpirationDays `json:"Days,omitempty"`
+ DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"`
+ DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"`
+ }
+
+ newexp := expiration{
+ DeleteMarker: e.DeleteMarker,
+ DeleteAll: e.DeleteAll,
+ }
+ if !e.IsDaysNull() {
+ newexp.Days = &e.Days
+ }
+ if !e.IsDateNull() {
+ newexp.Date = &e.Date
+ }
+ return json.Marshal(newexp)
+}
+
+// IsDaysNull returns true if days field is null
+func (e Expiration) IsDaysNull() bool {
+ return e.Days == ExpirationDays(0)
+}
+
+// IsDateNull returns true if date field is null
+func (e Expiration) IsDateNull() bool {
+ return e.Date.Time.IsZero()
+}
+
+// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled
+func (e Expiration) IsDeleteMarkerExpirationEnabled() bool {
+ return e.DeleteMarker.IsEnabled()
+}
+
+// IsNull returns true if both date and days fields are null
+func (e Expiration) IsNull() bool {
+ return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled()
+}
+
+// MarshalXML encodes the Expiration element only when it is non-null.
+func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
+ if e.IsNull() {
+ return nil
+ }
+ type expirationWrapper Expiration
+ return en.EncodeElement(expirationWrapper(e), startElement)
+}
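+
+// Illustrative sketch (not part of the upstream source): an Expiration with
+// only Days set marshals as, e.g.,
+//
+//	lifecycle.Expiration{Days: lifecycle.ExpirationDays(90)}
+//	// => <Expiration><Days>90</Days></Expiration>
+//
+// while a zero-valued Expiration is omitted from the output entirely.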
+
+// MarshalJSON customizes json encoding by omitting empty values
+func (r Rule) MarshalJSON() ([]byte, error) {
+ type rule struct {
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
+ Expiration *Expiration `json:"Expiration,omitempty"`
+ ID string `json:"ID"`
+ RuleFilter *Filter `json:"Filter,omitempty"`
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"`
+ NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"`
+ Prefix string `json:"Prefix,omitempty"`
+ Status string `json:"Status"`
+ Transition *Transition `json:"Transition,omitempty"`
+ }
+ newr := rule{
+ Prefix: r.Prefix,
+ Status: r.Status,
+ ID: r.ID,
+ }
+
+ if !r.RuleFilter.IsNull() {
+ newr.RuleFilter = &r.RuleFilter
+ }
+ if !r.AbortIncompleteMultipartUpload.IsDaysNull() {
+ newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload
+ }
+ if !r.Expiration.IsNull() {
+ newr.Expiration = &r.Expiration
+ }
+ if !r.Transition.IsNull() {
+ newr.Transition = &r.Transition
+ }
+ if !r.NoncurrentVersionExpiration.isNull() {
+ newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration
+ }
+ if !r.NoncurrentVersionTransition.isNull() {
+ newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
+ }
+
+ return json.Marshal(newr)
+}
+
+// Rule represents a single rule in lifecycle configuration
+type Rule struct {
+ XMLName xml.Name `xml:"Rule,omitempty" json:"-"`
+ AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"`
+ Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"`
+ ID string `xml:"ID" json:"ID"`
+ RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"`
+ NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"`
+ NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"`
+ Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+ Status string `xml:"Status" json:"Status"`
+ Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"`
+}
+
+// Configuration is a collection of Rule objects.
+type Configuration struct {
+ XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"`
+ Rules []Rule `xml:"Rule"`
+}
+
+// Empty checks whether the lifecycle configuration is empty
+func (c *Configuration) Empty() bool {
+ if c == nil {
+ return true
+ }
+ return len(c.Rules) == 0
+}
+
+// NewConfiguration initializes a fresh lifecycle configuration
+// for manipulation, such as setting and removing lifecycle rules
+// and filters.
+func NewConfiguration() *Configuration {
+ return &Configuration{}
+}
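+
+// Usage sketch (illustrative only; the rule ID and prefix are hypothetical):
+//
+//	cfg := lifecycle.NewConfiguration()
+//	cfg.Rules = append(cfg.Rules, lifecycle.Rule{
+//		ID:         "expire-logs",
+//		Status:     "Enabled",
+//		RuleFilter: lifecycle.Filter{Prefix: "logs/"},
+//		Expiration: lifecycle.Expiration{Days: lifecycle.ExpirationDays(30)},
+//	})
+//	// cfg can then be applied to a bucket, e.g. via minio.Client.SetBucketLifecycle.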
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
new file mode 100644
index 0000000..126661a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
@@ -0,0 +1,78 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package notification
+
+// identity represents the user ID; this is a compliance field.
+type identity struct {
+ PrincipalID string `json:"principalId"`
+}
+
+// event bucket metadata.
+type bucketMeta struct {
+ Name string `json:"name"`
+ OwnerIdentity identity `json:"ownerIdentity"`
+ ARN string `json:"arn"`
+}
+
+// event object metadata.
+type objectMeta struct {
+ Key string `json:"key"`
+ Size int64 `json:"size,omitempty"`
+ ETag string `json:"eTag,omitempty"`
+ ContentType string `json:"contentType,omitempty"`
+ UserMetadata map[string]string `json:"userMetadata,omitempty"`
+ VersionID string `json:"versionId,omitempty"`
+ Sequencer string `json:"sequencer"`
+}
+
+// event server specific metadata.
+type eventMeta struct {
+ SchemaVersion string `json:"s3SchemaVersion"`
+ ConfigurationID string `json:"configurationId"`
+ Bucket bucketMeta `json:"bucket"`
+ Object objectMeta `json:"object"`
+}
+
+// sourceInfo represents information on the client that
+// triggered the event notification.
+type sourceInfo struct {
+ Host string `json:"host"`
+ Port string `json:"port"`
+ UserAgent string `json:"userAgent"`
+}
+
+// Event represents an Amazon S3 bucket notification event.
+type Event struct {
+ EventVersion string `json:"eventVersion"`
+ EventSource string `json:"eventSource"`
+ AwsRegion string `json:"awsRegion"`
+ EventTime string `json:"eventTime"`
+ EventName string `json:"eventName"`
+ UserIdentity identity `json:"userIdentity"`
+ RequestParameters map[string]string `json:"requestParameters"`
+ ResponseElements map[string]string `json:"responseElements"`
+ S3 eventMeta `json:"s3"`
+ Source sourceInfo `json:"source"`
+}
+
+// Info - represents the collection of notification events; it additionally
+// reports any errors encountered while listening on bucket notifications.
+type Info struct {
+ Records []Event
+ Err error
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
new file mode 100644
index 0000000..a44799d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -0,0 +1,440 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package notification
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/minio/minio-go/v7/pkg/set"
+)
+
+// EventType is an S3 notification event associated with the bucket notification configuration
+type EventType string
+
+// The role of each event type is described in:
+//
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+ ObjectCreatedAll EventType = "s3:ObjectCreated:*"
+ ObjectCreatedPut EventType = "s3:ObjectCreated:Put"
+ ObjectCreatedPost EventType = "s3:ObjectCreated:Post"
+ ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy"
+ ObjectCreatedDeleteTagging EventType = "s3:ObjectCreated:DeleteTagging"
+ ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
+ ObjectCreatedPutLegalHold EventType = "s3:ObjectCreated:PutLegalHold"
+ ObjectCreatedPutRetention EventType = "s3:ObjectCreated:PutRetention"
+ ObjectCreatedPutTagging EventType = "s3:ObjectCreated:PutTagging"
+ ObjectAccessedGet EventType = "s3:ObjectAccessed:Get"
+ ObjectAccessedHead EventType = "s3:ObjectAccessed:Head"
+ ObjectAccessedGetRetention EventType = "s3:ObjectAccessed:GetRetention"
+ ObjectAccessedGetLegalHold EventType = "s3:ObjectAccessed:GetLegalHold"
+ ObjectAccessedAll EventType = "s3:ObjectAccessed:*"
+ ObjectRemovedAll EventType = "s3:ObjectRemoved:*"
+ ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete"
+ ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
+ ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject"
+ ObjectTransitionAll EventType = "s3:ObjectTransition:*"
+ ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed"
+ ObjectTransitionComplete EventType = "s3:ObjectTransition:Complete"
+ ObjectTransitionPost EventType = "s3:ObjectRestore:Post"
+ ObjectTransitionCompleted EventType = "s3:ObjectRestore:Completed"
+ ObjectReplicationAll EventType = "s3:Replication:*"
+ ObjectReplicationOperationCompletedReplication EventType = "s3:Replication:OperationCompletedReplication"
+ ObjectReplicationOperationFailedReplication EventType = "s3:Replication:OperationFailedReplication"
+ ObjectReplicationOperationMissedThreshold EventType = "s3:Replication:OperationMissedThreshold"
+ ObjectReplicationOperationNotTracked EventType = "s3:Replication:OperationNotTracked"
+ ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold"
+ ObjectScannerManyVersions EventType = "s3:Scanner:ManyVersions"
+ ObjectScannerBigPrefix EventType = "s3:Scanner:BigPrefix"
+ ObjectScannerAll EventType = "s3:Scanner:*"
+ BucketCreatedAll EventType = "s3:BucketCreated:*"
+ BucketRemovedAll EventType = "s3:BucketRemoved:*"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+ Name string `xml:"Name"`
+ Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+ FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+ S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service,
+// ARN description can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+ Partition string
+ Service string
+ Region string
+ AccountID string
+ Resource string
+}
+
+// NewArn creates a new ARN from the given partition, service, region, account ID and resource
+func NewArn(partition, service, region, accountID, resource string) Arn {
+ return Arn{
+ Partition: partition,
+ Service: service,
+ Region: region,
+ AccountID: accountID,
+ Resource: resource,
+ }
+}
+
+var (
+ // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn'
+ ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'")
+ // ErrInvalidArnFormat is returned when ARN string format is not valid
+	ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:<partition>:<service>:<region>:<accountID>:<resource>'")
+)
+
+// NewArnFromString parses string representation of ARN into Arn object.
+// Returns an error if the string format is incorrect.
+func NewArnFromString(arn string) (Arn, error) {
+ parts := strings.Split(arn, ":")
+ if len(parts) != 6 {
+ return Arn{}, ErrInvalidArnFormat
+ }
+ if parts[0] != "arn" {
+ return Arn{}, ErrInvalidArnPrefix
+ }
+
+ return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil
+}
+
+// String returns the string format of the ARN
+func (arn Arn) String() string {
+ return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
+}
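+
+// Illustrative round trip (the ARN value is hypothetical):
+//
+//	arn, err := notification.NewArnFromString("arn:minio:sqs:us-east-1:1:webhook")
+//	if err != nil {
+//		// handle ErrInvalidArnPrefix / ErrInvalidArnFormat
+//	}
+//	_ = arn.String() // "arn:minio:sqs:us-east-1:1:webhook"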
+
+// Config - represents one single notification configuration
+// such as topic, queue or lambda configuration.
+type Config struct {
+ ID string `xml:"Id,omitempty"`
+ Arn Arn `xml:"-"`
+ Events []EventType `xml:"Event"`
+ Filter *Filter `xml:"Filter,omitempty"`
+}
+
+// NewConfig creates one notification config and sets the given ARN
+func NewConfig(arn Arn) Config {
+ return Config{Arn: arn, Filter: &Filter{}}
+}
+
+// AddEvents appends one or more events to the current notification config
+func (t *Config) AddEvents(events ...EventType) {
+ t.Events = append(t.Events, events...)
+}
+
+// AddFilterSuffix sets the suffix filter rule on the current notification config
+func (t *Config) AddFilterSuffix(suffix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "suffix", Value: suffix}
+ // Replace any suffix rule if existing and add to the list otherwise
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// AddFilterPrefix sets the prefix filter rule on the current notification config
+func (t *Config) AddFilterPrefix(prefix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "prefix", Value: prefix}
+ // Replace any prefix rule if existing and add to the list otherwise
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
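+
+// Usage sketch (illustrative; the ARN components are hypothetical):
+//
+//	queueArn := notification.NewArn("minio", "sqs", "us-east-1", "1", "webhook")
+//	cfg := notification.NewConfig(queueArn)
+//	cfg.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll)
+//	cfg.AddFilterPrefix("photos/")
+//	cfg.AddFilterSuffix(".jpg")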
+
+// EqualEventTypeList tells whether a and b contain the same events
+func EqualEventTypeList(a, b []EventType) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ setA := set.NewStringSet()
+ for _, i := range a {
+ setA.Add(string(i))
+ }
+
+ setB := set.NewStringSet()
+ for _, i := range b {
+ setB.Add(string(i))
+ }
+
+ return setA.Difference(setB).IsEmpty()
+}
+
+// EqualFilterRuleList tells whether a and b contain the same filters
+func EqualFilterRuleList(a, b []FilterRule) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ setA := set.NewStringSet()
+ for _, i := range a {
+ setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
+ }
+
+ setB := set.NewStringSet()
+ for _, i := range b {
+ setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
+ }
+
+ return setA.Difference(setB).IsEmpty()
+}
+
+// Equal returns whether this `Config` is equal to another defined by the passed parameters
+func (t *Config) Equal(events []EventType, prefix, suffix string) bool {
+ if t == nil {
+ return false
+ }
+
+ // Compare events
+ passEvents := EqualEventTypeList(t.Events, events)
+
+ // Compare filters
+ var newFilterRules []FilterRule
+ if prefix != "" {
+ newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix})
+ }
+ if suffix != "" {
+ newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix})
+ }
+
+ var currentFilterRules []FilterRule
+ if t.Filter != nil {
+ currentFilterRules = t.Filter.S3Key.FilterRules
+ }
+
+ passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules)
+ return passEvents && passFilters
+}
+
+// TopicConfig carries one single topic notification configuration
+type TopicConfig struct {
+ Config
+ Topic string `xml:"Topic"`
+}
+
+// QueueConfig carries one single queue notification configuration
+type QueueConfig struct {
+ Config
+ Queue string `xml:"Queue"`
+}
+
+// LambdaConfig carries one single cloudfunction notification configuration
+type LambdaConfig struct {
+ Config
+ Lambda string `xml:"CloudFunction"`
+}
+
+// Configuration - the struct that represents the whole XML to be sent to the web service
+type Configuration struct {
+ XMLName xml.Name `xml:"NotificationConfiguration"`
+ LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
+ TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
+ QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
+}
+
+// AddTopic adds a given topic config to the general bucket notification config
+func (b *Configuration) AddTopic(topicConfig Config) bool {
+ newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()}
+ for _, n := range b.TopicConfigs {
+ // If new config matches existing one
+ if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range topicConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
+ }
+ }
+ b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
+ return true
+}
+
+// AddQueue adds a given queue config to the general bucket notification config
+func (b *Configuration) AddQueue(queueConfig Config) bool {
+ newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()}
+ for _, n := range b.QueueConfigs {
+ if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range queueConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
+ }
+ }
+ b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
+ return true
+}
+
+// AddLambda adds a given lambda config to the general bucket notification config
+func (b *Configuration) AddLambda(lambdaConfig Config) bool {
+ newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
+ for _, n := range b.LambdaConfigs {
+ if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range lambdaConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
+ }
+ }
+ b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
+ return true
+}
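+
+// Illustrative sketch (continuing the hypothetical queue config above):
+//
+//	bucketCfg := notification.Configuration{}
+//	if ok := bucketCfg.AddQueue(cfg); !ok {
+//		// an existing queue config with the same ARN and filter already
+//		// covers one of these events
+//	}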
+
+// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
+func (b *Configuration) RemoveTopicByArn(arn Arn) {
+ var topics []TopicConfig
+ for _, topic := range b.TopicConfigs {
+ if topic.Topic != arn.String() {
+ topics = append(topics, topic)
+ }
+ }
+ b.TopicConfigs = topics
+}
+
+// ErrNoConfigMatch is returned when a notification configuration (sqs, sns, lambda) is not found when trying to delete
+var ErrNoConfigMatch = errors.New("no notification configuration matched")
+
+// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that matches the exact specified ARN, events, prefix and suffix
+func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
+ removeIndex := -1
+ for i, v := range b.TopicConfigs {
+ // if it matches events and filters, mark the index for deletion
+ if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) {
+ removeIndex = i
+ break // since we have at most one matching config
+ }
+ }
+ if removeIndex >= 0 {
+ b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...)
+ return nil
+ }
+ return ErrNoConfigMatch
+}
+
+// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
+func (b *Configuration) RemoveQueueByArn(arn Arn) {
+ var queues []QueueConfig
+ for _, queue := range b.QueueConfigs {
+ if queue.Queue != arn.String() {
+ queues = append(queues, queue)
+ }
+ }
+ b.QueueConfigs = queues
+}
+
+// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that matches the exact specified ARN, events, prefix and suffix
+func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
+ removeIndex := -1
+ for i, v := range b.QueueConfigs {
+ // if it matches events and filters, mark the index for deletion
+ if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) {
+ removeIndex = i
+ break // since we have at most one matching config
+ }
+ }
+ if removeIndex >= 0 {
+ b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...)
+ return nil
+ }
+ return ErrNoConfigMatch
+}
+
+// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
+func (b *Configuration) RemoveLambdaByArn(arn Arn) {
+ var lambdas []LambdaConfig
+ for _, lambda := range b.LambdaConfigs {
+ if lambda.Lambda != arn.String() {
+ lambdas = append(lambdas, lambda)
+ }
+ }
+ b.LambdaConfigs = lambdas
+}
+
+// RemoveLambdaByArnEventsPrefixSuffix removes a lambda configuration that matches the exact specified ARN, events, prefix and suffix
+func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
+ removeIndex := -1
+ for i, v := range b.LambdaConfigs {
+ // if it matches events and filters, mark the index for deletion
+ if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) {
+ removeIndex = i
+ break // since we have at most one matching config
+ }
+ }
+ if removeIndex >= 0 {
+ b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...)
+ return nil
+ }
+ return ErrNoConfigMatch
+}
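+
+// Illustrative removal sketch (hypothetical values, reusing the queue config above):
+//
+//	err := bucketCfg.RemoveQueueByArnEventsPrefixSuffix(
+//		queueArn,
+//		[]notification.EventType{notification.ObjectCreatedAll, notification.ObjectRemovedAll},
+//		"photos/", ".jpg",
+//	)
+//	// err == notification.ErrNoConfigMatch when nothing matches exactly.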
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
new file mode 100644
index 0000000..0abbf6e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -0,0 +1,971 @@
+/*
+ * MinIO Client (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package replication
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/rs/xid"
+)
+
+var errInvalidFilter = fmt.Errorf("invalid filter")
+
+// OptionType specifies operation to be performed on config
+type OptionType string
+
+const (
+ // AddOption specifies addition of rule to config
+ AddOption OptionType = "Add"
+ // SetOption specifies modification of existing rule to config
+ SetOption OptionType = "Set"
+
+	// RemoveOption specifies removal of an existing rule from config
+ RemoveOption OptionType = "Remove"
+ // ImportOption is for getting current config
+ ImportOption OptionType = "Import"
+)
+
+// Options represents options to set a replication configuration rule
+type Options struct {
+ Op OptionType
+ RoleArn string
+ ID string
+ Prefix string
+ RuleStatus string
+ Priority string
+ TagString string
+ StorageClass string
+ DestBucket string
+ IsTagSet bool
+ IsSCSet bool
+ ReplicateDeletes string // replicate versioned deletes
+ ReplicateDeleteMarkers string // replicate soft deletes
+ ReplicaSync string // replicate replica metadata modifications
+ ExistingObjectReplicate string
+}
+
+// Tags returns a slice of tags for a rule
+func (opts Options) Tags() ([]Tag, error) {
+ var tagList []Tag
+ tagTokens := strings.Split(opts.TagString, "&")
+ for _, tok := range tagTokens {
+ if tok == "" {
+ break
+ }
+ kv := strings.SplitN(tok, "=", 2)
+ if len(kv) != 2 {
+			return []Tag{}, fmt.Errorf("tags should be entered as k=v pairs separated by '&'")
+ }
+ tagList = append(tagList, Tag{
+ Key: kv[0],
+ Value: kv[1],
+ })
+ }
+ return tagList, nil
+}
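+
+// Illustrative sketch: a hypothetical TagString of "k1=v1&k2=v2" yields
+//
+//	[]Tag{{Key: "k1", Value: "v1"}, {Key: "k2", Value: "v2"}}
+//
+// while a token without '=' (e.g. "k1") returns an error.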
+
+// Config - replication configuration specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
+type Config struct {
+ XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"`
+ Rules []Rule `xml:"Rule" json:"Rules"`
+ Role string `xml:"Role" json:"Role"`
+}
+
+// Empty returns true if config is not set
+func (c *Config) Empty() bool {
+ return len(c.Rules) == 0
+}
+
+// AddRule adds a new rule to an existing replication config. If a rule with
+// the same ID already exists, an error is returned.
+func (c *Config) AddRule(opts Options) error {
+ priority, err := strconv.Atoi(opts.Priority)
+ if err != nil {
+ return err
+ }
+ var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite
+ if opts.RoleArn != "" {
+ tokens := strings.Split(opts.RoleArn, ":")
+ if len(tokens) != 6 {
+ return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn)
+ }
+ switch {
+ case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0:
+ c.Role = opts.RoleArn
+ compatSw = true
+ case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"):
+ c.Role = opts.RoleArn
+ default:
+ return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn)
+ }
+ }
+
+ var status Status
+ // toggle rule status for edit option
+ switch opts.RuleStatus {
+ case "enable":
+ status = Enabled
+ case "disable":
+ status = Disabled
+ default:
+ return fmt.Errorf("rule state should be either [enable|disable]")
+ }
+
+ tags, err := opts.Tags()
+ if err != nil {
+ return err
+ }
+ andVal := And{
+ Tags: tags,
+ }
+ filter := Filter{Prefix: opts.Prefix}
+ // only a single tag is set.
+ if opts.Prefix == "" && len(tags) == 1 {
+ filter.Tag = tags[0]
+ }
+ // both prefix and tag are present
+ if len(andVal.Tags) > 1 || opts.Prefix != "" {
+ filter.And = andVal
+ filter.And.Prefix = opts.Prefix
+ filter.Prefix = ""
+ filter.Tag = Tag{}
+ }
+ if opts.ID == "" {
+ opts.ID = xid.New().String()
+ }
+
+ destBucket := opts.DestBucket
+ // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
+ if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 {
+ if len(btokens) == 1 && compatSw {
+ destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
+ } else {
+ return fmt.Errorf("destination bucket needs to be in Arn format")
+ }
+ }
+ dmStatus := Disabled
+ if opts.ReplicateDeleteMarkers != "" {
+ switch opts.ReplicateDeleteMarkers {
+ case "enable":
+ dmStatus = Enabled
+ case "disable":
+ dmStatus = Disabled
+ default:
+ return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable")
+ }
+ }
+
+ vDeleteStatus := Disabled
+ if opts.ReplicateDeletes != "" {
+ switch opts.ReplicateDeletes {
+ case "enable":
+ vDeleteStatus = Enabled
+ case "disable":
+ vDeleteStatus = Disabled
+ default:
+ return fmt.Errorf("ReplicateDeletes should be either enable|disable")
+ }
+ }
+ var replicaSync Status
+ // replica sync is by default Enabled, unless specified.
+ switch opts.ReplicaSync {
+ case "enable", "":
+ replicaSync = Enabled
+ case "disable":
+ replicaSync = Disabled
+ default:
+ return fmt.Errorf("replica metadata sync should be either [enable|disable]")
+ }
+
+ var existingStatus Status
+ if opts.ExistingObjectReplicate != "" {
+ switch opts.ExistingObjectReplicate {
+ case "enable":
+ existingStatus = Enabled
+ case "disable", "":
+ existingStatus = Disabled
+ default:
+ return fmt.Errorf("existingObjectReplicate should be either enable|disable")
+ }
+ }
+ newRule := Rule{
+ ID: opts.ID,
+ Priority: priority,
+ Status: status,
+ Filter: filter,
+ Destination: Destination{
+ Bucket: destBucket,
+ StorageClass: opts.StorageClass,
+ },
+ DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus},
+ DeleteReplication: DeleteReplication{Status: vDeleteStatus},
+ // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow
+ // automatic failover as the expectation in this case is that replica and source should be identical.
+ // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html
+ SourceSelectionCriteria: SourceSelectionCriteria{
+ ReplicaModifications: ReplicaModifications{
+ Status: replicaSync,
+ },
+ },
+ // By default disable existing object replication unless selected
+ ExistingObjectReplication: ExistingObjectReplication{
+ Status: existingStatus,
+ },
+ }
+
+ // validate rule after overlaying priority for pre-existing rule being disabled.
+ if err := newRule.Validate(); err != nil {
+ return err
+ }
+ // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration
+ if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw {
+ for i := range c.Rules {
+ c.Rules[i].Destination.Bucket = c.Role
+ }
+ c.Role = ""
+ }
+
+ for _, rule := range c.Rules {
+ if rule.Priority == newRule.Priority {
+ return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
+ }
+ if rule.ID == newRule.ID {
+ return fmt.Errorf("a rule exists with this ID")
+ }
+ }
+
+ c.Rules = append(c.Rules, newRule)
+ return nil
+}
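+
+// Usage sketch (illustrative; the destination ARN is hypothetical):
+//
+//	var cfg replication.Config
+//	err := cfg.AddRule(replication.Options{
+//		Op:         replication.AddOption,
+//		Priority:   "1",
+//		RuleStatus: "enable",
+//		Prefix:     "docs/",
+//		DestBucket: "arn:minio:replication:us-east-1:example-id:destbucket",
+//	})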
+
+// EditRule modifies an existing rule in replication config
+func (c *Config) EditRule(opts Options) error {
+ if opts.ID == "" {
+ return fmt.Errorf("rule ID missing")
+ }
+ // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS.
+ if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 {
+ for i := range c.Rules {
+ c.Rules[i].Destination.Bucket = c.Role
+ }
+ c.Role = ""
+ }
+
+ rIdx := -1
+ var newRule Rule
+ for i, rule := range c.Rules {
+ if rule.ID == opts.ID {
+ rIdx = i
+ newRule = rule
+ break
+ }
+ }
+ if rIdx < 0 {
+ return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID)
+ }
+ prefixChg := opts.Prefix != newRule.Prefix()
+ if opts.IsTagSet || prefixChg {
+ prefix := newRule.Prefix()
+ if prefix != opts.Prefix {
+ prefix = opts.Prefix
+ }
+ tags := []Tag{newRule.Filter.Tag}
+ if len(newRule.Filter.And.Tags) != 0 {
+ tags = newRule.Filter.And.Tags
+ }
+ var err error
+ if opts.IsTagSet {
+ tags, err = opts.Tags()
+ if err != nil {
+ return err
+ }
+ }
+ andVal := And{
+ Tags: tags,
+ }
+
+ filter := Filter{Prefix: prefix}
+ // only a single tag is set.
+ if prefix == "" && len(tags) == 1 {
+ filter.Tag = tags[0]
+ }
+ // both prefix and tag are present
+ if len(andVal.Tags) > 1 || prefix != "" {
+ filter.And = andVal
+ filter.And.Prefix = prefix
+ filter.Prefix = ""
+ filter.Tag = Tag{}
+ }
+ newRule.Filter = filter
+ }
+
+ // toggle rule status for edit option
+ if opts.RuleStatus != "" {
+ switch opts.RuleStatus {
+ case "enable":
+ newRule.Status = Enabled
+ case "disable":
+ newRule.Status = Disabled
+ default:
+ return fmt.Errorf("rule state should be either [enable|disable]")
+ }
+ }
+ // set DeleteMarkerReplication rule status for edit option
+ if opts.ReplicateDeleteMarkers != "" {
+ switch opts.ReplicateDeleteMarkers {
+ case "enable":
+ newRule.DeleteMarkerReplication.Status = Enabled
+ case "disable":
+ newRule.DeleteMarkerReplication.Status = Disabled
+ default:
+ return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]")
+ }
+ }
+
+ // set DeleteReplication rule status for edit option. This is a MinIO specific
+ // option to replicate versioned deletes
+ if opts.ReplicateDeletes != "" {
+ switch opts.ReplicateDeletes {
+ case "enable":
+ newRule.DeleteReplication.Status = Enabled
+ case "disable":
+ newRule.DeleteReplication.Status = Disabled
+ default:
+ return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]")
+ }
+ }
+
+ if opts.ReplicaSync != "" {
+ switch opts.ReplicaSync {
+ case "enable", "":
+ newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled
+ case "disable":
+ newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled
+ default:
+ return fmt.Errorf("replica metadata sync should be either [enable|disable]")
+ }
+ }
+
+ if opts.ExistingObjectReplicate != "" {
+ switch opts.ExistingObjectReplicate {
+ case "enable":
+ newRule.ExistingObjectReplication.Status = Enabled
+ case "disable":
+ newRule.ExistingObjectReplication.Status = Disabled
+ default:
+ return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]")
+ }
+ }
+ if opts.IsSCSet {
+ newRule.Destination.StorageClass = opts.StorageClass
+ }
+ if opts.Priority != "" {
+ priority, err := strconv.Atoi(opts.Priority)
+ if err != nil {
+ return err
+ }
+ newRule.Priority = priority
+ }
+ if opts.DestBucket != "" {
+ destBucket := opts.DestBucket
+ // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
+ if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 {
+ return fmt.Errorf("destination bucket needs to be in Arn format")
+ }
+ newRule.Destination.Bucket = destBucket
+ }
+ // validate rule
+ if err := newRule.Validate(); err != nil {
+ return err
+ }
+ // ensure priority and destination bucket restrictions are not violated
+ for idx, rule := range c.Rules {
+ if rule.Priority == newRule.Priority && rIdx != idx {
+ return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
+ }
+ if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID {
+ return fmt.Errorf("invalid destination bucket for this rule")
+ }
+ }
+
+ c.Rules[rIdx] = newRule
+ return nil
+}
+
+// RemoveRule removes a rule from replication config.
+func (c *Config) RemoveRule(opts Options) error {
+ var newRules []Rule
+ ruleFound := false
+ for _, rule := range c.Rules {
+ if rule.ID != opts.ID {
+ newRules = append(newRules, rule)
+ continue
+ }
+ ruleFound = true
+ }
+ if !ruleFound {
+		return fmt.Errorf("rule with ID %s not found", opts.ID)
+ }
+ if len(newRules) == 0 {
+ return fmt.Errorf("replication configuration should have at least one rule")
+ }
+ c.Rules = newRules
+ return nil
+}
+
+// Rule - a rule for replication configuration.
+type Rule struct {
+ XMLName xml.Name `xml:"Rule" json:"-"`
+ ID string `xml:"ID,omitempty"`
+ Status Status `xml:"Status"`
+ Priority int `xml:"Priority"`
+ DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"`
+ DeleteReplication DeleteReplication `xml:"DeleteReplication"`
+ Destination Destination `xml:"Destination"`
+ Filter Filter `xml:"Filter" json:"Filter"`
+ SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
+ ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"`
+}
+
+// Validate validates the rule for correctness
+func (r Rule) Validate() error {
+ if err := r.validateID(); err != nil {
+ return err
+ }
+ if err := r.validateStatus(); err != nil {
+ return err
+ }
+ if err := r.validateFilter(); err != nil {
+ return err
+ }
+
+ if r.Priority < 0 && r.Status == Enabled {
+ return fmt.Errorf("priority must be set for the rule")
+ }
+
+ if err := r.validateStatus(); err != nil {
+ return err
+ }
+ return r.ExistingObjectReplication.Validate()
+}
+
+// validateID - checks if ID is valid or not.
+func (r Rule) validateID() error {
+ // cannot be longer than 255 characters
+ if len(r.ID) > 255 {
+		return fmt.Errorf("ID must be no longer than 255 characters")
+ }
+ return nil
+}
+
+// validateStatus - checks if status is valid or not.
+func (r Rule) validateStatus() error {
+ // Status can't be empty
+ if len(r.Status) == 0 {
+ return fmt.Errorf("status cannot be empty")
+ }
+
+ // Status must be one of Enabled or Disabled
+ if r.Status != Enabled && r.Status != Disabled {
+ return fmt.Errorf("status must be set to either Enabled or Disabled")
+ }
+ return nil
+}
+
+func (r Rule) validateFilter() error {
+ return r.Filter.Validate()
+}
+
+// Prefix - a rule can either have prefix under <filter></filter> or under
+// <filter><and></and></filter>. This method returns the prefix from the
+// location where it is available
+func (r Rule) Prefix() string {
+ if r.Filter.Prefix != "" {
+ return r.Filter.Prefix
+ }
+ return r.Filter.And.Prefix
+}
+
+// Tags - a rule can either have tag under <filter></filter> or under
+// <filter><and></and></filter>. This method returns all the tags from the
+// rule in the format tag1=value1&tag2=value2
+func (r Rule) Tags() string {
+ ts := []Tag{r.Filter.Tag}
+ if len(r.Filter.And.Tags) != 0 {
+ ts = r.Filter.And.Tags
+ }
+
+ var buf bytes.Buffer
+ for _, t := range ts {
+ if buf.Len() > 0 {
+ buf.WriteString("&")
+ }
+ buf.WriteString(t.String())
+ }
+ return buf.String()
+}
+
+// Filter - a filter for a replication configuration Rule.
+type Filter struct {
+ XMLName xml.Name `xml:"Filter" json:"-"`
+ Prefix string `json:"Prefix,omitempty"`
+ And And `xml:"And,omitempty" json:"And,omitempty"`
+ Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
+}
+
+// Validate - validates the filter element
+func (f Filter) Validate() error {
+	// A Filter may have at most one of Prefix, Tag, or And specified.
+ if !f.And.isEmpty() {
+ if f.Prefix != "" {
+ return errInvalidFilter
+ }
+ if !f.Tag.IsEmpty() {
+ return errInvalidFilter
+ }
+ }
+ if f.Prefix != "" {
+ if !f.Tag.IsEmpty() {
+ return errInvalidFilter
+ }
+ }
+ if !f.Tag.IsEmpty() {
+ if err := f.Tag.Validate(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tag - a tag for a replication configuration Rule filter.
+type Tag struct {
+ XMLName xml.Name `json:"-"`
+ Key string `xml:"Key,omitempty" json:"Key,omitempty"`
+ Value string `xml:"Value,omitempty" json:"Value,omitempty"`
+}
+
+func (tag Tag) String() string {
+ if tag.IsEmpty() {
+ return ""
+ }
+ return tag.Key + "=" + tag.Value
+}
+
+// IsEmpty returns whether this tag is empty or not.
+func (tag Tag) IsEmpty() bool {
+ return tag.Key == ""
+}
+
+// Validate checks this tag.
+func (tag Tag) Validate() error {
+ if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 {
+ return fmt.Errorf("invalid Tag Key")
+ }
+
+ if utf8.RuneCountInString(tag.Value) > 256 {
+ return fmt.Errorf("invalid Tag Value")
+ }
+ return nil
+}
+
+// Destination - destination in ReplicationConfiguration.
+type Destination struct {
+ XMLName xml.Name `xml:"Destination" json:"-"`
+ Bucket string `xml:"Bucket" json:"Bucket"`
+ StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+}
+
+// And - a tag to combine a prefix and multiple tags for replication configuration rule.
+type And struct {
+ XMLName xml.Name `xml:"And,omitempty" json:"-"`
+ Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+ Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
+}
+
+// isEmpty returns true if both the Prefix and Tags fields are empty
+func (a And) isEmpty() bool {
+ return len(a.Tags) == 0 && a.Prefix == ""
+}
+
+// Status represents Enabled/Disabled status
+type Status string
+
+// Supported status types
+const (
+ Enabled Status = "Enabled"
+ Disabled Status = "Disabled"
+)
+
+// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
+type DeleteMarkerReplication struct {
+ Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
+}
+
+// IsEmpty returns true if DeleteMarkerReplication is not set
+func (d DeleteMarkerReplication) IsEmpty() bool {
+ return len(d.Status) == 0
+}
+
+// DeleteReplication - whether versioned deletes are replicated - this
+// is a MinIO specific extension
+type DeleteReplication struct {
+ Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
+}
+
+// IsEmpty returns true if DeleteReplication is not set
+func (d DeleteReplication) IsEmpty() bool {
+ return len(d.Status) == 0
+}
+
+// ReplicaModifications specifies if replica modification sync is enabled
+type ReplicaModifications struct {
+ Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default
+}
+
+// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration.
+type SourceSelectionCriteria struct {
+ ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"`
+}
+
+// IsValid - checks whether SourceSelectionCriteria is valid or not.
+func (s SourceSelectionCriteria) IsValid() bool {
+ return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled
+}
+
+// Validate source selection criteria
+func (s SourceSelectionCriteria) Validate() error {
+ if (s == SourceSelectionCriteria{}) {
+ return nil
+ }
+ if !s.IsValid() {
+ return fmt.Errorf("invalid ReplicaModification status")
+ }
+ return nil
+}
+
+// ExistingObjectReplication - whether existing object replication is enabled
+type ExistingObjectReplication struct {
+ Status Status `xml:"Status"` // should be set to "Disabled" by default
+}
+
+// IsEmpty returns true if ExistingObjectReplication is not set
+func (e ExistingObjectReplication) IsEmpty() bool {
+ return len(e.Status) == 0
+}
+
+// Validate checks that the status, when set, is either Enabled or Disabled.
+func (e ExistingObjectReplication) Validate() error {
+ if e.IsEmpty() {
+ return nil
+ }
+ if e.Status != Disabled && e.Status != Enabled {
+ return fmt.Errorf("invalid ExistingObjectReplication status")
+ }
+ return nil
+}
+
+// TargetMetrics represents inline replication metrics
+// such as pending, failed and completed bytes in total for a bucket remote target
+type TargetMetrics struct {
+ // Completed count
+ ReplicatedCount uint64 `json:"replicationCount,omitempty"`
+ // Completed size in bytes
+ ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
+ // Bandwidth limit in bytes/sec for this target
+ BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
+ // Current bandwidth used in bytes/sec for this target
+ CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
+ // errors seen in replication in last minute, hour and total
+ Failed TimedErrStats `json:"failed,omitempty"`
+ // Deprecated fields
+ // Pending size in bytes
+ PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
+ // Total Replica size in bytes
+ ReplicaSize uint64 `json:"replicaSize,omitempty"`
+ // Failed size in bytes
+ FailedSize uint64 `json:"failedReplicationSize,omitempty"`
+ // Total number of pending operations including metadata updates
+ PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
+ // Total number of failed operations including metadata updates
+ FailedCount uint64 `json:"failedReplicationCount,omitempty"`
+}
+
+// Metrics represents inline replication metrics for a bucket.
+type Metrics struct {
+ Stats map[string]TargetMetrics
+ // Completed size in bytes across targets
+ ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
+ // Total Replica size in bytes across targets
+ ReplicaSize uint64 `json:"replicaSize,omitempty"`
+ // Total Replica counts
+ ReplicaCount int64 `json:"replicaCount,omitempty"`
+ // Total Replicated count
+ ReplicatedCount int64 `json:"replicationCount,omitempty"`
+ // errors seen in replication in last minute, hour and total
+ Errors TimedErrStats `json:"failed,omitempty"`
+ // Total number of entries that are queued for replication
+ QStats InQueueMetric `json:"queued"`
+ // Deprecated fields
+ // Total Pending size in bytes across targets
+ PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
+ // Failed size in bytes across targets
+ FailedSize uint64 `json:"failedReplicationSize,omitempty"`
+ // Total number of pending operations including metadata updates across targets
+ PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
+ // Total number of failed operations including metadata updates across targets
+ FailedCount uint64 `json:"failedReplicationCount,omitempty"`
+}
+
+// RStat - has count and bytes for replication metrics
+type RStat struct {
+ Count float64 `json:"count"`
+ Bytes int64 `json:"bytes"`
+}
+
+// Add two RStat
+func (r RStat) Add(r1 RStat) RStat {
+ return RStat{
+ Count: r.Count + r1.Count,
+ Bytes: r.Bytes + r1.Bytes,
+ }
+}
+
+// TimedErrStats holds error stats for a time period
+type TimedErrStats struct {
+ LastMinute RStat `json:"lastMinute"`
+ LastHour RStat `json:"lastHour"`
+ Totals RStat `json:"totals"`
+}
+
+// Add two TimedErrStats
+func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats {
+ return TimedErrStats{
+ LastMinute: te.LastMinute.Add(o.LastMinute),
+ LastHour: te.LastHour.Add(o.LastHour),
+ Totals: te.Totals.Add(o.Totals),
+ }
+}
+
+// ResyncTargetsInfo provides replication target information to resync replicated data.
+type ResyncTargetsInfo struct {
+ Targets []ResyncTarget `json:"target,omitempty"`
+}
+
+// ResyncTarget provides the replica resources and resetID to initiate resync replication.
+type ResyncTarget struct {
+ Arn string `json:"arn"`
+ ResetID string `json:"resetid"`
+ StartTime time.Time `json:"startTime,omitempty"`
+ EndTime time.Time `json:"endTime,omitempty"`
+ // Status of resync operation
+ ResyncStatus string `json:"resyncStatus,omitempty"`
+ // Completed size in bytes
+ ReplicatedSize int64 `json:"completedReplicationSize,omitempty"`
+ // Failed size in bytes
+ FailedSize int64 `json:"failedReplicationSize,omitempty"`
+ // Total number of failed operations
+ FailedCount int64 `json:"failedReplicationCount,omitempty"`
+ // Total number of completed operations
+ ReplicatedCount int64 `json:"replicationCount,omitempty"`
+ // Last bucket/object replicated.
+ Bucket string `json:"bucket,omitempty"`
+ Object string `json:"object,omitempty"`
+}
+
+// XferStats holds transfer rate info for uploads/sec
+type XferStats struct {
+ AvgRate float64 `json:"avgRate"`
+ PeakRate float64 `json:"peakRate"`
+ CurrRate float64 `json:"currRate"`
+}
+
+// Merge two XferStats
+func (x *XferStats) Merge(x1 XferStats) {
+ x.AvgRate += x1.AvgRate
+ x.PeakRate += x1.PeakRate
+ x.CurrRate += x1.CurrRate
+}
+
+// QStat holds count and bytes for objects in replication queue
+type QStat struct {
+ Count float64 `json:"count"`
+ Bytes float64 `json:"bytes"`
+}
+
+// Add adds two QStat entries
+func (q *QStat) Add(q1 QStat) {
+ q.Count += q1.Count
+ q.Bytes += q1.Bytes
+}
+
+// InQueueMetric holds stats for objects in replication queue
+type InQueueMetric struct {
+ Curr QStat `json:"curr" msg:"cq"`
+ Avg QStat `json:"avg" msg:"aq"`
+ Max QStat `json:"peak" msg:"pq"`
+}
+
+// MetricName name of replication metric
+type MetricName string
+
+const (
+ // Large is a metric name for large objects >=128MiB
+ Large MetricName = "Large"
+ // Small is a metric name for objects <128MiB size
+ Small MetricName = "Small"
+ // Total is a metric name for total objects
+ Total MetricName = "Total"
+)
+
+// WorkerStat has stats on number of replication workers
+type WorkerStat struct {
+ Curr int32 `json:"curr"`
+ Avg float32 `json:"avg"`
+ Max int32 `json:"max"`
+}
+
+// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
+// and number of entries that failed replication after 3 retries
+type ReplMRFStats struct {
+ LastFailedCount uint64 `json:"failedCount_last5min"`
+ // Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
+ TotalDroppedCount uint64 `json:"droppedCount_since_uptime"`
+ // Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
+ TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"`
+}
+
+// ReplQNodeStats holds stats for a node in replication queue
+type ReplQNodeStats struct {
+ NodeName string `json:"nodeName"`
+ Uptime int64 `json:"uptime"`
+ Workers WorkerStat `json:"activeWorkers"`
+
+ XferStats map[MetricName]XferStats `json:"transferSummary"`
+ TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`
+
+ QStats InQueueMetric `json:"queueStats"`
+ MRFStats ReplMRFStats `json:"mrfStats"`
+}
+
+// ReplQueueStats holds stats for replication queue across nodes
+type ReplQueueStats struct {
+ Nodes []ReplQNodeStats `json:"nodes"`
+}
+
+// Workers returns number of workers across all nodes
+func (q ReplQueueStats) Workers() (tot WorkerStat) {
+ for _, node := range q.Nodes {
+ tot.Avg += node.Workers.Avg
+ tot.Curr += node.Workers.Curr
+ if tot.Max < node.Workers.Max {
+ tot.Max = node.Workers.Max
+ }
+ }
+ if len(q.Nodes) > 0 {
+ tot.Avg /= float32(len(q.Nodes))
+ tot.Curr /= int32(len(q.Nodes))
+ }
+ return tot
+}
+
+// qStatSummary returns cluster level stats for objects in replication queue
+func (q ReplQueueStats) qStatSummary() InQueueMetric {
+ m := InQueueMetric{}
+ for _, v := range q.Nodes {
+ m.Avg.Add(v.QStats.Avg)
+ m.Curr.Add(v.QStats.Curr)
+ if m.Max.Count < v.QStats.Max.Count {
+ m.Max.Add(v.QStats.Max)
+ }
+ }
+ return m
+}
+
+// ReplQStats holds stats for objects in replication queue
+type ReplQStats struct {
+ Uptime int64 `json:"uptime"`
+ Workers WorkerStat `json:"workers"`
+
+ XferStats map[MetricName]XferStats `json:"xferStats"`
+ TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
+
+ QStats InQueueMetric `json:"qStats"`
+ MRFStats ReplMRFStats `json:"mrfStats"`
+}
+
+// QStats returns cluster level stats for objects in replication queue
+func (q ReplQueueStats) QStats() (r ReplQStats) {
+ r.QStats = q.qStatSummary()
+ r.XferStats = make(map[MetricName]XferStats)
+ r.TgtXferStats = make(map[string]map[MetricName]XferStats)
+ r.Workers = q.Workers()
+
+ for _, node := range q.Nodes {
+ for arn := range node.TgtXferStats {
+ xmap, ok := node.TgtXferStats[arn]
+ if !ok {
+ xmap = make(map[MetricName]XferStats)
+ }
+ for m, v := range xmap {
+ st, ok := r.XferStats[m]
+ if !ok {
+ st = XferStats{}
+ }
+ st.AvgRate += v.AvgRate
+ st.CurrRate += v.CurrRate
+ st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
+ if _, ok := r.TgtXferStats[arn]; !ok {
+ r.TgtXferStats[arn] = make(map[MetricName]XferStats)
+ }
+ r.TgtXferStats[arn][m] = st
+ }
+ }
+ for k, v := range node.XferStats {
+ st, ok := r.XferStats[k]
+ if !ok {
+ st = XferStats{}
+ }
+ st.AvgRate += v.AvgRate
+ st.CurrRate += v.CurrRate
+ st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
+ r.XferStats[k] = st
+ }
+ r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount
+ r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount
+ r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes
+ r.Uptime += node.Uptime
+ }
+ if len(q.Nodes) > 0 {
+ r.Uptime /= int64(len(q.Nodes)) // average uptime
+ }
+ return
+}
+
+// MetricsV2 represents replication metrics for a bucket.
+type MetricsV2 struct {
+ Uptime int64 `json:"uptime"`
+ CurrentStats Metrics `json:"currStats"`
+ QueueStats ReplQueueStats `json:"queueStats"`
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
new file mode 100644
index 0000000..056e78a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -0,0 +1,411 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "net"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// sentinelURL is the default URL value, which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start with a "."
+ if host[:1] == "." {
+ return false
+ }
+ // All non alphanumeric characters are invalid.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:>/") {
+ return false
+ }
+	// No need to regexp match, since the list is non-exhaustive;
+	// we treat the host as valid here and let it fail later.
+ return true
+}
+
+// IsValidIP parses input string for ip address validity.
+func IsValidIP(ip string) bool {
+ return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// virtual host. Currently only Amazon S3, Google Cloud Storage and
+// Aliyun OSS would support this.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ // bucketName can be valid but '.' in the hostname will fail SSL
+ // certificate validation. So do not use host-style for such buckets.
+ if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+ return false
+ }
+ // Return true for all other cases
+ return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL)
+}
+
+// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+
+// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
+var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`)
+
+// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
+var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`)
+
+// amazonS3HostFIPS - regular expression used to determine if an arg is s3 FIPS host.
+var amazonS3HostFIPS = regexp.MustCompile(`^s3-fips.(.*?).amazonaws.com$`)
+
+// amazonS3HostFIPSDualStack - regular expression used to determine if an arg is s3 FIPS host dualstack.
+var amazonS3HostFIPSDualStack = regexp.MustCompile(`^s3-fips.dualstack.(.*?).amazonaws.com$`)
+
+// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
+var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`)
+
+// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
+var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`)
+
+// amazonS3ChinaHostDualStack - regular expression used to determine if the arg is s3 china host dualstack.
+var amazonS3ChinaHostDualStack = regexp.MustCompile(`^s3.dualstack.(cn.*?).amazonaws.com.cn$`)
+
+// Regular expression used to determine if the arg is elb host.
+var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`)
+
+// Regular expression used to determine if the arg is elb host in china.
+var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`)
+
+// amazonS3HostPrivateLink - regular expression used to determine if an arg is s3 host in AWS PrivateLink interface endpoints style
+var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`)
+
+// GetRegionFromURL - returns a region from url host.
+func GetRegionFromURL(endpointURL url.URL) string {
+ if endpointURL == sentinelURL {
+ return ""
+ }
+ if endpointURL.Host == "s3-external-1.amazonaws.com" {
+ return ""
+ }
+
+ // If ELBs are used we cannot determine the region, so return empty.
+ if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
+ return ""
+ }
+
+ // We check for FIPS dualstack matching first to avoid the non-greedy
+ // regex for FIPS non-dualstack matching a dualstack URL
+ parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ return ""
+}
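+
+// Illustrative results, derived from the regexes above (a hedged sketch, not exhaustive):
+//
+// GetRegionFromURL(url.URL{Host: "s3.us-west-2.amazonaws.com"})                // "us-west-2"
+// GetRegionFromURL(url.URL{Host: "s3-fips.dualstack.us-east-1.amazonaws.com"}) // "us-east-1"
+// GetRegionFromURL(url.URL{Host: "example.com"})                               // ""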
+
+// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint.
+func IsAliyunOSSEndpoint(endpointURL url.URL) bool {
+ return strings.HasSuffix(endpointURL.Host, "aliyuncs.com")
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+ if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
+ return true
+ }
+ return GetRegionFromURL(endpointURL) != ""
+}
+
+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
+ endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" ||
+ IsAmazonFIPSGovCloudEndpoint(endpointURL))
+}
+
+// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud.
+func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-")
+}
+
+// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
+// See https://aws.amazon.com/compliance/fips.
+func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com")
+}
+
+// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint
+// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html.
+func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return amazonS3HostPrivateLink.MatchString(endpointURL.Host)
+}
+
+// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
+func IsGoogleEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "storage.googleapis.com"
+}
+
+// Expects ASCII encoded strings - from the output of EncodePath.
+func percentEncodeSlash(s string) string {
+ return strings.ReplaceAll(s, "/", "%2F")
+}
+
+// QueryEncode - encodes query values in their URL encoded form. In
+// addition to the percent encoding performed by EncodePath() used
+// here, it also percent encodes '/' (forward slash).
+func QueryEncode(v url.Values) string {
+ if v == nil {
+ return ""
+ }
+ var buf bytes.Buffer
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ prefix := percentEncodeSlash(EncodePath(k)) + "="
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(percentEncodeSlash(EncodePath(v)))
+ }
+ }
+ return buf.String()
+}
+
+// TagDecode - decodes canonical tag into map of key and value.
+func TagDecode(ctag string) map[string]string {
+ if ctag == "" {
+ return map[string]string{}
+ }
+ tags := strings.Split(ctag, "&")
+ tagMap := make(map[string]string, len(tags))
+ var err error
+ for _, tag := range tags {
+ kvs := strings.SplitN(tag, "=", 2)
+ if len(kvs) < 2 {
+ return map[string]string{}
+ }
+ tagMap[kvs[0]], err = url.PathUnescape(kvs[1])
+ if err != nil {
+ continue
+ }
+ }
+ return tagMap
+}
+
+// TagEncode - encodes tag values in their URL encoded form. In
+// addition to the percent encoding performed by EncodePath() used
+// here, it also percent encodes '/' (forward slash).
+func TagEncode(tags map[string]string) string {
+ if tags == nil {
+ return ""
+ }
+ values := url.Values{}
+ for k, v := range tags {
+ values[k] = []string{v}
+ }
+ return QueryEncode(values)
+}
+
+// If the object name consists only of reserved characters, there is no need to encode it.
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// EncodePath encodes a string from its UTF-8 byte representation into percent-encoded escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8:
+// non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
+// pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname strings.Builder
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname.WriteRune(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname.WriteRune(s)
+ continue
+ default:
+ l := utf8.RuneLen(s)
+ if l < 0 {
+ // If the rune cannot be UTF-8 encoded, return the input string as is.
+ return pathName
+ }
+ u := make([]byte, l)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname.WriteString("%" + strings.ToUpper(hex))
+ }
+ }
+ }
+ return encodedPathname.String()
+}
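+
+// Illustrative encodings, derived from the rules above (hedged examples):
+//
+// EncodePath("photos/2024/day 1.png") // "photos/2024/day%201.png"
+// EncodePath("日本語")                 // "%E6%97%A5%E6%9C%AC%E8%AA%9E"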
+
+// We support '.' with bucket names but we fall back to using path
+// style requests instead for such buckets.
+var (
+ validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
+ validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+ ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+)
+
+// Common checker for both stricter and basic validation.
+func checkBucketNameCommon(bucketName string, strict bool) (err error) {
+ if strings.TrimSpace(bucketName) == "" {
+ return errors.New("Bucket name cannot be empty")
+ }
+ if len(bucketName) < 3 {
+ return errors.New("Bucket name cannot be shorter than 3 characters")
+ }
+ if len(bucketName) > 63 {
+ return errors.New("Bucket name cannot be longer than 63 characters")
+ }
+ if ipAddress.MatchString(bucketName) {
+ return errors.New("Bucket name cannot be an ip address")
+ }
+ if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") {
+ return errors.New("Bucket name contains invalid characters")
+ }
+ if strict {
+ if !validBucketNameStrict.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+ }
+ if !validBucketName.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+}
+
+// CheckValidBucketName - checks if we have a valid input bucket name.
+func CheckValidBucketName(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, false)
+}
+
+// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
+// This is a stricter version.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func CheckValidBucketNameStrict(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, true)
+}
+
+// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectNamePrefix(objectName string) error {
+ if len(objectName) > 1024 {
+ return errors.New("Object name cannot be longer than 1024 characters")
+ }
+ if !utf8.ValidString(objectName) {
+ return errors.New("Object name with non UTF-8 strings are not supported")
+ }
+ return nil
+}
+
+// CheckValidObjectName - checks if we have a valid input object name.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectName(objectName string) error {
+ if strings.TrimSpace(objectName) == "" {
+ return errors.New("Object name cannot be empty")
+ }
+ return CheckValidObjectNamePrefix(objectName)
+}
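
The helpers above compose naturally: validate the bucket name, encode the object path, then encode any query parameters. A minimal usage sketch (illustrative only, not part of the vendored patch; the expected outputs in the comments follow from the rules above):

package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	// "My_Bucket" passes the relaxed check but fails the strict S3 naming rules.
	fmt.Println(s3utils.CheckValidBucketName("My_Bucket"))       // <nil>
	fmt.Println(s3utils.CheckValidBucketNameStrict("My_Bucket")) // Bucket name contains invalid characters

	// EncodePath keeps unreserved characters and '/' intact.
	fmt.Println(s3utils.EncodePath("videos/day 1.mp4")) // videos/day%201.mp4

	// QueryEncode sorts keys and additionally percent-encodes '/'.
	fmt.Println(s3utils.QueryEncode(url.Values{"prefix": {"a/b"}, "marker": {"x"}})) // marker=x&prefix=a%2Fb
}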
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
new file mode 100644
index 0000000..c35e58e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
@@ -0,0 +1,200 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "fmt"
+ "sort"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+// StringSet - uses map as set of strings.
+type StringSet map[string]struct{}
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+
+// ToSlice - returns StringSet as string slice.
+func (set StringSet) ToSlice() []string {
+ keys := make([]string, 0, len(set))
+ for k := range set {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// IsEmpty - returns whether the set is empty or not.
+func (set StringSet) IsEmpty() bool {
+ return len(set) == 0
+}
+
+// Add - adds string to the set.
+func (set StringSet) Add(s string) {
+ set[s] = struct{}{}
+}
+
+// Remove - removes a string from the set. It does nothing if the string does not exist in the set.
+func (set StringSet) Remove(s string) {
+ delete(set, s)
+}
+
+// Contains - checks if string is in the set.
+func (set StringSet) Contains(s string) bool {
+ _, ok := set[s]
+ return ok
+}
+
+// FuncMatch - returns a new set containing each value that passes the match function.
+// 'matchFn' receives a set element as its first argument and 'matchString' as its
+// second. It may apply any comparison logic and should return true to include the
+// element in the output set; otherwise the element is ignored.
+func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if matchFn(k, matchString) {
+ nset.Add(k)
+ }
+ }
+ return nset
+}
+
+// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
+// 'applyFn' receives a set element as its argument and may apply any logic to
+// return a processed string.
+func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(applyFn(k))
+ }
+ return nset
+}
+
+// Equals - checks whether given set is equal to current set or not.
+func (set StringSet) Equals(sset StringSet) bool {
+ // Sets of different lengths cannot be equal.
+ if len(set) != len(sset) {
+ return false
+ }
+
+ // As both sets are equal in length, check that each element exists in the other.
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection - returns the intersection with given set as new set.
+func (set StringSet) Intersection(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Difference - returns the difference with given set as new set.
+func (set StringSet) Difference(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Union - returns the union with given set as new set.
+func (set StringSet) Union(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(k)
+ }
+
+ for k := range sset {
+ nset.Add(k)
+ }
+
+ return nset
+}
+
+// MarshalJSON - converts to JSON data.
+func (set StringSet) MarshalJSON() ([]byte, error) {
+ return json.Marshal(set.ToSlice())
+}
+
+// UnmarshalJSON - parses JSON data and creates new set with it.
+// If 'data' contains a JSON string array, the set contains each string.
+// If 'data' contains a JSON string, the set contains the string as its only element.
+// If 'data' contains any other JSON type, a JSON parse error is returned.
+func (set *StringSet) UnmarshalJSON(data []byte) error {
+ sl := []string{}
+ var err error
+ if err = json.Unmarshal(data, &sl); err == nil {
+ *set = make(StringSet)
+ for _, s := range sl {
+ set.Add(s)
+ }
+ } else {
+ var s string
+ if err = json.Unmarshal(data, &s); err == nil {
+ *set = make(StringSet)
+ set.Add(s)
+ }
+ }
+
+ return err
+}
+
+// String - returns printable string of the set.
+func (set StringSet) String() string {
+ return fmt.Sprintf("%s", set.ToSlice())
+}
+
+// NewStringSet - creates new string set.
+func NewStringSet() StringSet {
+ return make(StringSet)
+}
+
+// CreateStringSet - creates new string set with given string values.
+func CreateStringSet(sl ...string) StringSet {
+ set := make(StringSet)
+ for _, k := range sl {
+ set.Add(k)
+ }
+ return set
+}
+
+// CopyStringSet - returns copy of given set.
+func CopyStringSet(set StringSet) StringSet {
+ nset := NewStringSet()
+ for k, v := range set {
+ nset[k] = v
+ }
+ return nset
+}
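
A quick sketch of the set algebra above (illustrative only, not part of the vendored patch; ToSlice sorts, so the printed order is deterministic):

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	a := set.CreateStringSet("read", "write", "delete")
	b := set.CreateStringSet("write", "list")

	fmt.Println(a.Intersection(b))  // [write]
	fmt.Println(a.Difference(b))    // [delete read]
	fmt.Println(a.Union(b))         // [delete list read write]
	fmt.Println(a.Contains("list")) // false
}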
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
new file mode 100644
index 0000000..77540e2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
@@ -0,0 +1,224 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// getUnsignedChunkLength - calculates the on-wire length of a chunk: hex size header, CRLF, data, CRLF.
+func getUnsignedChunkLength(chunkDataSize int64) int64 {
+ return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
+ crlfLen +
+ chunkDataSize +
+ crlfLen
+}
+
+// getUSStreamLength - calculates the length of the overall stream (data + metadata)
+func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
+ if dataLen <= 0 {
+ return 0
+ }
+
+ chunksCount := int64(dataLen / chunkSize)
+ remainingBytes := int64(dataLen % chunkSize)
+ streamLen := int64(0)
+ streamLen += chunksCount * getUnsignedChunkLength(chunkSize)
+ if remainingBytes > 0 {
+ streamLen += getUnsignedChunkLength(remainingBytes)
+ }
+ streamLen += getUnsignedChunkLength(0)
+ if len(trailers) > 0 {
+ for name, placeholder := range trailers {
+ if len(placeholder) > 0 {
+ streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
+ }
+ }
+ streamLen += crlfLen
+ }
+
+ return streamLen
+}
+
+// prepareUSStreamingRequest - prepares a request with the appropriate
+// headers for an unsigned (aws-chunked) streaming upload.
+func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
+ req.TransferEncoding = []string{"aws-chunked"}
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
+ // Set content length to the full aws-chunked stream length, framing included.
+ req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
+}
+
+// StreamingUSReader implements an unsigned chunked upload as a reader on
+// top of req.Body's io.ReadCloser: chunk header;data;... repeated.
+type StreamingUSReader struct {
+ contentLen int64 // Content-Length from req header
+ baseReadCloser io.ReadCloser // underlying io.Reader
+ bytesRead int64 // bytes read from underlying io.Reader
+ buf bytes.Buffer // holds signed chunk
+ chunkBuf []byte // holds raw data read from req Body
+ chunkBufLen int // no. of bytes read so far into chunkBuf
+ done bool // done reading the underlying reader to EOF
+ chunkNum int
+ totalChunks int
+ lastChunkSize int
+ trailer http.Header
+}
+
+// writeChunk - writes a chunk of chunkLen size read from s.baseReader, framed with its hex length header.
+func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) {
+ s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n")
+
+ // Write chunk data into streaming buffer
+ s.buf.Write(s.chunkBuf[:chunkLen])
+
+ // Write the chunk trailer.
+ if addCrLf {
+ s.buf.Write([]byte("\r\n"))
+ }
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// addTrailer - adds the provided trailer headers to the output, unsigned.
+func (s *StreamingUSReader) addTrailer(h http.Header) {
+ olen := len(s.chunkBuf)
+ s.chunkBuf = s.chunkBuf[:0]
+ for k, v := range h {
+ s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
+ }
+
+ s.buf.Write(s.chunkBuf)
+ s.buf.WriteString("\r\n\r\n")
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBuf = s.chunkBuf[:olen]
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// StreamingUnsignedV4 - provides an unsigned aws-chunked upload, wrapping req.Body.
+func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request {
+ // Set headers needed for streaming signature.
+ prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)
+
+ if req.Body == nil {
+ req.Body = io.NopCloser(bytes.NewReader([]byte("")))
+ }
+
+ stReader := &StreamingUSReader{
+ baseReadCloser: req.Body,
+ chunkBuf: make([]byte, payloadChunkSize),
+ contentLen: dataLen,
+ chunkNum: 1,
+ totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
+ lastChunkSize: int(dataLen % payloadChunkSize),
+ }
+ if len(req.Trailer) > 0 {
+ stReader.trailer = req.Trailer
+ // Remove the trailer from the request; it is written into the chunked body instead.
+ req.Trailer = nil
+ }
+
+ req.Body = stReader
+
+ return req
+}
+
+// Read - this method streams out unsigned chunks, providing an
+// io.Reader interface.
+func (s *StreamingUSReader) Read(buf []byte) (int, error) {
+ switch {
+ // After the last chunk is read from underlying reader, we
+ // never re-fill s.buf.
+ case s.done:
+
+ // s.buf will be (re-)filled with the next chunk when it has fewer
+ // bytes than asked for.
+ case s.buf.Len() < len(buf):
+ s.chunkBufLen = 0
+ for {
+ n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
+ // Usually we validate `err` first, but in this case
+ // we are validating n > 0 for the following reasons.
+ //
+ // 1. n > 0, err is one of io.EOF, nil (near end of stream)
+ // A Reader returning a non-zero number of bytes at the end
+ // of the input stream may return either err == EOF or err == nil
+ //
+ // 2. n == 0, err is io.EOF (actual end of stream)
+ //
+ // Callers should always process the n > 0 bytes returned
+ // before considering the error err.
+ if n1 > 0 {
+ s.chunkBufLen += n1
+ s.bytesRead += int64(n1)
+
+ if s.chunkBufLen == payloadChunkSize ||
+ (s.chunkNum == s.totalChunks-1 &&
+ s.chunkBufLen == s.lastChunkSize) {
+ // Frame the chunk and write it to s.buf.
+ s.writeChunk(s.chunkBufLen, true)
+ break
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ // No more data left in baseReader - last chunk.
+ // Done reading the last chunk from baseReader.
+ s.done = true
+
+ // Bytes read from baseReader differ from the
+ // content length provided.
+ if s.bytesRead != s.contentLen {
+ return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
+ }
+
+ // Frame the final chunk and write it to s.buf.
+ s.writeChunk(0, len(s.trailer) == 0)
+ if len(s.trailer) > 0 {
+ // Trailer must be set now.
+ s.addTrailer(s.trailer)
+ }
+ break
+ }
+ return 0, err
+ }
+
+ }
+ }
+ return s.buf.Read(buf)
+}
+
+// Close - this method makes underlying io.ReadCloser's Close method available.
+func (s *StreamingUSReader) Close() error {
+ return s.baseReadCloser.Close()
+}
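
To see the unsigned aws-chunked framing end to end, the sketch below wraps a 100 KiB PUT body; the endpoint is a placeholder. StreamingUnsignedV4 precomputes ContentLength via getUSStreamLength, so the framed body read back from req.Body matches it exactly (a hedged sketch, not part of the vendored patch):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/minio/minio-go/v7/pkg/signer"
)

func main() {
	payload := bytes.Repeat([]byte("x"), 100*1024) // 100 KiB -> chunks of 64 KiB, 36 KiB, then a zero chunk
	req, _ := http.NewRequest(http.MethodPut, "https://example-endpoint/bucket/object", bytes.NewReader(payload))

	req = signer.StreamingUnsignedV4(req, "", int64(len(payload)), time.Now().UTC())

	framed, _ := io.ReadAll(req.Body)                     // hex length header + data + CRLF per chunk
	fmt.Println(int64(len(framed)) == req.ContentLength)  // true
}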
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
new file mode 100644
index 0000000..1c2f1dc
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
@@ -0,0 +1,403 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ md5simd "github.com/minio/md5-simd"
+)
+
+// Reference for constants used below -
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
+const (
+ streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
+ streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
+ streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER"
+ emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ payloadChunkSize = 64 * 1024
+ chunkSigConstLen = 17 // ";chunk-signature="
+ signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
+ crlfLen = 2 // CRLF
+ trailerKVSeparator = ":"
+ trailerSignature = "x-amz-trailer-signature"
+)
+
+// Request headers to be ignored while calculating seed signature for
+// a request.
+var ignoredStreamingHeaders = map[string]bool{
+ "Authorization": true,
+ "User-Agent": true,
+ "Content-Type": true,
+}
+
+// getSignedChunkLength - calculates the on-wire length of a signed chunk: hex size header, signature, CRLFs, and data.
+func getSignedChunkLength(chunkDataSize int64) int64 {
+ return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
+ chunkSigConstLen +
+ signatureStrLen +
+ crlfLen +
+ chunkDataSize +
+ crlfLen
+}
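+
+// For the default 64 KiB payloadChunkSize this works out to
+// len("10000") + len(";chunk-signature=") + 64 + 2 + 65536 + 2 = 65626 bytes per
+// full chunk; the terminating zero-length chunk costs 1 + 17 + 64 + 2 + 0 + 2 = 86 bytes.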
+
+// getStreamLength - calculates the length of the overall stream (data + metadata)
+func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
+ if dataLen <= 0 {
+ return 0
+ }
+
+ chunksCount := int64(dataLen / chunkSize)
+ remainingBytes := int64(dataLen % chunkSize)
+ streamLen := int64(0)
+ streamLen += chunksCount * getSignedChunkLength(chunkSize)
+ if remainingBytes > 0 {
+ streamLen += getSignedChunkLength(remainingBytes)
+ }
+ streamLen += getSignedChunkLength(0)
+ if len(trailers) > 0 {
+ for name, placeholder := range trailers {
+ if len(placeholder) > 0 {
+ streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
+ }
+ }
+ streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen
+ }
+
+ return streamLen
+}
+
+// buildChunkStringToSign - returns the string to sign given the chunk
+// checksum and previous signature.
+func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
+ stringToSignParts := []string{
+ streamingPayloadHdr,
+ t.Format(iso8601DateFormat),
+ getScope(region, t, ServiceTypeS3),
+ previousSig,
+ emptySHA256,
+ chunkChecksum,
+ }
+
+ return strings.Join(stringToSignParts, "\n")
+}
+
+// buildTrailerChunkStringToSign - returns the string to sign given the
+// trailer chunk checksum and previous signature.
+func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
+ stringToSignParts := []string{
+ streamingTrailerHdr,
+ t.Format(iso8601DateFormat),
+ getScope(region, t, ServiceTypeS3),
+ previousSig,
+ chunkChecksum,
+ }
+
+ return strings.Join(stringToSignParts, "\n")
+}
+
+// prepareStreamingRequest - prepares a request with appropriate
+// headers before computing the seed signature.
+func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
+ // Set x-amz-content-sha256 header.
+ if len(req.Trailer) == 0 {
+ req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
+ } else {
+ req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm)
+ for k := range req.Trailer {
+ req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+ }
+ req.TransferEncoding = []string{"aws-chunked"}
+ }
+
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
+ // Set content length with streaming signature for each chunk included.
+ req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
+ req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
+}
+
+// buildChunkHeader - returns the chunk header.
+// e.g. string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
+func buildChunkHeader(chunkLen int64, signature string) []byte {
+ return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n")
+}
+
+// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
+func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
+ previousSignature, secretAccessKey string,
+) string {
+ chunkStringToSign := buildChunkStringToSign(reqTime, region,
+ previousSignature, chunkCheckSum)
+ signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
+ return getSignature(signingKey, chunkStringToSign)
+}
+
+// buildTrailerChunkSignature - returns the trailer chunk signature for a given trailer and previous signature.
+func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
+ previousSignature, secretAccessKey string,
+) string {
+ chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
+ previousSignature, chunkChecksum)
+ signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
+ return getSignature(signingKey, chunkStringToSign)
+}
+
+// setSeedSignature - computes and stores the seed signature for a given request.
+func (s *StreamingReader) setSeedSignature(req *http.Request) {
+ // Get canonical request
+ canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req))
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3)
+
+ signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3)
+
+ // Calculate signature.
+ s.seedSignature = getSignature(signingKey, stringToSign)
+}
+
+// StreamingReader implements chunked upload signing as a reader on
+// top of req.Body's io.ReadCloser: chunk header;data;... repeated.
+type StreamingReader struct {
+ accessKeyID string
+ secretAccessKey string
+ sessionToken string
+ region string
+ prevSignature string
+ seedSignature string
+ contentLen int64 // Content-Length from req header
+ baseReadCloser io.ReadCloser // underlying io.Reader
+ bytesRead int64 // bytes read from underlying io.Reader
+ buf bytes.Buffer // holds signed chunk
+ chunkBuf []byte // holds raw data read from req Body
+ chunkBufLen int // no. of bytes read so far into chunkBuf
+ done bool // done reading the underlying reader to EOF
+ reqTime time.Time
+ chunkNum int
+ totalChunks int
+ lastChunkSize int
+ trailer http.Header
+ sh256 md5simd.Hasher
+}
+
+// signChunk - signs a chunk read from s.baseReader of chunkLen size.
+func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
+ // Compute chunk signature for next header
+ s.sh256.Reset()
+ s.sh256.Write(s.chunkBuf[:chunkLen])
+ chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
+
+ signature := buildChunkSignature(chunkChecksum, s.reqTime,
+ s.region, s.prevSignature, s.secretAccessKey)
+
+ // For next chunk signature computation
+ s.prevSignature = signature
+
+ // Write chunk header into streaming buffer
+ chunkHdr := buildChunkHeader(int64(chunkLen), signature)
+ s.buf.Write(chunkHdr)
+
+ // Write chunk data into streaming buffer
+ s.buf.Write(s.chunkBuf[:chunkLen])
+
+ // Write the chunk trailer.
+ if addCrLf {
+ s.buf.Write([]byte("\r\n"))
+ }
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// addSignedTrailer - adds a trailer with the provided headers,
+// then signs a chunk and adds it to output.
+func (s *StreamingReader) addSignedTrailer(h http.Header) {
+ olen := len(s.chunkBuf)
+ s.chunkBuf = s.chunkBuf[:0]
+ for k, v := range h {
+ s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
+ }
+
+ s.sh256.Reset()
+ s.sh256.Write(s.chunkBuf)
+ chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
+ // Compute chunk signature
+ signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
+ s.region, s.prevSignature, s.secretAccessKey)
+
+ // For next chunk signature computation
+ s.prevSignature = signature
+
+ s.buf.Write(s.chunkBuf)
+ s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n")
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBuf = s.chunkBuf[:olen]
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// setStreamingAuthHeader - builds and sets authorization header value
+// for streaming signature.
+func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
+ credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3)
+ authParts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
+ "Signature=" + s.seedSignature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(authParts, ",")
+ req.Header.Set("Authorization", auth)
+}
+
+// StreamingSignV4 - provides chunked upload signatureV4 support by
+// implementing io.Reader.
+func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
+ region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
+) *http.Request {
+ // Set headers needed for streaming signature.
+ prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
+
+ if req.Body == nil {
+ req.Body = io.NopCloser(bytes.NewReader([]byte("")))
+ }
+
+ stReader := &StreamingReader{
+ baseReadCloser: req.Body,
+ accessKeyID: accessKeyID,
+ secretAccessKey: secretAccessKey,
+ sessionToken: sessionToken,
+ region: region,
+ reqTime: reqTime,
+ chunkBuf: make([]byte, payloadChunkSize),
+ contentLen: dataLen,
+ chunkNum: 1,
+ totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
+ lastChunkSize: int(dataLen % payloadChunkSize),
+ sh256: sh256,
+ }
+ if len(req.Trailer) > 0 {
+ stReader.trailer = req.Trailer
+ // Remove the trailer from the request; it is written into the chunked body instead.
+ req.Trailer = nil
+ }
+
+ // Add the request headers required for chunk upload signing.
+
+ // Compute the seed signature.
+ stReader.setSeedSignature(req)
+
+ // Set the authorization header with the seed signature.
+ stReader.setStreamingAuthHeader(req)
+
+ // Set seed signature as prevSignature for subsequent
+ // streaming signing process.
+ stReader.prevSignature = stReader.seedSignature
+ req.Body = stReader
+
+ return req
+}
+
+// Read - this method performs chunked upload signing, providing an
+// io.Reader interface.
+func (s *StreamingReader) Read(buf []byte) (int, error) {
+ switch {
+ // After the last chunk is read from underlying reader, we
+ // never re-fill s.buf.
+ case s.done:
+
+ // s.buf will be (re-)filled with the next chunk when it has fewer
+ // bytes than asked for.
+ case s.buf.Len() < len(buf):
+ s.chunkBufLen = 0
+ for {
+ n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
+ // Usually we validate `err` first, but in this case
+ // we are validating n > 0 for the following reasons.
+ //
+ // 1. n > 0, err is one of io.EOF, nil (near end of stream)
+ // A Reader returning a non-zero number of bytes at the end
+ // of the input stream may return either err == EOF or err == nil
+ //
+ // 2. n == 0, err is io.EOF (actual end of stream)
+ //
+ // Callers should always process the n > 0 bytes returned
+ // before considering the error err.
+ if n1 > 0 {
+ s.chunkBufLen += n1
+ s.bytesRead += int64(n1)
+
+ if s.chunkBufLen == payloadChunkSize ||
+ (s.chunkNum == s.totalChunks-1 &&
+ s.chunkBufLen == s.lastChunkSize) {
+ // Sign the chunk and write it to s.buf.
+ s.signChunk(s.chunkBufLen, true)
+ break
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ // No more data left in baseReader - last chunk.
+ // Done reading the last chunk from baseReader.
+ s.done = true
+
+ // Bytes read from baseReader differ from the
+ // content length provided.
+ if s.bytesRead != s.contentLen {
+ return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
+ }
+
+ // Sign the chunk and write it to s.buf.
+ s.signChunk(0, len(s.trailer) == 0)
+ if len(s.trailer) > 0 {
+ // Trailer must be set now.
+ s.addSignedTrailer(s.trailer)
+ }
+ break
+ }
+ return 0, err
+ }
+
+ }
+ }
+ return s.buf.Read(buf)
+}
+
+// Close - this method makes underlying io.ReadCloser's Close method available.
+func (s *StreamingReader) Close() error {
+ if s.sh256 != nil {
+ s.sh256.Close()
+ s.sh256 = nil
+ }
+ return s.baseReadCloser.Close()
+}
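
The signed variant additionally needs a SHA256 hasher satisfying md5simd.Hasher, which is hash.Hash plus a Close method; the small wrapper below mirrors the kind of adapter minio-go uses internally. Endpoint and credentials are placeholders. A hedged sketch, not part of the vendored patch:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"hash"
	"io"
	"net/http"
	"time"

	"github.com/minio/minio-go/v7/pkg/signer"
)

// sha256Hasher adapts crypto/sha256 to md5simd.Hasher (hash.Hash + Close).
type sha256Hasher struct{ hash.Hash }

func (sha256Hasher) Close() {}

func main() {
	payload := bytes.Repeat([]byte("y"), 80*1024)
	req, _ := http.NewRequest(http.MethodPut, "https://s3.us-east-1.amazonaws.com/mybucket/myobject", bytes.NewReader(payload))

	req = signer.StreamingSignV4(req, "ACCESS-KEY", "SECRET-KEY", "",
		"us-east-1", int64(len(payload)), time.Now().UTC(), sha256Hasher{sha256.New()})

	framed, _ := io.ReadAll(req.Body)                     // chunk header + data + ... + final signed zero chunk
	fmt.Println(int64(len(framed)) == req.ContentLength)  // true
}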
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
new file mode 100644
index 0000000..fa4f8c9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
@@ -0,0 +1,319 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+ signV2Algorithm = "AWS"
+)
+
+// Encode input URL path to URL encoded path.
+func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
+ if virtualHost {
+ reqHost := getHostAddr(req)
+ dotPos := strings.Index(reqHost, ".")
+ if dotPos > -1 {
+ bucketName := reqHost[:dotPos]
+ path = "/" + bucketName
+ path += req.URL.Path
+ path = s3utils.EncodePath(path)
+ return
+ }
+ }
+ path = s3utils.EncodePath(req.URL.Path)
+ return
+}
+
+// PreSignV2 - presigns the request in the following style.
+// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ d := time.Now().UTC()
+ // Find epoch expires when the request will expire.
+ epochExpires := d.Unix() + expires
+
+ // Add expires header if not present.
+ if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
+ req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
+ }
+
+ // Get presigned string to sign.
+ stringToSign := preStringToSignV2(req, virtualHost)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Calculate signature.
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+
+ query := req.URL.Query()
+ // Handle specially for Google Cloud Storage.
+ if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
+ query.Set("GoogleAccessId", accessKeyID)
+ } else {
+ query.Set("AWSAccessKeyId", accessKeyID)
+ }
+
+ // Fill in Expires for presigned query.
+ query.Set("Expires", strconv.FormatInt(epochExpires, 10))
+
+ // Encode query and save.
+ req.URL.RawQuery = s3utils.QueryEncode(query)
+
+ // Save signature finally.
+ req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
+
+ // Return.
+ return &req
+}
+
+// PostPresignSignatureV2 - presigned signature for PostPolicy
+// request.
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(policyBase64))
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+ return signature
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// SignV2 signs the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ d := time.Now().UTC()
+
+ // Add date if not present.
+ if date := req.Header.Get("Date"); date == "" {
+ req.Header.Set("Date", d.Format(http.TimeFormat))
+ }
+
+ // Calculate HMAC for secretAccessKey.
+ stringToSign := stringToSignV2(req, virtualHost)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Prepare auth header.
+ authHeader := new(bytes.Buffer)
+ authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
+ encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
+ encoder.Write(hm.Sum(nil))
+ encoder.Close()
+
+ // Set Authorization header.
+ req.Header.Set("Authorization", authHeader.String())
+
+ return &req
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+//
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Expires + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func preStringToSignV2(req http.Request, virtualHost bool) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writePreSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ writeCanonicalizedResource(buf, req, virtualHost)
+ return buf.String()
+}
+
+// writePreSignV2Headers - write preSign v2 required headers.
+func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Expires") + "\n")
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+//
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func stringToSignV2(req http.Request, virtualHost bool) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writeSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ writeCanonicalizedResource(buf, req, virtualHost)
+ return buf.String()
+}
+
+// writeSignV2Headers - write signV2 required headers.
+func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Date") + "\n")
+}
+
+// writeCanonicalizedHeaders - write canonicalized headers.
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
+ var protoHeaders []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ // All the AMZ headers should be lowercase
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-amz") {
+ protoHeaders = append(protoHeaders, lk)
+ vals[lk] = vv
+ }
+ }
+ sort.Strings(protoHeaders)
+ for _, k := range protoHeaders {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+}
+
+// AWS S3 Signature V2 calculation rule is given here:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
+
+// Whitelist resource list that will be used in query string for signature-V2 calculation.
+//
+// This list should be kept alphabetically sorted, do not hastily edit.
+var resourceList = []string{
+ "acl",
+ "cors",
+ "delete",
+ "encryption",
+ "legal-hold",
+ "lifecycle",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "replication",
+ "requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
+ "retention",
+ "select",
+ "select-type",
+ "tagging",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// From the Amazon docs:
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
+ // Save request URL.
+ requestURL := req.URL
+ // Get encoded URL path.
+ buf.WriteString(encodeURL2Path(&req, virtualHost))
+ if requestURL.RawQuery != "" {
+ var n int
+ vals, _ := url.ParseQuery(requestURL.RawQuery)
+ // Verify if any sub-resource queries are present; if yes,
+ // canonicalize them.
+ for _, resource := range resourceList {
+ if vv, ok := vals[resource]; ok && len(vv) > 0 {
+ n++
+ // First element
+ switch n {
+ case 1:
+ buf.WriteByte('?')
+ // The rest
+ default:
+ buf.WriteByte('&')
+ }
+ buf.WriteString(resource)
+ // Request parameters
+ if len(vv[0]) > 0 {
+ buf.WriteByte('=')
+ buf.WriteString(vv[0])
+ }
+ }
+ }
+ }
+}
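
A hedged presigning sketch (placeholder credentials; the bucket rides in the host, so virtualHost is true; not part of the vendored patch):

package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/signer"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://mybucket.s3.amazonaws.com/myobject", nil)

	// Expires is relative, in seconds; PreSignV2 appends AWSAccessKeyId,
	// Expires and Signature to the query string.
	presigned := signer.PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 7*24*3600, true)
	fmt.Println(presigned.URL.String())
}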
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
new file mode 100644
index 0000000..ffd2514
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
@@ -0,0 +1,351 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "bytes"
+ "encoding/hex"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+ yyyymmdd = "20060102"
+)
+
+// Different service types
+const (
+ ServiceTypeS3 = "s3"
+ ServiceTypeSTS = "sts"
+)
+
+// Excerpts from @lsegal -
+// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
+//
+// * User-Agent
+// This is ignored from signing because signing this causes problems with generating pre-signed
+// URLs (that are executed by other agents) or when customers pass requests through proxies, which
+// may modify the user-agent.
+//
+// * Authorization
+// Is skipped for obvious reasons.
+//
+// * Accept-Encoding
+// Some S3 servers like Hitachi Content Platform do not honor this header for signature
+// calculation.
+var v4IgnoredHeaders = map[string]bool{
+ "Accept-Encoding": true,
+ "Authorization": true,
+ "User-Agent": true,
+}
+
+// getSigningKey derives the HMAC signing key used to calculate the final signature.
+func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte {
+ date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
+ location := sumHMAC(date, []byte(loc))
+ service := sumHMAC(location, []byte(serviceType))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+ return signingKey
+}
+
+// getSignature returns the final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getScope generates the scope string from a specific date, an AWS region, and a
+// service.
+func getScope(location string, t time.Time, serviceType string) string {
+ scope := strings.Join([]string{
+ t.Format(yyyymmdd),
+ location,
+ serviceType,
+ "aws4_request",
+ }, "/")
+ return scope
+}
+
+// GetCredential generates a credential string.
+func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string {
+ scope := getScope(location, t, serviceType)
+ return accessKeyID + "/" + scope
+}
+
+// getHashedPayload gets the hexadecimal value of the SHA256 hash of
+// the request payload.
+func getHashedPayload(req http.Request) string {
+ hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
+ if hashedPayload == "" {
+ // Presign does not have a payload, use S3 recommended value.
+ hashedPayload = unsignedPayload
+ }
+ return hashedPayload
+}
+
+// getCanonicalHeaders generates a list of request headers for the
+// signature.
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+ var headers []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // ignored header
+ }
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ if !headerExists("host", headers) {
+ headers = append(headers, "host")
+ }
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+ // Save all the headers in canonical form : newline
+ // separated for each header.
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(getHostAddr(&req))
+ buf.WriteByte('\n')
+ default:
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(signV4TrimAll(v))
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ return buf.String()
+}
+
+func headerExists(key string, headers []string) bool {
+ for _, k := range headers {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+// getSignedHeaders generates all signed request headers,
+// i.e. a lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+ var headers []string
+ for k := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // Ignored header found continue.
+ }
+ headers = append(headers, strings.ToLower(k))
+ }
+ if !headerExists("host", headers) {
+ headers = append(headers, "host")
+ }
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generates a canonical request of the following style.
+//
+// canonicalRequest =
+//
+// <HTTPMethod>\n
+// <CanonicalURI>\n
+// <CanonicalQueryString>\n
+// <CanonicalHeaders>\n
+// <SignedHeaders>\n
+// <HashedPayload>
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
+ req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ s3utils.EncodePath(req.URL.Path),
+ req.URL.RawQuery,
+ getCanonicalHeaders(req, ignoredHeaders),
+ getSignedHeaders(req, ignoredHeaders),
+ hashedPayload,
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSignV4 builds the string to sign from the canonical request.
+func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+ stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
+ stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest)))
+ return stringToSign
+}
+
+// PreSignV4 presigns the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+ // Set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", signedHeaders)
+ query.Set("X-Amz-Credential", credential)
+ // Set session token if available.
+ if sessionToken != "" {
+ query.Set("X-Amz-Security-Token", sessionToken)
+ }
+ req.URL.RawQuery = query.Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+ return &req
+}
+
+// PostPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+ // Get signing key.
+ signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
+ // Calculate signature.
+ signature := getSignature(signingkey, policyBase64)
+ return signature
+}
+
+// SignV4STS - signature v4 for STS request.
+func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+ return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil)
+}
+
+// Internal function called for different service types.
+func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Set x-amz-date.
+ req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+ // Set session token if available.
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ if len(trailer) > 0 {
+ for k := range trailer {
+ req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+ }
+
+ req.Header.Set("Content-Encoding", "aws-chunked")
+ req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
+ }
+
+ hashedPayload := getHashedPayload(req)
+ if serviceType == ServiceTypeSTS {
+ // Content sha256 header is not sent with the request
+ // but it is expected to have sha256 of payload for signature
+ // in STS service type request.
+ req.Header.Del("X-Amz-Content-Sha256")
+ }
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t, serviceType)
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t, serviceType)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // If regular request, construct the final authorization header.
+ parts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ if len(trailer) > 0 {
+ // Use custom chunked encoding.
+ req.Trailer = trailer
+ return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
+ }
+ return &req
+}
+
+// SignV4 signs the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
+func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
+ return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil)
+}
+
+// SignV4Trailer signs the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request {
+ return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer)
+}
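+
+// Usage sketch (illustrative only; endpoint, keys and region are
+// placeholders): sign a request before sending it.
+//
+//	req, _ := http.NewRequest(http.MethodGet, "https://example.com/bucket/object", nil)
+//	signed := SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "", "us-east-1")
+//	// signed.Header.Get("Authorization") now carries the
+//	// AWS4-HMAC-SHA256 credential, signed headers and signature.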
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
new file mode 100644
index 0000000..87c9939
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
@@ -0,0 +1,62 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "net/http"
+ "strings"
+)
+
+// unsignedPayload - value to be set to the X-Amz-Content-Sha256 header
+// when the request payload is not signed.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculates the SHA256 checksum of the input byte slice.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumHMAC computes the HMAC-SHA256 of data keyed with key.
+func sumHMAC(key, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getHostAddr returns the host from the Host header when it is set and
+// differs from req.Host, falling back to req.Host and finally req.URL.Host.
+func getHostAddr(req *http.Request) string {
+ host := req.Header.Get("host")
+ if host != "" && req.Host != host {
+ return host
+ }
+ if req.Host != "" {
+ return req.Host
+ }
+ return req.URL.Host
+}
+
+// signV4TrimAll trims leading and trailing spaces and replaces sequential
+// spaces with a single space, following the Trimall() step described in
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+ // Compress adjacent spaces (a space is determined by
+ // unicode.IsSpace() internally here) to one space and return
+ return strings.Join(strings.Fields(input), " ")
+}
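+
+// For illustration, signV4TrimAll("  a   b \t c ") returns "a b c":
+// strings.Fields splits on any run of Unicode whitespace, so tabs and
+// repeated spaces collapse to single separators.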
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
new file mode 100644
index 0000000..b5fb956
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
@@ -0,0 +1,66 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sse
+
+import "encoding/xml"
+
+// ApplySSEByDefault defines the default encryption configuration, KMS or
+// SSE. To activate KMS, SSEAlgorithm needs to be set to "aws:kms".
+// MinIO currently does not support KMS.
+type ApplySSEByDefault struct {
+ KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
+ SSEAlgorithm string `xml:"SSEAlgorithm"`
+}
+
+// Rule layer encapsulates default encryption configuration
+type Rule struct {
+ Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
+}
+
+// Configuration is the default encryption configuration structure
+type Configuration struct {
+ XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
+ Rules []Rule `xml:"Rule"`
+}
+
+// NewConfigurationSSES3 initializes a new SSE-S3 configuration
+func NewConfigurationSSES3() *Configuration {
+ return &Configuration{
+ Rules: []Rule{
+ {
+ Apply: ApplySSEByDefault{
+ SSEAlgorithm: "AES256",
+ },
+ },
+ },
+ }
+}
+
+// NewConfigurationSSEKMS initializes a new SSE-KMS configuration
+func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration {
+ return &Configuration{
+ Rules: []Rule{
+ {
+ Apply: ApplySSEByDefault{
+ KmsMasterKeyID: kmsMasterKey,
+ SSEAlgorithm: "aws:kms",
+ },
+ },
+ },
+ }
+}
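+
+// Usage sketch (illustrative only): marshalling the SSE-S3 configuration
+// yields, modulo whitespace:
+//
+//	out, _ := xml.Marshal(NewConfigurationSSES3())
+//	// <ServerSideEncryptionConfiguration><Rule>
+//	//   <ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault>
+//	// </Rule></ServerSideEncryptionConfiguration>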
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
new file mode 100644
index 0000000..7a84a6f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -0,0 +1,413 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tags
+
+import (
+ "encoding/xml"
+ "io"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error contains tag specific error.
+type Error interface {
+ error
+ Code() string
+}
+
+type errTag struct {
+ code string
+ message string
+}
+
+// Code contains error code.
+func (err errTag) Code() string {
+ return err.code
+}
+
+// Error contains error message.
+func (err errTag) Error() string {
+ return err.message
+}
+
+var (
+ errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"}
+ errTooManyTags = &errTag{"BadRequest", "Tags cannot be more than 50"}
+ errInvalidTagKey = &errTag{"InvalidTag", "The TagKey you have provided is invalid"}
+ errInvalidTagValue = &errTag{"InvalidTag", "The TagValue you have provided is invalid"}
+ errDuplicateTagKey = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"}
+)
+
+// Tags come with limitations, as per
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
+const (
+ maxKeyLength = 128
+ maxValueLength = 256
+ maxObjectTagCount = 10
+ maxTagCount = 50
+)
+
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
+// Derived from the restrictions above and from testing various ASCII
+// characters: the following regex is accepted by AWS S3 for both tag
+// keys and values.
+var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
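+
+// For example, "project/alpha@2024" and "team name" match the pattern
+// above, while strings containing characters such as '%' or '#' do not.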
+
+func checkKey(key string) error {
+ if len(key) == 0 {
+ return errInvalidTagKey
+ }
+
+ if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) {
+ return errInvalidTagKey
+ }
+
+ return nil
+}
+
+func checkValue(value string) error {
+ if value != "" {
+ if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) {
+ return errInvalidTagValue
+ }
+ }
+
+ return nil
+}
+
+// Tag denotes key and value.
+type Tag struct {
+ Key string `xml:"Key"`
+ Value string `xml:"Value"`
+}
+
+func (tag Tag) String() string {
+ return tag.Key + "=" + tag.Value
+}
+
+// IsEmpty returns whether this tag is empty or not.
+func (tag Tag) IsEmpty() bool {
+ return tag.Key == ""
+}
+
+// Validate checks this tag.
+func (tag Tag) Validate() error {
+ if err := checkKey(tag.Key); err != nil {
+ return err
+ }
+
+ return checkValue(tag.Value)
+}
+
+// MarshalXML encodes to XML data.
+func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if err := tag.Validate(); err != nil {
+ return err
+ }
+
+ type subTag Tag // to avoid recursively calling MarshalXML()
+ return e.EncodeElement(subTag(tag), start)
+}
+
+// UnmarshalXML decodes XML data to tag.
+func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type subTag Tag // to avoid recursively calling UnmarshalXML()
+ var st subTag
+ if err := d.DecodeElement(&st, &start); err != nil {
+ return err
+ }
+
+ if err := Tag(st).Validate(); err != nil {
+ return err
+ }
+
+ *tag = Tag(st)
+ return nil
+}
+
+// tagSet represents list of unique tags.
+type tagSet struct {
+ tagMap map[string]string
+ isObject bool
+}
+
+func (tags tagSet) String() string {
+ if len(tags.tagMap) == 0 {
+ return ""
+ }
+ var buf strings.Builder
+ keys := make([]string, 0, len(tags.tagMap))
+ for k := range tags.tagMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ keyEscaped := url.QueryEscape(k)
+ valueEscaped := url.QueryEscape(tags.tagMap[k])
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(keyEscaped)
+ buf.WriteByte('=')
+ buf.WriteString(valueEscaped)
+ }
+ return buf.String()
+}
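+
+// For example, a tagMap of {"b": "2", "a": "1"} renders as "a=1&b=2":
+// keys are emitted in sorted order and both keys and values are
+// query-escaped.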
+
+func (tags *tagSet) remove(key string) {
+ delete(tags.tagMap, key)
+}
+
+func (tags *tagSet) set(key, value string, failOnExist bool) error {
+ if failOnExist {
+ if _, found := tags.tagMap[key]; found {
+ return errDuplicateTagKey
+ }
+ }
+
+ if err := checkKey(key); err != nil {
+ return err
+ }
+
+ if err := checkValue(value); err != nil {
+ return err
+ }
+
+ if tags.isObject {
+ if len(tags.tagMap) == maxObjectTagCount {
+ return errTooManyObjectTags
+ }
+ } else if len(tags.tagMap) == maxTagCount {
+ return errTooManyTags
+ }
+
+ tags.tagMap[key] = value
+ return nil
+}
+
+func (tags tagSet) count() int {
+ return len(tags.tagMap)
+}
+
+func (tags tagSet) toMap() map[string]string {
+ m := make(map[string]string, len(tags.tagMap))
+ for key, value := range tags.tagMap {
+ m[key] = value
+ }
+ return m
+}
+
+// MarshalXML encodes to XML data.
+func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ tagList := struct {
+ Tags []Tag `xml:"Tag"`
+ }{}
+
+ tagList.Tags = make([]Tag, 0, len(tags.tagMap))
+ for key, value := range tags.tagMap {
+ tagList.Tags = append(tagList.Tags, Tag{key, value})
+ }
+
+ return e.EncodeElement(tagList, start)
+}
+
+// UnmarshalXML decodes XML data to tag list.
+func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ tagList := struct {
+ Tags []Tag `xml:"Tag"`
+ }{}
+
+ if err := d.DecodeElement(&tagList, &start); err != nil {
+ return err
+ }
+
+ if tags.isObject {
+ if len(tagList.Tags) > maxObjectTagCount {
+ return errTooManyObjectTags
+ }
+ } else if len(tagList.Tags) > maxTagCount {
+ return errTooManyTags
+ }
+
+ m := make(map[string]string, len(tagList.Tags))
+ for _, tag := range tagList.Tags {
+ if _, found := m[tag.Key]; found {
+ return errDuplicateTagKey
+ }
+
+ m[tag.Key] = tag.Value
+ }
+
+ tags.tagMap = m
+ return nil
+}
+
+type tagging struct {
+ XMLName xml.Name `xml:"Tagging"`
+ TagSet *tagSet `xml:"TagSet"`
+}
+
+// Tags is list of tags of XML request/response as per
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody
+type Tags tagging
+
+func (tags Tags) String() string {
+ return tags.TagSet.String()
+}
+
+// Remove removes a tag by its key.
+func (tags *Tags) Remove(key string) {
+ tags.TagSet.remove(key)
+}
+
+// Set sets new tag.
+func (tags *Tags) Set(key, value string) error {
+ return tags.TagSet.set(key, value, false)
+}
+
+// Count returns the number of tags accounted for.
+func (tags Tags) Count() int {
+ return tags.TagSet.count()
+}
+
+// ToMap returns copy of tags.
+func (tags Tags) ToMap() map[string]string {
+ return tags.TagSet.toMap()
+}
+
+// MapToObjectTags converts an input map of key and value into
+// *Tags data structure with validation.
+func MapToObjectTags(tagMap map[string]string) (*Tags, error) {
+ return NewTags(tagMap, true)
+}
+
+// MapToBucketTags converts an input map of key and value into
+// *Tags data structure with validation.
+func MapToBucketTags(tagMap map[string]string) (*Tags, error) {
+ return NewTags(tagMap, false)
+}
+
+// NewTags creates Tags from tagMap. If isObject is set, it validates against the object tag limits.
+func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) {
+ tagging := &Tags{
+ TagSet: &tagSet{
+ tagMap: make(map[string]string),
+ isObject: isObject,
+ },
+ }
+
+ for key, value := range tagMap {
+ if err := tagging.TagSet.set(key, value, true); err != nil {
+ return nil, err
+ }
+ }
+
+ return tagging, nil
+}
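+
+// Usage sketch (illustrative only; the key/value pair is a placeholder):
+//
+//	t, err := MapToObjectTags(map[string]string{"project": "alpha"})
+//	if err != nil {
+//		// a key or value failed validation
+//	}
+//	_ = t.String() // "project=alpha"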
+
+func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) {
+ tagging := &Tags{
+ TagSet: &tagSet{
+ tagMap: make(map[string]string),
+ isObject: isObject,
+ },
+ }
+
+ if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
+ return nil, err
+ }
+
+ return tagging, nil
+}
+
+// ParseBucketXML decodes XML data of tags in reader specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax.
+func ParseBucketXML(reader io.Reader) (*Tags, error) {
+ return unmarshalXML(reader, false)
+}
+
+// ParseObjectXML decodes XML data of tags in reader specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax
+func ParseObjectXML(reader io.Reader) (*Tags, error) {
+ return unmarshalXML(reader, true)
+}
+
+// stringsCut slices s around the first instance of sep,
+// returning the text before and after sep; it mirrors strings.Cut
+// from Go 1.18. The found result reports whether sep appears in s.
+// If sep does not appear in s, stringsCut returns s, "", false.
+func stringsCut(s, sep string) (before, after string, found bool) {
+ if i := strings.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, "", false
+}
+
+func (tags *tagSet) parseTags(tgs string) (err error) {
+ for tgs != "" {
+ var key string
+ key, tgs, _ = stringsCut(tgs, "&")
+ if key == "" {
+ continue
+ }
+ key, value, _ := stringsCut(key, "=")
+ key, err1 := url.QueryUnescape(key)
+ if err1 != nil {
+ if err == nil {
+ err = err1
+ }
+ continue
+ }
+ value, err1 = url.QueryUnescape(value)
+ if err1 != nil {
+ if err == nil {
+ err = err1
+ }
+ continue
+ }
+ if err = tags.set(key, value, true); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Parse decodes HTTP query formatted string into tags which is limited by isObject.
+// A query formatted string is like "key1=value1&key2=value2".
+func Parse(s string, isObject bool) (*Tags, error) {
+ tagging := &Tags{
+ TagSet: &tagSet{
+ tagMap: make(map[string]string),
+ isObject: isObject,
+ },
+ }
+
+ if err := tagging.TagSet.parseTags(s); err != nil {
+ return nil, err
+ }
+
+ return tagging, nil
+}
+
+// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2".
+func ParseObjectTags(s string) (*Tags, error) {
+ return Parse(s, true)
+}
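+
+// Usage sketch (illustrative only): parsing round-trips through the
+// sorted, query-escaped encoding produced by String().
+//
+//	t, err := ParseObjectTags("team=core&env=prod")
+//	// t.ToMap() == map[string]string{"team": "core", "env": "prod"}
+//	// t.String() == "env=prod&team=core" (keys sorted)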
--
cgit v1.2.3