aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com/minio/minio-go/v7/pkg
diff options
context:
space:
mode:
authorLibravatar Rutger Broekhoff2023-12-29 21:31:53 +0100
committerLibravatar Rutger Broekhoff2023-12-29 21:31:53 +0100
commit404aeae4545d2426c089a5f8d5e82dae56f5212b (patch)
tree2d84e00af272b39fc04f3795ae06bc48970e57b5 /vendor/github.com/minio/minio-go/v7/pkg
parent209d8b0187ed025dec9ac149ebcced3462877bff (diff)
downloadgitolfs3-404aeae4545d2426c089a5f8d5e82dae56f5212b.tar.gz
gitolfs3-404aeae4545d2426c089a5f8d5e82dae56f5212b.zip
Make Nix builds work
Diffstat (limited to 'vendor/github.com/minio/minio-go/v7/pkg')
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go242
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go88
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample17
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go193
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json7
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample15
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go60
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go71
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go68
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go95
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go157
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go139
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go433
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go77
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go67
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go182
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go146
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go189
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go211
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go205
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go24
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go24
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go198
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go491
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/notification/info.go78
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go440
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go971
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go411
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go200
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go224
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go403
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go319
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go351
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go62
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go66
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go413
36 files changed, 7337 insertions, 0 deletions
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
new file mode 100644
index 0000000..800c4a2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -0,0 +1,242 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "crypto/sha256"
23 "encoding/hex"
24 "encoding/xml"
25 "errors"
26 "io"
27 "net/http"
28 "net/url"
29 "strconv"
30 "strings"
31 "time"
32
33 "github.com/minio/minio-go/v7/pkg/signer"
34)
35
// AssumeRoleResponse contains the result of successful AssumeRole request.
type AssumeRoleResponse struct {
	XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`

	Result           AssumeRoleResult `xml:"AssumeRoleResult"`
	ResponseMetadata struct {
		// RequestID identifies the STS request, useful for debugging.
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}

// AssumeRoleResult - Contains the response to a successful AssumeRole
// request, including temporary credentials that can be used to make
// MinIO API requests.
type AssumeRoleResult struct {
	// The identifiers for the temporary security credentials that the operation
	// returns.
	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`

	// The temporary security credentials, which include an access key ID, a secret
	// access key, and a security (or session) token.
	//
	// Note: The size of the security token that STS APIs return is not fixed. We
	// strongly recommend that you make no assumptions about the maximum size. As
	// of this writing, the typical size is less than 4096 bytes, but that can vary.
	// Also, future updates to AWS might require larger sizes.
	Credentials struct {
		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
	} `xml:",omitempty"`

	// A percentage value that indicates the size of the policy in packed form.
	// The service rejects any policy with a packed size greater than 100 percent,
	// which means the policy exceeded the allowed space.
	PackedPolicySize int `xml:",omitempty"`
}
73
// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSAssumeRole struct {
	// Expiry provides the shared SetExpiration/IsExpired bookkeeping.
	Expiry

	// Required http Client to use when connecting to MinIO STS service.
	Client *http.Client

	// STS endpoint to fetch STS credentials.
	STSEndpoint string

	// various options for this request.
	Options STSAssumeRoleOptions
}

// STSAssumeRoleOptions collection of various input options
// to obtain AssumeRole credentials.
type STSAssumeRoleOptions struct {
	// Mandatory inputs.
	AccessKey string
	SecretKey string

	SessionToken string // Optional if the first request is made with temporary credentials.
	Policy       string // Optional to assign a policy to the assumed role

	Location        string // Optional commonly needed with AWS STS.
	DurationSeconds int    // Optional defaults to 1 hour.

	// Optional only valid if using with AWS STS
	RoleARN         string
	RoleSessionName string
	ExternalID      string
}
107
108// NewSTSAssumeRole returns a pointer to a new
109// Credentials object wrapping the STSAssumeRole.
110func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
111 if stsEndpoint == "" {
112 return nil, errors.New("STS endpoint cannot be empty")
113 }
114 if opts.AccessKey == "" || opts.SecretKey == "" {
115 return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
116 }
117 return New(&STSAssumeRole{
118 Client: &http.Client{
119 Transport: http.DefaultTransport,
120 },
121 STSEndpoint: stsEndpoint,
122 Options: opts,
123 }), nil
124}
125
// defaultDurationSeconds is the default credential lifetime (one hour)
// requested from STS when the caller asks for less than that.
const defaultDurationSeconds = 3600
127
128// closeResponse close non nil response with any response Body.
129// convenient wrapper to drain any remaining data on response body.
130//
131// Subsequently this allows golang http RoundTripper
132// to re-use the same connection for future requests.
133func closeResponse(resp *http.Response) {
134 // Callers should close resp.Body when done reading from it.
135 // If resp.Body is not closed, the Client's underlying RoundTripper
136 // (typically Transport) may not be able to re-use a persistent TCP
137 // connection to the server for a subsequent "keep-alive" request.
138 if resp != nil && resp.Body != nil {
139 // Drain any remaining Body and then close the connection.
140 // Without this closing connection would disallow re-using
141 // the same connection for future uses.
142 // - http://stackoverflow.com/a/17961593/4465767
143 io.Copy(io.Discard, resp.Body)
144 resp.Body.Close()
145 }
146}
147
148func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) {
149 v := url.Values{}
150 v.Set("Action", "AssumeRole")
151 v.Set("Version", STSVersion)
152 if opts.RoleARN != "" {
153 v.Set("RoleArn", opts.RoleARN)
154 }
155 if opts.RoleSessionName != "" {
156 v.Set("RoleSessionName", opts.RoleSessionName)
157 }
158 if opts.DurationSeconds > defaultDurationSeconds {
159 v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds))
160 } else {
161 v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds))
162 }
163 if opts.Policy != "" {
164 v.Set("Policy", opts.Policy)
165 }
166 if opts.ExternalID != "" {
167 v.Set("ExternalId", opts.ExternalID)
168 }
169
170 u, err := url.Parse(endpoint)
171 if err != nil {
172 return AssumeRoleResponse{}, err
173 }
174 u.Path = "/"
175
176 postBody := strings.NewReader(v.Encode())
177 hash := sha256.New()
178 if _, err = io.Copy(hash, postBody); err != nil {
179 return AssumeRoleResponse{}, err
180 }
181 postBody.Seek(0, 0)
182
183 req, err := http.NewRequest(http.MethodPost, u.String(), postBody)
184 if err != nil {
185 return AssumeRoleResponse{}, err
186 }
187 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
188 req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
189 if opts.SessionToken != "" {
190 req.Header.Set("X-Amz-Security-Token", opts.SessionToken)
191 }
192 req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)
193
194 resp, err := clnt.Do(req)
195 if err != nil {
196 return AssumeRoleResponse{}, err
197 }
198 defer closeResponse(resp)
199 if resp.StatusCode != http.StatusOK {
200 var errResp ErrorResponse
201 buf, err := io.ReadAll(resp.Body)
202 if err != nil {
203 return AssumeRoleResponse{}, err
204 }
205 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
206 if err != nil {
207 var s3Err Error
208 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
209 return AssumeRoleResponse{}, err
210 }
211 errResp.RequestID = s3Err.RequestID
212 errResp.STSError.Code = s3Err.Code
213 errResp.STSError.Message = s3Err.Message
214 }
215 return AssumeRoleResponse{}, errResp
216 }
217
218 a := AssumeRoleResponse{}
219 if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil {
220 return AssumeRoleResponse{}, err
221 }
222 return a, nil
223}
224
// Retrieve retrieves credentials from the MinIO service.
// Error will be returned if the request fails.
//
// On success the provider's expiry is armed from the STS-reported
// expiration, and the returned Value always uses Signature V4.
func (m *STSAssumeRole) Retrieve() (Value, error) {
	a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
	if err != nil {
		return Value{}, err
	}

	// Expiry window is set to 10secs.
	// NOTE(review): DefaultExpiryWindow is declared elsewhere in the
	// package; confirm the "10secs" claim still matches its value.
	m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)

	return Value{
		AccessKeyID:     a.Result.Credentials.AccessKey,
		SecretAccessKey: a.Result.Credentials.SecretKey,
		SessionToken:    a.Result.Credentials.SessionToken,
		SignerType:      SignatureV4,
	}, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
new file mode 100644
index 0000000..ddccfb1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
@@ -0,0 +1,88 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
// A Chain will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
// The Chain provides a way of chaining multiple providers together
// which will pick the first available using priority order of the
// Providers in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
// Retrieve() will return the no credentials value.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again after IsExpired() is true.
//
//	creds := credentials.NewChainCredentials(
//	    []credentials.Provider{
//	        &credentials.EnvAWSS3{},
//	        &credentials.EnvMinio{},
//	    })
//
//	// Usage of ChainCredentials.
//	mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
//	if err != nil {
//	     log.Fatalln(err)
//	}
type Chain struct {
	// Providers is consulted in order on every refresh.
	Providers []Provider
	// curr caches the provider that most recently produced credentials.
	curr Provider
}
49
50// NewChainCredentials returns a pointer to a new Credentials object
51// wrapping a chain of providers.
52func NewChainCredentials(providers []Provider) *Credentials {
53 return New(&Chain{
54 Providers: append([]Provider{}, providers...),
55 })
56}
57
58// Retrieve returns the credentials value, returns no credentials(anonymous)
59// if no credentials provider returned any value.
60//
61// If a provider is found with credentials, it will be cached and any calls
62// to IsExpired() will return the expired state of the cached provider.
63func (c *Chain) Retrieve() (Value, error) {
64 for _, p := range c.Providers {
65 creds, _ := p.Retrieve()
66 // Always prioritize non-anonymous providers, if any.
67 if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
68 continue
69 }
70 c.curr = p
71 return creds, nil
72 }
73 // At this point we have exhausted all the providers and
74 // are left without any credentials return anonymous.
75 return Value{
76 SignerType: SignatureAnonymous,
77 }, nil
78}
79
80// IsExpired will returned the expired state of the currently cached provider
81// if there is one. If there is no current provider, true will be returned.
82func (c *Chain) IsExpired() bool {
83 if c.curr != nil {
84 return c.curr.IsExpired()
85 }
86
87 return true
88}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
new file mode 100644
index 0000000..d793c9e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
@@ -0,0 +1,17 @@
1{
2 "version": "8",
3 "hosts": {
4 "play": {
5 "url": "https://play.min.io",
6 "accessKey": "Q3AM3UQ867SPQQA43P2F",
7 "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
8 "api": "S3v2"
9 },
10 "s3": {
11 "url": "https://s3.amazonaws.com",
12 "accessKey": "accessKey",
13 "secretKey": "secret",
14 "api": "S3v4"
15 }
16 }
17} \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
new file mode 100644
index 0000000..af61049
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
@@ -0,0 +1,193 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "sync"
22 "time"
23)
24
const (
	// STSVersion sts version string
	STSVersion = "2011-06-15"

	// How much duration to slash from the given expiration duration:
	// with a negative window, SetExpiration keeps 80% of the remaining
	// lifetime and expires the credentials after that.
	defaultExpiryWindow = 0.8
)

// A Value is the AWS credentials value for individual credential fields.
type Value struct {
	// AWS Access key ID
	AccessKeyID string

	// AWS Secret Access Key
	SecretAccessKey string

	// AWS Session Token
	SessionToken string

	// Signature Type.
	SignerType SignatureType
}

// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what to
// be expired means.
type Provider interface {
	// Retrieve returns nil if it successfully retrieved the value.
	// Error is returned if the value were not obtainable, or empty.
	Retrieve() (Value, error)

	// IsExpired returns if the credentials are no longer valid, and need
	// to be retrieved.
	IsExpired() bool
}
60
61// A Expiry provides shared expiration logic to be used by credentials
62// providers to implement expiry functionality.
63//
64// The best method to use this struct is as an anonymous field within the
65// provider's struct.
66//
67// Example:
68//
69// type IAMCredentialProvider struct {
70// Expiry
71// ...
72// }
73type Expiry struct {
74 // The date/time when to expire on
75 expiration time.Time
76
77 // If set will be used by IsExpired to determine the current time.
78 // Defaults to time.Now if CurrentTime is not set.
79 CurrentTime func() time.Time
80}
81
82// SetExpiration sets the expiration IsExpired will check when called.
83//
84// If window is greater than 0 the expiration time will be reduced by the
85// window value.
86//
87// Using a window is helpful to trigger credentials to expire sooner than
88// the expiration time given to ensure no requests are made with expired
89// tokens.
90func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
91 if e.CurrentTime == nil {
92 e.CurrentTime = time.Now
93 }
94 cut := window
95 if cut < 0 {
96 expireIn := expiration.Sub(e.CurrentTime())
97 cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow))
98 }
99 e.expiration = expiration.Add(-cut)
100}
101
102// IsExpired returns if the credentials are expired.
103func (e *Expiry) IsExpired() bool {
104 if e.CurrentTime == nil {
105 e.CurrentTime = time.Now
106 }
107 return e.expiration.Before(e.CurrentTime())
108}
109
// Credentials - A container for synchronous safe retrieval of credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
	// Mutex guards creds, forceRefresh and provider across goroutines.
	sync.Mutex

	creds        Value    // last cached credentials value
	forceRefresh bool     // when true, the next Get() bypasses the cache
	provider     Provider // source of fresh credentials
}
128
129// New returns a pointer to a new Credentials with the provider set.
130func New(provider Provider) *Credentials {
131 return &Credentials{
132 provider: provider,
133 forceRefresh: true,
134 }
135}
136
// Get returns the credentials value, or error if the credentials Value failed
// to be retrieved.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
func (c *Credentials) Get() (Value, error) {
	// A nil receiver yields zero (anonymous) credentials instead of
	// panicking, so callers may pass around an unset *Credentials.
	if c == nil {
		return Value{}, nil
	}

	c.Lock()
	defer c.Unlock()

	if c.isExpired() {
		creds, err := c.provider.Retrieve()
		if err != nil {
			return Value{}, err
		}
		// Cache the fresh value and clear any forced refresh.
		c.creds = creds
		c.forceRefresh = false
	}

	return c.creds, nil
}
165
166// Expire expires the credentials and forces them to be retrieved on the
167// next call to Get().
168//
169// This will override the Provider's expired state, and force Credentials
170// to call the Provider's Retrieve().
171func (c *Credentials) Expire() {
172 c.Lock()
173 defer c.Unlock()
174
175 c.forceRefresh = true
176}
177
178// IsExpired returns if the credentials are no longer valid, and need
179// to be refreshed.
180//
181// If the Credentials were forced to be expired with Expire() this will
182// reflect that override.
183func (c *Credentials) IsExpired() bool {
184 c.Lock()
185 defer c.Unlock()
186
187 return c.isExpired()
188}
189
190// isExpired helper method wrapping the definition of expired credentials.
191func (c *Credentials) isExpired() bool {
192 return c.forceRefresh || c.provider.IsExpired()
193}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
new file mode 100644
index 0000000..afbfad5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
@@ -0,0 +1,7 @@
1{
2 "Version": 1,
3 "SessionToken": "token",
4 "AccessKeyId": "accessKey",
5 "SecretAccessKey": "secret",
6 "Expiration": "9999-04-27T16:02:25.000Z"
7}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
new file mode 100644
index 0000000..e2dc1bf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
@@ -0,0 +1,15 @@
1[default]
2aws_access_key_id = accessKey
3aws_secret_access_key = secret
4aws_session_token = token
5
6[no_token]
7aws_access_key_id = accessKey
8aws_secret_access_key = secret
9
10[with_colon]
11aws_access_key_id: accessKey
12aws_secret_access_key: secret
13
14[with_process]
15credential_process = /bin/cat credentials.json
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
new file mode 100644
index 0000000..fbfb105
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
@@ -0,0 +1,60 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18// Package credentials provides credential retrieval and management
19// for S3 compatible object storage.
20//
21// By default the Credentials.Get() will cache the successful result of a
22// Provider's Retrieve() until Provider.IsExpired() returns true. At which
23// point Credentials will call Provider's Retrieve() to get new credential Value.
24//
25// The Provider is responsible for determining when credentials have expired.
26// It is also important to note that Credentials will always call Retrieve the
27// first time Credentials.Get() is called.
28//
29// Example of using the environment variable credentials.
30//
31// creds := NewFromEnv()
32// // Retrieve the credentials value
33// credValue, err := creds.Get()
34// if err != nil {
35// // handle error
36// }
37//
38// Example of forcing credentials to expire and be refreshed on the next Get().
39// This may be helpful to proactively expire credentials and refresh them sooner
40// than they would naturally expire on their own.
41//
42// creds := NewFromIAM("")
43// creds.Expire()
44// credsValue, err := creds.Get()
45// // New credentials will be retrieved instead of from cache.
46//
47// # Custom Provider
48//
49// Each Provider built into this package also provides a helper method to generate
50// a Credentials pointer setup with the provider. To use a custom Provider just
51// create a type which satisfies the Provider interface and pass it to the
52// NewCredentials method.
53//
54// type MyProvider struct{}
55// func (m *MyProvider) Retrieve() (Value, error) {...}
56// func (m *MyProvider) IsExpired() bool {...}
57//
58// creds := NewCredentials(&MyProvider{})
59// credValue, err := creds.Get()
60package credentials
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
new file mode 100644
index 0000000..b6e60d0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import "os"
21
// A EnvAWS retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
// * Secret Token: AWS_SESSION_TOKEN.
type EnvAWS struct {
	// retrieved flips to true after the first Retrieve call; IsExpired
	// reports its negation.
	retrieved bool
}

// NewEnvAWS returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvAWS() *Credentials {
	return New(&EnvAWS{})
}
39
40// Retrieve retrieves the keys from the environment.
41func (e *EnvAWS) Retrieve() (Value, error) {
42 e.retrieved = false
43
44 id := os.Getenv("AWS_ACCESS_KEY_ID")
45 if id == "" {
46 id = os.Getenv("AWS_ACCESS_KEY")
47 }
48
49 secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
50 if secret == "" {
51 secret = os.Getenv("AWS_SECRET_KEY")
52 }
53
54 signerType := SignatureV4
55 if id == "" || secret == "" {
56 signerType = SignatureAnonymous
57 }
58
59 e.retrieved = true
60 return Value{
61 AccessKeyID: id,
62 SecretAccessKey: secret,
63 SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
64 SignerType: signerType,
65 }, nil
66}
67
// IsExpired returns if the credentials have been retrieved.
// Environment credentials never expire, so this stays false once
// Retrieve has run.
func (e *EnvAWS) IsExpired() bool {
	return !e.retrieved
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
new file mode 100644
index 0000000..5bfeab1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
@@ -0,0 +1,68 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import "os"
21
// A EnvMinio retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: MINIO_ACCESS_KEY.
// * Secret Access Key: MINIO_SECRET_KEY.
// * Access Key ID: MINIO_ROOT_USER.
// * Secret Access Key: MINIO_ROOT_PASSWORD.
type EnvMinio struct {
	// retrieved flips to true after the first Retrieve call; IsExpired
	// reports its negation.
	retrieved bool
}

// NewEnvMinio returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvMinio() *Credentials {
	return New(&EnvMinio{})
}
40
41// Retrieve retrieves the keys from the environment.
42func (e *EnvMinio) Retrieve() (Value, error) {
43 e.retrieved = false
44
45 id := os.Getenv("MINIO_ROOT_USER")
46 secret := os.Getenv("MINIO_ROOT_PASSWORD")
47
48 signerType := SignatureV4
49 if id == "" || secret == "" {
50 id = os.Getenv("MINIO_ACCESS_KEY")
51 secret = os.Getenv("MINIO_SECRET_KEY")
52 if id == "" || secret == "" {
53 signerType = SignatureAnonymous
54 }
55 }
56
57 e.retrieved = true
58 return Value{
59 AccessKeyID: id,
60 SecretAccessKey: secret,
61 SignerType: signerType,
62 }, nil
63}
64
// IsExpired returns if the credentials have been retrieved.
// Environment credentials never expire, so this stays false once
// Retrieve has run.
func (e *EnvMinio) IsExpired() bool {
	return !e.retrieved
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
new file mode 100644
index 0000000..07a9c2f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -0,0 +1,95 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2021 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "fmt"
24 "io"
25)
26
// ErrorResponse - Is the typed error returned.
// It mirrors the STS XML error envelope (namespace
// https://sts.amazonaws.com/doc/2011-06-15/).
// ErrorResponse struct should be comparable since it is compared inside
// golang http API (https://github.com/golang/go/issues/29768)
type ErrorResponse struct {
	XMLName  xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"`
	STSError struct {
		Type    string `xml:"Type"`
		Code    string `xml:"Code"`
		Message string `xml:"Message"`
	} `xml:"Error"`
	RequestID string `xml:"RequestId"`
}
39
// Error - Is the typed error returned by all API operations.
type Error struct {
	XMLName    xml.Name `xml:"Error" json:"-"`
	Code       string
	Message    string
	BucketName string
	Key        string
	Resource   string
	RequestID  string `xml:"RequestId"`
	HostID     string `xml:"HostId"`

	// Region where the bucket is located. This header is returned
	// only in HEAD bucket and ListObjects response.
	Region string

	// Captures the server string returned in response header.
	Server string

	// Underlying HTTP status code for the returned error
	StatusCode int `xml:"-" json:"-"`
}

// Error returns the server-provided message when present, otherwise a
// generic string built from the error code.
func (e Error) Error() string {
	if msg := e.Message; msg != "" {
		return msg
	}
	return fmt.Sprintf("Error response code %s.", e.Code)
}
69
70// Error - Returns STS error string.
71func (e ErrorResponse) Error() string {
72 if e.STSError.Message == "" {
73 return fmt.Sprintf("Error response code %s.", e.STSError.Code)
74 }
75 return e.STSError.Message
76}
77
78// xmlDecoder provide decoded value in xml.
79func xmlDecoder(body io.Reader, v interface{}) error {
80 d := xml.NewDecoder(body)
81 return d.Decode(v)
82}
83
84// xmlDecodeAndBody reads the whole body up to 1MB and
85// tries to XML decode it into v.
86// The body that was read and any error from reading or decoding is returned.
87func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
88 // read the whole body (up to 1MB)
89 const maxBodyLength = 1 << 20
90 body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
91 if err != nil {
92 return nil, err
93 }
94 return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
95}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
new file mode 100644
index 0000000..5b07376
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,157 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "encoding/json"
22 "errors"
23 "os"
24 "os/exec"
25 "path/filepath"
26 "strings"
27 "time"
28
29 ini "gopkg.in/ini.v1"
30)
31
// A externalProcessCredentials stores the output of a credential_process
// command: the JSON document the external process prints on stdout.
type externalProcessCredentials struct {
	Version      int    // credential_process protocol version
	SessionToken string // optional temporary session token
	AccessKeyID  string `json:"AccessKeyId"`
	SecretAccessKey string
	Expiration      time.Time // when the returned credentials expire
}
40
// A FileAWSCredentials retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type FileAWSCredentials struct {
	Expiry

	// Path to the shared credentials file.
	//
	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
	// env value is empty will default to current user's home directory.
	// Linux/OSX: "$HOME/.aws/credentials"
	// Windows:   "%USERPROFILE%\.aws\credentials"
	Filename string

	// AWS Profile to extract credentials from the shared credentials file. If empty
	// will default to environment variable "AWS_PROFILE" or "default" if
	// environment variable is also not set.
	Profile string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}
64
65// NewFileAWSCredentials returns a pointer to a new Credentials object
66// wrapping the Profile file provider.
67func NewFileAWSCredentials(filename, profile string) *Credentials {
68 return New(&FileAWSCredentials{
69 Filename: filename,
70 Profile: profile,
71 })
72}
73
74// Retrieve reads and extracts the shared credentials from the current
75// users home directory.
76func (p *FileAWSCredentials) Retrieve() (Value, error) {
77 if p.Filename == "" {
78 p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
79 if p.Filename == "" {
80 homeDir, err := os.UserHomeDir()
81 if err != nil {
82 return Value{}, err
83 }
84 p.Filename = filepath.Join(homeDir, ".aws", "credentials")
85 }
86 }
87 if p.Profile == "" {
88 p.Profile = os.Getenv("AWS_PROFILE")
89 if p.Profile == "" {
90 p.Profile = "default"
91 }
92 }
93
94 p.retrieved = false
95
96 iniProfile, err := loadProfile(p.Filename, p.Profile)
97 if err != nil {
98 return Value{}, err
99 }
100
101 // Default to empty string if not found.
102 id := iniProfile.Key("aws_access_key_id")
103 // Default to empty string if not found.
104 secret := iniProfile.Key("aws_secret_access_key")
105 // Default to empty string if not found.
106 token := iniProfile.Key("aws_session_token")
107
108 // If credential_process is defined, obtain credentials by executing
109 // the external process
110 credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String())
111 if credentialProcess != "" {
112 args := strings.Fields(credentialProcess)
113 if len(args) <= 1 {
114 return Value{}, errors.New("invalid credential process args")
115 }
116 cmd := exec.Command(args[0], args[1:]...)
117 out, err := cmd.Output()
118 if err != nil {
119 return Value{}, err
120 }
121 var externalProcessCredentials externalProcessCredentials
122 err = json.Unmarshal([]byte(out), &externalProcessCredentials)
123 if err != nil {
124 return Value{}, err
125 }
126 p.retrieved = true
127 p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow)
128 return Value{
129 AccessKeyID: externalProcessCredentials.AccessKeyID,
130 SecretAccessKey: externalProcessCredentials.SecretAccessKey,
131 SessionToken: externalProcessCredentials.SessionToken,
132 SignerType: SignatureV4,
133 }, nil
134 }
135 p.retrieved = true
136 return Value{
137 AccessKeyID: id.String(),
138 SecretAccessKey: secret.String(),
139 SessionToken: token.String(),
140 SignerType: SignatureV4,
141 }, nil
142}
143
144// loadProfiles loads from the file pointed to by shared credentials filename for profile.
145// The credentials retrieved from the profile will be returned or error. Error will be
146// returned if it fails to read from the file, or the data is invalid.
147func loadProfile(filename, profile string) (*ini.Section, error) {
148 config, err := ini.Load(filename)
149 if err != nil {
150 return nil, err
151 }
152 iniProfile, err := config.GetSection(profile)
153 if err != nil {
154 return nil, err
155 }
156 return iniProfile, nil
157}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
new file mode 100644
index 0000000..eb77767
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -0,0 +1,139 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "os"
22 "path/filepath"
23 "runtime"
24
25 jsoniter "github.com/json-iterator/go"
26)
27
// A FileMinioClient retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Configuration file example: $HOME/.mc/config.json
type FileMinioClient struct {
	// Path to the shared credentials file.
	//
	// If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
	// env value is empty will default to current user's home directory.
	// Linux/OSX: "$HOME/.mc/config.json"
	// Windows:   "%USERALIAS%\mc\config.json"
	Filename string

	// MinIO Alias to extract credentials from the shared credentials file. If empty
	// will default to environment variable "MINIO_ALIAS" or "s3" if
	// environment variable is also not set.
	Alias string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}
49
50// NewFileMinioClient returns a pointer to a new Credentials object
51// wrapping the Alias file provider.
52func NewFileMinioClient(filename, alias string) *Credentials {
53 return New(&FileMinioClient{
54 Filename: filename,
55 Alias: alias,
56 })
57}
58
59// Retrieve reads and extracts the shared credentials from the current
60// users home directory.
61func (p *FileMinioClient) Retrieve() (Value, error) {
62 if p.Filename == "" {
63 if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
64 p.Filename = value
65 } else {
66 homeDir, err := os.UserHomeDir()
67 if err != nil {
68 return Value{}, err
69 }
70 p.Filename = filepath.Join(homeDir, ".mc", "config.json")
71 if runtime.GOOS == "windows" {
72 p.Filename = filepath.Join(homeDir, "mc", "config.json")
73 }
74 }
75 }
76
77 if p.Alias == "" {
78 p.Alias = os.Getenv("MINIO_ALIAS")
79 if p.Alias == "" {
80 p.Alias = "s3"
81 }
82 }
83
84 p.retrieved = false
85
86 hostCfg, err := loadAlias(p.Filename, p.Alias)
87 if err != nil {
88 return Value{}, err
89 }
90
91 p.retrieved = true
92 return Value{
93 AccessKeyID: hostCfg.AccessKey,
94 SecretAccessKey: hostCfg.SecretKey,
95 SignerType: parseSignatureType(hostCfg.API),
96 }, nil
97}
98
99// IsExpired returns if the shared credentials have expired.
100func (p *FileMinioClient) IsExpired() bool {
101 return !p.retrieved
102}
103
// hostConfig configuration of a host: one alias entry in mc's config.json.
type hostConfig struct {
	URL       string `json:"url"`
	AccessKey string `json:"accessKey"`
	SecretKey string `json:"secretKey"`
	API       string `json:"api"` // signature name, e.g. "S3v4"; parsed by parseSignatureType
}

// config config version. Version "10" stores entries under "aliases";
// older versions store them under "hosts".
type config struct {
	Version string                `json:"version"`
	Hosts   map[string]hostConfig `json:"hosts"`
	Aliases map[string]hostConfig `json:"aliases"`
}
118
119// loadAliass loads from the file pointed to by shared credentials filename for alias.
120// The credentials retrieved from the alias will be returned or error. Error will be
121// returned if it fails to read from the file.
122func loadAlias(filename, alias string) (hostConfig, error) {
123 cfg := &config{}
124 json := jsoniter.ConfigCompatibleWithStandardLibrary
125
126 configBytes, err := os.ReadFile(filename)
127 if err != nil {
128 return hostConfig{}, err
129 }
130 if err = json.Unmarshal(configBytes, cfg); err != nil {
131 return hostConfig{}, err
132 }
133
134 if cfg.Version == "10" {
135 return cfg.Aliases[alias], nil
136 }
137
138 return cfg.Hosts[alias], nil
139}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
new file mode 100644
index 0000000..c5153c4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -0,0 +1,433 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bufio"
22 "context"
23 "errors"
24 "fmt"
25 "io"
26 "net"
27 "net/http"
28 "net/url"
29 "os"
30 "path"
31 "strings"
32 "time"
33
34 jsoniter "github.com/json-iterator/go"
35)
36
// DefaultExpiryWindow - Default expiry window.
// ExpiryWindow will allow the credentials to trigger refreshing
// prior to the credentials actually expiring. This is beneficial
// so race conditions with expiring credentials do not cause
// request to fail unexpectedly due to ExpiredTokenException exceptions.
// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration.
// When used the tokens refresh will be triggered when 80% of the elapsed
// time until the actual expiration time is passed.
const DefaultExpiryWindow = -1
46
// A IAM retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
//
// Note: the corresponding AWS_* environment variables, when set, take
// precedence over the Container and EKSIdentity fields (see Retrieve).
type IAM struct {
	Expiry

	// Required http Client to use when connecting to IAM metadata service.
	Client *http.Client

	// Custom endpoint to fetch IAM role credentials.
	Endpoint string

	// Region configurable custom region for STS
	Region string

	// Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
	Container struct {
		AuthorizationToken     string
		CredentialsFullURI     string
		CredentialsRelativeURI string
	}

	// EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html
	EKSIdentity struct {
		TokenFile       string
		RoleARN         string
		RoleSessionName string
	}
}
75
// IAM Roles for Amazon EC2
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
const (
	DefaultIAMRoleEndpoint      = "http://169.254.169.254" // EC2 instance metadata service
	DefaultECSRoleEndpoint      = "http://169.254.170.2"   // ECS task metadata service
	DefaultSTSRoleEndpoint      = "https://sts.amazonaws.com"
	DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
	TokenRequestTTLHeader       = "X-aws-ec2-metadata-token-ttl-seconds" // IMDSv2 session TTL header
	TokenPath                   = "/latest/api/token"                    // IMDSv2 token endpoint
	TokenTTL                    = "21600"                                // IMDSv2 token lifetime in seconds
	TokenRequestHeader          = "X-aws-ec2-metadata-token"             // IMDSv2 token header on data requests
)
88
89// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
90func NewIAM(endpoint string) *Credentials {
91 return New(&IAM{
92 Client: &http.Client{
93 Transport: http.DefaultTransport,
94 },
95 Endpoint: endpoint,
96 })
97}
98
// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials. The credential source is picked in priority
// order: web-identity (EKS) token file, ECS relative URI, container
// full URI, and finally the EC2 instance metadata service. Each input
// prefers the AWS_* environment variable over the struct field.
func (m *IAM) Retrieve() (Value, error) {
	token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
	if token == "" {
		token = m.Container.AuthorizationToken
	}

	relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
	if relativeURI == "" {
		relativeURI = m.Container.CredentialsRelativeURI
	}

	fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
	if fullURI == "" {
		fullURI = m.Container.CredentialsFullURI
	}

	identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
	if identityFile == "" {
		identityFile = m.EKSIdentity.TokenFile
	}

	roleArn := os.Getenv("AWS_ROLE_ARN")
	if roleArn == "" {
		roleArn = m.EKSIdentity.RoleARN
	}

	roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME")
	if roleSessionName == "" {
		roleSessionName = m.EKSIdentity.RoleSessionName
	}

	region := os.Getenv("AWS_REGION")
	if region == "" {
		region = m.Region
	}

	var roleCreds ec2RoleCredRespBody
	var err error

	endpoint := m.Endpoint
	switch {
	case identityFile != "":
		// EKS web identity: exchange the projected service-account token
		// for STS credentials. A region-specific STS endpoint is derived
		// when no explicit endpoint is configured.
		if len(endpoint) == 0 {
			if region != "" {
				if strings.HasPrefix(region, "cn-") {
					endpoint = "https://sts." + region + ".amazonaws.com.cn"
				} else {
					endpoint = "https://sts." + region + ".amazonaws.com"
				}
			} else {
				endpoint = DefaultSTSRoleEndpoint
			}
		}

		creds := &STSWebIdentity{
			Client:      m.Client,
			STSEndpoint: endpoint,
			GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
				// Re-read the token file on every refresh; the kubelet
				// rotates the projected token periodically.
				token, err := os.ReadFile(identityFile)
				if err != nil {
					return nil, err
				}

				return &WebIdentityToken{Token: string(token)}, nil
			},
			RoleARN:         roleArn,
			roleSessionName: roleSessionName,
		}

		stsWebIdentityCreds, err := creds.Retrieve()
		if err == nil {
			m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
		}
		return stsWebIdentityCreds, err

	case relativeURI != "":
		// ECS task role: relative URI is resolved against the fixed ECS
		// credentials endpoint.
		if len(endpoint) == 0 {
			endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
		}

		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)

	case fullURI != "":
		// Container full URI: only loopback hosts are accepted when the
		// URI comes from configuration rather than an explicit endpoint.
		if len(endpoint) == 0 {
			endpoint = fullURI
			var ok bool
			if ok, err = isLoopback(endpoint); !ok {
				if err == nil {
					err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
				}
				break
			}
		}

		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)

	default:
		// Plain EC2: query the instance metadata service.
		roleCreds, err = getCredentials(m.Client, endpoint)
	}

	if err != nil {
		return Value{}, err
	}
	// DefaultExpiryWindow (-1) triggers a refresh once 80% of the
	// credential lifetime has elapsed.
	m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)

	return Value{
		AccessKeyID:     roleCreds.AccessKeyID,
		SecretAccessKey: roleCreds.SecretAccessKey,
		SessionToken:    roleCreds.Token,
		SignerType:      SignatureV4,
	}, nil
}
215
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses (both EC2 instance metadata and ECS task endpoints).
type ec2RoleCredRespBody struct {
	// Success State
	Expiration      time.Time
	AccessKeyID     string
	SecretAccessKey string
	Token           string

	// Error state
	Code    string
	Message string

	// Unused params.
	LastUpdated time.Time
	Type        string
}
233
234// Get the final IAM role URL where the request will
235// be sent to fetch the rolling access credentials.
236// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
237func getIAMRoleURL(endpoint string) (*url.URL, error) {
238 u, err := url.Parse(endpoint)
239 if err != nil {
240 return nil, err
241 }
242 u.Path = DefaultIAMSecurityCredsPath
243 return u, nil
244}
245
246// listRoleNames lists of credential role names associated
247// with the current EC2 service. If there are no credentials,
248// or there is an error making or receiving the request.
249// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
250func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) {
251 req, err := http.NewRequest(http.MethodGet, u.String(), nil)
252 if err != nil {
253 return nil, err
254 }
255 if token != "" {
256 req.Header.Add(TokenRequestHeader, token)
257 }
258 resp, err := client.Do(req)
259 if err != nil {
260 return nil, err
261 }
262 defer resp.Body.Close()
263 if resp.StatusCode != http.StatusOK {
264 return nil, errors.New(resp.Status)
265 }
266
267 credsList := []string{}
268 s := bufio.NewScanner(resp.Body)
269 for s.Scan() {
270 credsList = append(credsList, s.Text())
271 }
272
273 if err := s.Err(); err != nil {
274 return nil, err
275 }
276
277 return credsList, nil
278}
279
280func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) {
281 req, err := http.NewRequest(http.MethodGet, endpoint, nil)
282 if err != nil {
283 return ec2RoleCredRespBody{}, err
284 }
285
286 if token != "" {
287 req.Header.Set("Authorization", token)
288 }
289
290 resp, err := client.Do(req)
291 if err != nil {
292 return ec2RoleCredRespBody{}, err
293 }
294 defer resp.Body.Close()
295 if resp.StatusCode != http.StatusOK {
296 return ec2RoleCredRespBody{}, errors.New(resp.Status)
297 }
298
299 respCreds := ec2RoleCredRespBody{}
300 if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
301 return ec2RoleCredRespBody{}, err
302 }
303
304 return respCreds, nil
305}
306
307func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
308 ctx, cancel := context.WithTimeout(context.Background(), time.Second)
309 defer cancel()
310
311 req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil)
312 if err != nil {
313 return "", err
314 }
315 req.Header.Add(TokenRequestTTLHeader, TokenTTL)
316 resp, err := client.Do(req)
317 if err != nil {
318 return "", err
319 }
320 defer resp.Body.Close()
321 data, err := io.ReadAll(resp.Body)
322 if err != nil {
323 return "", err
324 }
325 if resp.StatusCode != http.StatusOK {
326 return "", errors.New(resp.Status)
327 }
328 return string(data), nil
329}
330
// getCredentials - obtains the credentials from the IAM role name associated with
// the current EC2 service.
//
// If the credentials cannot be found, or there is an error
// reading the response an error will be returned.
// Flow: fetch an IMDSv2 token (falling back to IMDSv1 on timeout), list
// the attached role names, then fetch credentials for the first role.
func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
	if endpoint == "" {
		endpoint = DefaultIAMRoleEndpoint
	}

	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
	token, err := fetchIMDSToken(client, endpoint)
	if err != nil {
		// Return only errors for valid situations, if the IMDSv2 is not enabled
		// we will not be able to get the token, in such a situation we have
		// to rely on IMDSv1 behavior as a fallback, this check ensures that.
		// Refer https://github.com/minio/minio-go/issues/1866
		if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
			return ec2RoleCredRespBody{}, err
		}
	}

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	u, err := getIAMRoleURL(endpoint)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	roleNames, err := listRoleNames(client, u, token)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}

	if len(roleNames) == 0 {
		return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
	}

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	// - An instance profile can contain only one IAM role. This limit cannot be increased.
	roleName := roleNames[0]

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	// The following command retrieves the security credentials for an
	// IAM role named `s3access`.
	//
	//    $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
	//
	u.Path = path.Join(u.Path, roleName)
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	// Reuse the IMDSv2 token (if any) for the credentials request.
	if token != "" {
		req.Header.Add(TokenRequestHeader, token)
	}

	resp, err := client.Do(req)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return ec2RoleCredRespBody{}, errors.New(resp.Status)
	}

	respCreds := ec2RoleCredRespBody{}
	if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
		return ec2RoleCredRespBody{}, err
	}

	if respCreds.Code != "Success" {
		// If an error code was returned something failed requesting the role.
		return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
	}

	return respCreds, nil
}
409
// isLoopback reports whether the host component of uri resolves
// exclusively to loopback addresses.
func isLoopback(uri string) (bool, error) {
	parsed, err := url.Parse(uri)
	if err != nil {
		return false, err
	}

	hostname := parsed.Hostname()
	if len(hostname) == 0 {
		return false, fmt.Errorf("can't parse host from uri: %s", uri)
	}

	addrs, err := net.LookupHost(hostname)
	if err != nil {
		return false, err
	}
	// Every resolved address must be loopback.
	for _, addr := range addrs {
		if !net.ParseIP(addr).IsLoopback() {
			return false, nil
		}
	}
	return true, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
new file mode 100644
index 0000000..b794333
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
@@ -0,0 +1,77 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import "strings"
21
// SignatureType is type of Authorization requested for a given HTTP request.
type SignatureType int

// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
const (
	// SignatureDefault is always set to v4.
	SignatureDefault SignatureType = iota
	SignatureV4
	SignatureV2
	SignatureV4Streaming
	SignatureAnonymous // Anonymous signature signifies, no signature.
)

// IsV2 reports whether s requests signature version 2.
func (s SignatureType) IsV2() bool {
	return s == SignatureV2
}

// IsV4 reports whether s requests signature version 4 (the default).
func (s SignatureType) IsV4() bool {
	return s == SignatureV4 || s == SignatureDefault
}

// IsStreamingV4 reports whether s requests streaming signature version 4.
func (s SignatureType) IsStreamingV4() bool {
	return s == SignatureV4Streaming
}

// IsAnonymous reports whether s carries no signature at all.
func (s SignatureType) IsAnonymous() bool {
	return s == SignatureAnonymous
}

// String returns a humanized name for the signature type; the strings
// returned here are case insensitive (see parseSignatureType).
func (s SignatureType) String() string {
	switch {
	case s.IsV2():
		return "S3v2"
	case s.IsV4():
		return "S3v4"
	case s.IsStreamingV4():
		return "S3v4Streaming"
	default:
		return "Anonymous"
	}
}

// parseSignatureType maps a case-insensitive signature name back to its
// SignatureType; unrecognized names yield SignatureAnonymous.
func parseSignatureType(str string) SignatureType {
	switch {
	case strings.EqualFold(str, "S3v4"):
		return SignatureV4
	case strings.EqualFold(str, "S3v2"):
		return SignatureV2
	case strings.EqualFold(str, "S3v4Streaming"):
		return SignatureV4Streaming
	default:
		return SignatureAnonymous
	}
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
new file mode 100644
index 0000000..7dde00b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
@@ -0,0 +1,67 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
// A Static is a set of credentials which are set programmatically,
// and will never expire.
type Static struct {
	Value // embedded credential value returned verbatim by Retrieve
}
25
26// NewStaticV2 returns a pointer to a new Credentials object
27// wrapping a static credentials value provider, signature is
28// set to v2. If access and secret are not specified then
29// regardless of signature type set it Value will return
30// as anonymous.
31func NewStaticV2(id, secret, token string) *Credentials {
32 return NewStatic(id, secret, token, SignatureV2)
33}
34
35// NewStaticV4 is similar to NewStaticV2 with similar considerations.
36func NewStaticV4(id, secret, token string) *Credentials {
37 return NewStatic(id, secret, token, SignatureV4)
38}
39
40// NewStatic returns a pointer to a new Credentials object
41// wrapping a static credentials value provider.
42func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
43 return New(&Static{
44 Value: Value{
45 AccessKeyID: id,
46 SecretAccessKey: secret,
47 SessionToken: token,
48 SignerType: signerType,
49 },
50 })
51}
52
53// Retrieve returns the static credentials.
54func (s *Static) Retrieve() (Value, error) {
55 if s.AccessKeyID == "" || s.SecretAccessKey == "" {
56 // Anonymous is not an error
57 return Value{SignerType: SignatureAnonymous}, nil
58 }
59 return s.Value, nil
60}
61
62// IsExpired returns if the credentials are expired.
63//
64// For Static, the credentials never expired.
65func (s *Static) IsExpired() bool {
66 return false
67}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
new file mode 100644
index 0000000..9e92c1e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -0,0 +1,182 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "errors"
24 "fmt"
25 "io"
26 "net/http"
27 "net/url"
28 "strings"
29 "time"
30)
31
// AssumedRoleUser - The identifiers for the temporary security credentials that
// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
type AssumedRoleUser struct {
	Arn string
	// NOTE(review): the XML tag is "AssumeRoleId" (not "AssumedRoleId") —
	// confirm this matches the element name the server actually emits.
	AssumedRoleID string `xml:"AssumeRoleId"`
}

// AssumeRoleWithClientGrantsResponse contains the result of successful AssumeRoleWithClientGrants request.
type AssumeRoleWithClientGrantsResponse struct {
	XMLName          xml.Name           `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"`
	Result           ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"`
	ResponseMetadata struct {
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}

// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
// request, including temporary credentials that can be used to make MinIO API requests.
type ClientGrantsResult struct {
	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
	Audience        string          `xml:",omitempty"`
	// Temporary credentials issued by the STS endpoint.
	Credentials struct {
		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
	} `xml:",omitempty"`
	PackedPolicySize             int    `xml:",omitempty"`
	Provider                     string `xml:",omitempty"`
	SubjectFromClientGrantsToken string `xml:",omitempty"`
}

// ClientGrantsToken - client grants token with expiry.
type ClientGrantsToken struct {
	Token  string // self-contained access token obtained from the IDP
	Expiry int    // requested credential validity, sent as DurationSeconds
}
69
// A STSClientGrants retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSClientGrants struct {
	// Embedded Expiry tracks when the fetched credentials lapse
	// (set via SetExpiration in Retrieve).
	Expiry

	// Required http Client to use when connecting to MinIO STS service.
	Client *http.Client

	// MinIO endpoint to fetch STS credentials.
	STSEndpoint string

	// getClientGrantsTokenExpiry function to retrieve tokens
	// from IDP. This function should return two values: one is
	// accessToken which is a self contained access token (JWT)
	// and second return value is the expiry associated with
	// this token. This is a customer provided function and
	// is mandatory.
	GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error)
}
89
90// NewSTSClientGrants returns a pointer to a new
91// Credentials object wrapping the STSClientGrants.
92func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
93 if stsEndpoint == "" {
94 return nil, errors.New("STS endpoint cannot be empty")
95 }
96 if getClientGrantsTokenExpiry == nil {
97 return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
98 }
99 return New(&STSClientGrants{
100 Client: &http.Client{
101 Transport: http.DefaultTransport,
102 },
103 STSEndpoint: stsEndpoint,
104 GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
105 }), nil
106}
107
// getClientGrantsCredentials obtains an IDP token via
// getClientGrantsTokenExpiry, performs the AssumeRoleWithClientGrants
// STS call against endpoint, and decodes the XML response.
func getClientGrantsCredentials(clnt *http.Client, endpoint string,
	getClientGrantsTokenExpiry func() (*ClientGrantsToken, error),
) (AssumeRoleWithClientGrantsResponse, error) {
	accessToken, err := getClientGrantsTokenExpiry()
	if err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}

	// Build the form-encoded STS request parameters.
	v := url.Values{}
	v.Set("Action", "AssumeRoleWithClientGrants")
	v.Set("Token", accessToken.Token)
	v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry))
	v.Set("Version", STSVersion)

	u, err := url.Parse(endpoint)
	if err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}

	req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
	if err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}

	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := clnt.Do(req)
	if err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Non-200: the body carries an XML error document. Try decoding it
		// as an STS ErrorResponse first, then fall back to an S3-style
		// Error and copy its fields over.
		var errResp ErrorResponse
		buf, err := io.ReadAll(resp.Body)
		if err != nil {
			return AssumeRoleWithClientGrantsResponse{}, err
		}
		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
		if err != nil {
			var s3Err Error
			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
				return AssumeRoleWithClientGrantsResponse{}, err
			}
			errResp.RequestID = s3Err.RequestID
			errResp.STSError.Code = s3Err.Code
			errResp.STSError.Message = s3Err.Message
		}
		return AssumeRoleWithClientGrantsResponse{}, errResp
	}

	a := AssumeRoleWithClientGrantsResponse{}
	if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}
	return a, nil
}
164
165// Retrieve retrieves credentials from the MinIO service.
166// Error will be returned if the request fails.
167func (m *STSClientGrants) Retrieve() (Value, error) {
168 a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry)
169 if err != nil {
170 return Value{}, err
171 }
172
173 // Expiry window is set to 10secs.
174 m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
175
176 return Value{
177 AccessKeyID: a.Result.Credentials.AccessKey,
178 SecretAccessKey: a.Result.Credentials.SecretKey,
179 SessionToken: a.Result.Credentials.SessionToken,
180 SignerType: SignatureV4,
181 }, nil
182}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
new file mode 100644
index 0000000..e1f9ce4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
@@ -0,0 +1,146 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "encoding/xml"
22 "errors"
23 "fmt"
24 "net/http"
25 "net/url"
26 "time"
27)
28
// CustomTokenResult - Contains temporary creds and user metadata.
type CustomTokenResult struct {
	// Temporary credentials issued by the STS endpoint.
	Credentials struct {
		AccessKey    string    `xml:"AccessKeyId"`
		SecretKey    string    `xml:"SecretAccessKey"`
		Expiration   time.Time `xml:"Expiration"`
		SessionToken string    `xml:"SessionToken"`
	} `xml:",omitempty"`

	AssumedUser string `xml:",omitempty"`
}

// AssumeRoleWithCustomTokenResponse contains the result of a successful
// AssumeRoleWithCustomToken request.
type AssumeRoleWithCustomTokenResponse struct {
	XMLName  xml.Name          `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCustomTokenResponse" json:"-"`
	Result   CustomTokenResult `xml:"AssumeRoleWithCustomTokenResult"`
	Metadata struct {
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}
50
// CustomTokenIdentity - satisfies the Provider interface, and retrieves
// credentials from MinIO using the AssumeRoleWithCustomToken STS API.
type CustomTokenIdentity struct {
	// Embedded Expiry tracks when the fetched credentials lapse
	// (set via SetExpiration in Retrieve).
	Expiry

	// HTTP client used to call the STS endpoint.
	Client *http.Client

	// MinIO server STS endpoint to fetch STS credentials.
	STSEndpoint string

	// The custom token to use with the request.
	Token string

	// RoleArn associated with the identity
	RoleArn string

	// RequestedExpiry is to set the validity of the generated credentials
	// (this value bounded by server).
	RequestedExpiry time.Duration
}
71
// Retrieve - to satisfy Provider interface; fetches credentials from MinIO.
// All request parameters are sent in the URL query string of a POST to the
// STS endpoint; the XML response is decoded into temporary credentials.
func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
	u, err := url.Parse(c.STSEndpoint)
	if err != nil {
		return value, err
	}

	v := url.Values{}
	v.Set("Action", "AssumeRoleWithCustomToken")
	v.Set("Version", STSVersion)
	v.Set("RoleArn", c.RoleArn)
	v.Set("Token", c.Token)
	if c.RequestedExpiry != 0 {
		v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds())))
	}

	u.RawQuery = v.Encode()

	req, err := http.NewRequest(http.MethodPost, u.String(), nil)
	if err != nil {
		return value, err
	}

	resp, err := c.Client.Do(req)
	if err != nil {
		return value, err
	}

	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// NOTE(review): unlike the LDAP/client-grants providers, a non-200
		// response only surfaces the HTTP status line; the XML error body
		// is discarded. Consider aligning with the ErrorResponse decoding
		// used by the sibling providers.
		return value, errors.New(resp.Status)
	}

	r := AssumeRoleWithCustomTokenResponse{}
	if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
		return
	}

	cr := r.Result.Credentials
	// Schedule a refresh ahead of the server-side expiry.
	c.SetExpiration(cr.Expiration, DefaultExpiryWindow)
	return Value{
		AccessKeyID:     cr.AccessKey,
		SecretAccessKey: cr.SecretKey,
		SessionToken:    cr.SessionToken,
		SignerType:      SignatureV4,
	}, nil
}
119
120// NewCustomTokenCredentials - returns credentials using the
121// AssumeRoleWithCustomToken STS API.
122func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) {
123 c := CustomTokenIdentity{
124 Client: &http.Client{Transport: http.DefaultTransport},
125 STSEndpoint: stsEndpoint,
126 Token: token,
127 RoleArn: roleArn,
128 }
129 for _, optFunc := range optFuncs {
130 optFunc(&c)
131 }
132 return New(&c), nil
133}
134
// CustomTokenOpt is a function type to configure the custom-token based
// credentials using NewCustomTokenCredentials.
type CustomTokenOpt func(*CustomTokenIdentity)

// CustomTokenValidityOpt sets the validity duration of the requested
// credentials. This value is ignored if the server enforces a lower validity
// period.
func CustomTokenValidityOpt(d time.Duration) CustomTokenOpt {
	return func(c *CustomTokenIdentity) {
		c.RequestedExpiry = d
	}
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
new file mode 100644
index 0000000..ec5f3f0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
@@ -0,0 +1,189 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "fmt"
24 "io"
25 "net/http"
26 "net/url"
27 "strings"
28 "time"
29)
30
// AssumeRoleWithLDAPResponse contains the result of successful
// AssumeRoleWithLDAPIdentity request
type AssumeRoleWithLDAPResponse struct {
	XMLName          xml.Name           `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"`
	Result           LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
	ResponseMetadata struct {
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}

// LDAPIdentityResult - contains credentials for a successful
// AssumeRoleWithLDAPIdentity request.
type LDAPIdentityResult struct {
	// Temporary credentials issued by the STS endpoint.
	Credentials struct {
		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
	} `xml:",omitempty"`

	SubjectFromToken string `xml:",omitempty"`
}
53
// LDAPIdentity retrieves credentials from MinIO using the
// AssumeRoleWithLDAPIdentity STS API.
type LDAPIdentity struct {
	// Embedded Expiry tracks when the fetched credentials lapse
	// (set via SetExpiration in Retrieve).
	Expiry

	// Required http Client to use when connecting to MinIO STS service.
	Client *http.Client

	// Exported STS endpoint to fetch STS credentials.
	STSEndpoint string

	// LDAP username/password used to fetch LDAP STS credentials.
	LDAPUsername, LDAPPassword string

	// Session policy to apply to the generated credentials. Leave empty to
	// use the full access policy available to the user.
	Policy string

	// RequestedExpiry is the configured expiry duration for credentials
	// requested from LDAP.
	RequestedExpiry time.Duration
}
75
76// NewLDAPIdentity returns new credentials object that uses LDAP
77// Identity.
78func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
79 l := LDAPIdentity{
80 Client: &http.Client{Transport: http.DefaultTransport},
81 STSEndpoint: stsEndpoint,
82 LDAPUsername: ldapUsername,
83 LDAPPassword: ldapPassword,
84 }
85 for _, optFunc := range optFuncs {
86 optFunc(&l)
87 }
88 return New(&l), nil
89}
90
// LDAPIdentityOpt is a function type used to configure the LDAPIdentity
// instance.
type LDAPIdentityOpt func(*LDAPIdentity)

// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
	return func(k *LDAPIdentity) {
		k.Policy = policy
	}
}

// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
	return func(k *LDAPIdentity) {
		k.RequestedExpiry = d
	}
}
108
109// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses
110// LDAP Identity with a specified session policy. The `policy` parameter must be
111// a JSON string specifying the policy document.
112//
113// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
114func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
115 return New(&LDAPIdentity{
116 Client: &http.Client{Transport: http.DefaultTransport},
117 STSEndpoint: stsEndpoint,
118 LDAPUsername: ldapUsername,
119 LDAPPassword: ldapPassword,
120 Policy: policy,
121 }), nil
122}
123
// Retrieve gets the credential by calling the MinIO STS API for
// LDAP on the configured stsEndpoint. The LDAP username/password and
// optional policy/expiry are sent as a form-encoded POST body; the XML
// response is decoded into temporary credentials.
func (k *LDAPIdentity) Retrieve() (value Value, err error) {
	u, err := url.Parse(k.STSEndpoint)
	if err != nil {
		return value, err
	}

	// Build the form-encoded AssumeRoleWithLDAPIdentity request.
	v := url.Values{}
	v.Set("Action", "AssumeRoleWithLDAPIdentity")
	v.Set("Version", STSVersion)
	v.Set("LDAPUsername", k.LDAPUsername)
	v.Set("LDAPPassword", k.LDAPPassword)
	if k.Policy != "" {
		v.Set("Policy", k.Policy)
	}
	if k.RequestedExpiry != 0 {
		v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
	}

	req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
	if err != nil {
		return value, err
	}

	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := k.Client.Do(req)
	if err != nil {
		return value, err
	}

	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Non-200: decode the XML error body, first as an STS
		// ErrorResponse, then falling back to an S3-style Error.
		var errResp ErrorResponse
		buf, err := io.ReadAll(resp.Body)
		if err != nil {
			return value, err
		}
		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
		if err != nil {
			var s3Err Error
			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
				return value, err
			}
			errResp.RequestID = s3Err.RequestID
			errResp.STSError.Code = s3Err.Code
			errResp.STSError.Message = s3Err.Message
		}
		return value, errResp
	}

	r := AssumeRoleWithLDAPResponse{}
	if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
		return
	}

	cr := r.Result.Credentials
	// Schedule a refresh ahead of the server-side expiry.
	k.SetExpiration(cr.Expiration, DefaultExpiryWindow)
	return Value{
		AccessKeyID:     cr.AccessKey,
		SecretAccessKey: cr.SecretKey,
		SessionToken:    cr.SessionToken,
		SignerType:      SignatureV4,
	}, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
new file mode 100644
index 0000000..dee0a8c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -0,0 +1,211 @@
1// MinIO Go Library for Amazon S3 Compatible Cloud Storage
2// Copyright 2021 MinIO, Inc.
3//
4// Licensed under the Apache License, Version 2.0 (the "License");
5// you may not use this file except in compliance with the License.
6// You may obtain a copy of the License at
7//
8// http://www.apache.org/licenses/LICENSE-2.0
9//
10// Unless required by applicable law or agreed to in writing, software
11// distributed under the License is distributed on an "AS IS" BASIS,
12// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13// See the License for the specific language governing permissions and
14// limitations under the License.
15
16package credentials
17
18import (
19 "bytes"
20 "crypto/tls"
21 "encoding/xml"
22 "errors"
23 "io"
24 "net"
25 "net/http"
26 "net/url"
27 "strconv"
28 "time"
29)
30
31// CertificateIdentityOption is an optional AssumeRoleWithCertificate
32// parameter - e.g. a custom HTTP transport configuration or S3 credental
33// livetime.
34type CertificateIdentityOption func(*STSCertificateIdentity)
35
36// CertificateIdentityWithTransport returns a CertificateIdentityOption that
37// customizes the STSCertificateIdentity with the given http.RoundTripper.
38func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
39 return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
40}
41
42// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
43// customizes the STSCertificateIdentity with the given livetime.
44//
45// Fetched S3 credentials will have the given livetime if the STS server
46// allows such credentials.
47func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption {
48 return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime })
49}
50
// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and
// rotates those credentials once they expire.
type STSCertificateIdentity struct {
	// Embedded Expiry tracks when the fetched credentials lapse
	// (set via SetExpiration in Retrieve).
	Expiry

	// STSEndpoint is the base URL endpoint of the STS API.
	// For example, https://minio.local:9000
	STSEndpoint string

	// S3CredentialLivetime is the duration temp. S3 access
	// credentials should be valid.
	//
	// It represents the access credential lifetime requested
	// by the client. The STS server may choose to issue
	// temp. S3 credentials that have a different - usually
	// shorter - lifetime.
	//
	// The default lifetime is one hour.
	S3CredentialLivetime time.Duration

	// Client is the HTTP client used to authenticate and fetch
	// S3 credentials.
	//
	// A custom TLS client configuration can be specified by
	// using a custom http.Transport:
	//   Client: http.Client {
	//       Transport: &http.Transport{
	//           TLSClientConfig: &tls.Config{},
	//       },
	//   }
	Client http.Client
}
83
84var _ Provider = (*STSWebIdentity)(nil) // compiler check
85
86// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
87// to the given STS endpoint with the given TLS certificate and retrieves and
88// rotates S3 credentials.
89func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
90 if endpoint == "" {
91 return nil, errors.New("STS endpoint cannot be empty")
92 }
93 if _, err := url.Parse(endpoint); err != nil {
94 return nil, err
95 }
96 identity := &STSCertificateIdentity{
97 STSEndpoint: endpoint,
98 Client: http.Client{
99 Transport: &http.Transport{
100 Proxy: http.ProxyFromEnvironment,
101 DialContext: (&net.Dialer{
102 Timeout: 30 * time.Second,
103 KeepAlive: 30 * time.Second,
104 }).DialContext,
105 ForceAttemptHTTP2: true,
106 MaxIdleConns: 100,
107 IdleConnTimeout: 90 * time.Second,
108 TLSHandshakeTimeout: 10 * time.Second,
109 ExpectContinueTimeout: 5 * time.Second,
110 TLSClientConfig: &tls.Config{
111 Certificates: []tls.Certificate{certificate},
112 },
113 },
114 },
115 }
116 for _, option := range options {
117 option(identity)
118 }
119 return New(identity), nil
120}
121
122// Retrieve fetches a new set of S3 credentials from the configured
123// STS API endpoint.
124func (i *STSCertificateIdentity) Retrieve() (Value, error) {
125 endpointURL, err := url.Parse(i.STSEndpoint)
126 if err != nil {
127 return Value{}, err
128 }
129 livetime := i.S3CredentialLivetime
130 if livetime == 0 {
131 livetime = 1 * time.Hour
132 }
133
134 queryValues := url.Values{}
135 queryValues.Set("Action", "AssumeRoleWithCertificate")
136 queryValues.Set("Version", STSVersion)
137 endpointURL.RawQuery = queryValues.Encode()
138
139 req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil)
140 if err != nil {
141 return Value{}, err
142 }
143 if req.Form == nil {
144 req.Form = url.Values{}
145 }
146 req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
147
148 resp, err := i.Client.Do(req)
149 if err != nil {
150 return Value{}, err
151 }
152 if resp.Body != nil {
153 defer resp.Body.Close()
154 }
155 if resp.StatusCode != http.StatusOK {
156 var errResp ErrorResponse
157 buf, err := io.ReadAll(resp.Body)
158 if err != nil {
159 return Value{}, err
160 }
161 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
162 if err != nil {
163 var s3Err Error
164 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
165 return Value{}, err
166 }
167 errResp.RequestID = s3Err.RequestID
168 errResp.STSError.Code = s3Err.Code
169 errResp.STSError.Message = s3Err.Message
170 }
171 return Value{}, errResp
172 }
173
174 const MaxSize = 10 * 1 << 20
175 var body io.Reader = resp.Body
176 if resp.ContentLength > 0 && resp.ContentLength < MaxSize {
177 body = io.LimitReader(body, resp.ContentLength)
178 } else {
179 body = io.LimitReader(body, MaxSize)
180 }
181
182 var response assumeRoleWithCertificateResponse
183 if err = xml.NewDecoder(body).Decode(&response); err != nil {
184 return Value{}, err
185 }
186 i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow)
187 return Value{
188 AccessKeyID: response.Result.Credentials.AccessKey,
189 SecretAccessKey: response.Result.Credentials.SecretKey,
190 SessionToken: response.Result.Credentials.SessionToken,
191 SignerType: SignatureDefault,
192 }, nil
193}
194
// Expiration returns the expiration time of the current S3 credentials,
// as recorded by the most recent SetExpiration in Retrieve.
func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration }
197
// assumeRoleWithCertificateResponse maps the XML body returned by a
// successful AssumeRoleWithCertificate STS call.
type assumeRoleWithCertificateResponse struct {
	XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"`
	Result  struct {
		Credentials struct {
			AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
			SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
			Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
			SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
		} `xml:"Credentials" json:"credentials,omitempty"`
	} `xml:"AssumeRoleWithCertificateResult"`
	ResponseMetadata struct {
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
new file mode 100644
index 0000000..2e2af50
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -0,0 +1,205 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "errors"
24 "fmt"
25 "io"
26 "net/http"
27 "net/url"
28 "strconv"
29 "strings"
30 "time"
31)
32
// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request.
type AssumeRoleWithWebIdentityResponse struct {
	XMLName          xml.Name          `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"`
	Result           WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"`
	ResponseMetadata struct {
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}

// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity
// request, including temporary credentials that can be used to make MinIO API requests.
type WebIdentityResult struct {
	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
	Audience        string          `xml:",omitempty"`
	// Temporary credentials issued by the STS endpoint.
	Credentials struct {
		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
	} `xml:",omitempty"`
	PackedPolicySize            int    `xml:",omitempty"`
	Provider                    string `xml:",omitempty"`
	SubjectFromWebIdentityToken string `xml:",omitempty"`
}

// WebIdentityToken - web identity token with expiry.
type WebIdentityToken struct {
	Token       string // self-contained ID token (JWT) from the IDP
	AccessToken string // optional access token, used with extended userInfo endpoints
	Expiry      int    // requested credential validity, sent as DurationSeconds when > 0
}
64
// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSWebIdentity struct {
	// Embedded Expiry tracks when the fetched credentials lapse
	// (set via SetExpiration in Retrieve).
	Expiry

	// Required http Client to use when connecting to MinIO STS service.
	Client *http.Client

	// Exported STS endpoint to fetch STS credentials.
	STSEndpoint string

	// Exported GetWebIDTokenExpiry function which returns ID
	// tokens from IDP. This function should return two values:
	// one is ID token which is a self contained ID token (JWT)
	// and second return value is the expiry associated with
	// this token.
	// This is a customer provided function and is mandatory.
	GetWebIDTokenExpiry func() (*WebIdentityToken, error)

	// RoleARN is the Amazon Resource Name (ARN) of the role that the caller is
	// assuming.
	RoleARN string

	// roleSessionName is the identifier for the assumed role session.
	roleSessionName string
}
91
92// NewSTSWebIdentity returns a pointer to a new
93// Credentials object wrapping the STSWebIdentity.
94func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) {
95 if stsEndpoint == "" {
96 return nil, errors.New("STS endpoint cannot be empty")
97 }
98 if getWebIDTokenExpiry == nil {
99 return nil, errors.New("Web ID token and expiry retrieval function should be defined")
100 }
101 return New(&STSWebIdentity{
102 Client: &http.Client{
103 Transport: http.DefaultTransport,
104 },
105 STSEndpoint: stsEndpoint,
106 GetWebIDTokenExpiry: getWebIDTokenExpiry,
107 }), nil
108}
109
// getWebIdentityCredentials obtains an ID token via getWebIDTokenExpiry,
// performs the AssumeRoleWithWebIdentity STS call against endpoint, and
// decodes the XML response.
func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
	getWebIDTokenExpiry func() (*WebIdentityToken, error),
) (AssumeRoleWithWebIdentityResponse, error) {
	idToken, err := getWebIDTokenExpiry()
	if err != nil {
		return AssumeRoleWithWebIdentityResponse{}, err
	}

	// Build the form-encoded STS request parameters.
	v := url.Values{}
	v.Set("Action", "AssumeRoleWithWebIdentity")
	if len(roleARN) > 0 {
		v.Set("RoleArn", roleARN)

		// A role session name is required alongside RoleArn; generate a
		// unique one from the current time when none was configured.
		if len(roleSessionName) == 0 {
			roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10)
		}
		v.Set("RoleSessionName", roleSessionName)
	}
	v.Set("WebIdentityToken", idToken.Token)
	if idToken.AccessToken != "" {
		// Usually set when server is using extended userInfo endpoint.
		v.Set("WebIdentityAccessToken", idToken.AccessToken)
	}
	if idToken.Expiry > 0 {
		v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
	}
	v.Set("Version", STSVersion)

	u, err := url.Parse(endpoint)
	if err != nil {
		return AssumeRoleWithWebIdentityResponse{}, err
	}

	req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
	if err != nil {
		return AssumeRoleWithWebIdentityResponse{}, err
	}

	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := clnt.Do(req)
	if err != nil {
		return AssumeRoleWithWebIdentityResponse{}, err
	}

	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Non-200: decode the XML error body, first as an STS
		// ErrorResponse, then falling back to an S3-style Error.
		var errResp ErrorResponse
		buf, err := io.ReadAll(resp.Body)
		if err != nil {
			return AssumeRoleWithWebIdentityResponse{}, err
		}
		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
		if err != nil {
			var s3Err Error
			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
				return AssumeRoleWithWebIdentityResponse{}, err
			}
			errResp.RequestID = s3Err.RequestID
			errResp.STSError.Code = s3Err.Code
			errResp.STSError.Message = s3Err.Message
		}
		return AssumeRoleWithWebIdentityResponse{}, errResp
	}

	a := AssumeRoleWithWebIdentityResponse{}
	if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
		return AssumeRoleWithWebIdentityResponse{}, err
	}

	return a, nil
}
182
183// Retrieve retrieves credentials from the MinIO service.
184// Error will be returned if the request fails.
185func (m *STSWebIdentity) Retrieve() (Value, error) {
186 a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
187 if err != nil {
188 return Value{}, err
189 }
190
191 // Expiry window is set to 10secs.
192 m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
193
194 return Value{
195 AccessKeyID: a.Result.Credentials.AccessKey,
196 SecretAccessKey: a.Result.Credentials.SecretKey,
197 SessionToken: a.Result.Credentials.SessionToken,
198 SignerType: SignatureV4,
199 }, nil
200}
201
// Expiration returns the expiration time of the credentials,
// as recorded by the most recent SetExpiration in Retrieve.
func (m *STSWebIdentity) Expiration() time.Time {
	return m.expiration
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
new file mode 100644
index 0000000..6db26c0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
@@ -0,0 +1,24 @@
1//go:build !fips
2// +build !fips
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2022 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package encrypt
22
23// FIPS is true if 'fips' build tag was specified.
24const FIPS = false
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
new file mode 100644
index 0000000..6402582
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
@@ -0,0 +1,24 @@
1//go:build fips
2// +build fips
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2022 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package encrypt
22
23// FIPS is true if 'fips' build tag was specified.
24const FIPS = true
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
new file mode 100644
index 0000000..a7081c5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
@@ -0,0 +1,198 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2018 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package encrypt
19
20import (
21 "crypto/md5"
22 "encoding/base64"
23 "errors"
24 "net/http"
25
26 jsoniter "github.com/json-iterator/go"
27 "golang.org/x/crypto/argon2"
28)
29
30const (
31 // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
32 SseGenericHeader = "X-Amz-Server-Side-Encryption"
33
34 // SseKmsKeyID is the AWS SSE-KMS key id.
35 SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id"
36 // SseEncryptionContext is the AWS SSE-KMS Encryption Context data.
37 SseEncryptionContext = SseGenericHeader + "-Context"
38
39 // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
40 SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm"
41 // SseCustomerKey is the AWS SSE-C encryption key HTTP header key.
42 SseCustomerKey = SseGenericHeader + "-Customer-Key"
43 // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
44 SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5"
45
46 // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
47 SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
48 // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
49 SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
50 // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
51 SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
52)
53
54// PBKDF creates a SSE-C key from the provided password and salt.
55// PBKDF is a password-based key derivation function
56// which can be used to derive a high-entropy cryptographic
57// key from a low-entropy password and a salt.
58type PBKDF func(password, salt []byte) ServerSide
59
60// DefaultPBKDF is the default PBKDF. It uses Argon2id with the
61// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads).
62var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide {
63 sse := ssec{}
64 copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32))
65 return sse
66}
67
68// Type is the server-side-encryption method. It represents one of
69// the following encryption methods:
70// - SSE-C: server-side-encryption with customer provided keys
71// - KMS: server-side-encryption with managed keys
72// - S3: server-side-encryption using S3 storage encryption
73type Type string
74
75const (
76 // SSEC represents server-side-encryption with customer provided keys
77 SSEC Type = "SSE-C"
78 // KMS represents server-side-encryption with managed keys
79 KMS Type = "KMS"
80 // S3 represents server-side-encryption using S3 storage encryption
81 S3 Type = "S3"
82)
83
84// ServerSide is a form of S3 server-side-encryption.
85type ServerSide interface {
86 // Type returns the server-side-encryption method.
87 Type() Type
88
89 // Marshal adds encryption headers to the provided HTTP headers.
90 // It marks an HTTP request as server-side-encryption request
91 // and inserts the required data into the headers.
92 Marshal(h http.Header)
93}
94
95// NewSSE returns a server-side-encryption using S3 storage encryption.
96// Using SSE-S3 the server will encrypt the object with server-managed keys.
97func NewSSE() ServerSide { return s3{} }
98
99// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context.
100func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
101 if context == nil {
102 return kms{key: keyID, hasContext: false}, nil
103 }
104 json := jsoniter.ConfigCompatibleWithStandardLibrary
105 serializedContext, err := json.Marshal(context)
106 if err != nil {
107 return nil, err
108 }
109 return kms{key: keyID, context: serializedContext, hasContext: true}, nil
110}
111
112// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
113// The key must be 32 bytes long.
114func NewSSEC(key []byte) (ServerSide, error) {
115 if len(key) != 32 {
116 return nil, errors.New("encrypt: SSE-C key must be 256 bit long")
117 }
118 sse := ssec{}
119 copy(sse[:], key)
120 return sse, nil
121}
122
123// SSE transforms a SSE-C copy encryption into a SSE-C encryption.
124// It is the inverse of SSECopy(...).
125//
126// If the provided sse is no SSE-C copy encryption SSE returns
127// sse unmodified.
128func SSE(sse ServerSide) ServerSide {
129 if sse == nil || sse.Type() != SSEC {
130 return sse
131 }
132 if sse, ok := sse.(ssecCopy); ok {
133 return ssec(sse)
134 }
135 return sse
136}
137
138// SSECopy transforms a SSE-C encryption into a SSE-C copy
139// encryption. This is required for SSE-C key rotation or a SSE-C
140// copy where the source and the destination should be encrypted.
141//
142// If the provided sse is no SSE-C encryption SSECopy returns
143// sse unmodified.
144func SSECopy(sse ServerSide) ServerSide {
145 if sse == nil || sse.Type() != SSEC {
146 return sse
147 }
148 if sse, ok := sse.(ssec); ok {
149 return ssecCopy(sse)
150 }
151 return sse
152}
153
154type ssec [32]byte
155
156func (s ssec) Type() Type { return SSEC }
157
158func (s ssec) Marshal(h http.Header) {
159 keyMD5 := md5.Sum(s[:])
160 h.Set(SseCustomerAlgorithm, "AES256")
161 h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
162 h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
163}
164
165type ssecCopy [32]byte
166
167func (s ssecCopy) Type() Type { return SSEC }
168
169func (s ssecCopy) Marshal(h http.Header) {
170 keyMD5 := md5.Sum(s[:])
171 h.Set(SseCopyCustomerAlgorithm, "AES256")
172 h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
173 h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
174}
175
176type s3 struct{}
177
178func (s s3) Type() Type { return S3 }
179
180func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") }
181
182type kms struct {
183 key string
184 context []byte
185 hasContext bool
186}
187
188func (s kms) Type() Type { return KMS }
189
190func (s kms) Marshal(h http.Header) {
191 h.Set(SseGenericHeader, "aws:kms")
192 if s.key != "" {
193 h.Set(SseKmsKeyID, s.key)
194 }
195 if s.hasContext {
196 h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
197 }
198}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
new file mode 100644
index 0000000..c52f78c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -0,0 +1,491 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18// Package lifecycle contains all the lifecycle related data types and marshallers.
19package lifecycle
20
21import (
22 "encoding/json"
23 "encoding/xml"
24 "errors"
25 "time"
26)
27
28var errMissingStorageClass = errors.New("storage-class cannot be empty")
29
30// AbortIncompleteMultipartUpload structure, not supported yet on MinIO
31type AbortIncompleteMultipartUpload struct {
32 XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"`
33 DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"`
34}
35
36// IsDaysNull returns true if days field is null
37func (n AbortIncompleteMultipartUpload) IsDaysNull() bool {
38 return n.DaysAfterInitiation == ExpirationDays(0)
39}
40
41// MarshalXML if days after initiation is set to non-zero value
42func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
43 if n.IsDaysNull() {
44 return nil
45 }
46 type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload
47 return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start)
48}
49
50// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire.
51// Upon expiration, server permanently deletes the noncurrent object versions.
52// Set this lifecycle configuration action on a bucket that has versioning enabled
53// (or suspended) to request server delete noncurrent object versions at a
54// specific period in the object's lifetime.
55type NoncurrentVersionExpiration struct {
56 XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
57 NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
58 NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
59}
60
61// MarshalXML if n is non-empty, i.e has a non-zero NoncurrentDays or NewerNoncurrentVersions.
62func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
63 if n.isNull() {
64 return nil
65 }
66 type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
67 return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start)
68}
69
70// IsDaysNull returns true if days field is null
71func (n NoncurrentVersionExpiration) IsDaysNull() bool {
72 return n.NoncurrentDays == ExpirationDays(0)
73}
74
75func (n NoncurrentVersionExpiration) isNull() bool {
76 return n.IsDaysNull() && n.NewerNoncurrentVersions == 0
77}
78
79// NoncurrentVersionTransition structure, set this action to request server to
80// transition noncurrent object versions to different set storage classes
81// at a specific period in the object's lifetime.
82type NoncurrentVersionTransition struct {
83 XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
84 StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
85 NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"`
86 NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
87}
88
89// IsDaysNull returns true if days field is null
90func (n NoncurrentVersionTransition) IsDaysNull() bool {
91 return n.NoncurrentDays == ExpirationDays(0)
92}
93
94// IsStorageClassEmpty returns true if storage class field is empty
95func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool {
96 return n.StorageClass == ""
97}
98
99func (n NoncurrentVersionTransition) isNull() bool {
100 return n.StorageClass == ""
101}
102
103// UnmarshalJSON implements NoncurrentVersionTransition JSONify
104func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error {
105 type noncurrentVersionTransition NoncurrentVersionTransition
106 var nt noncurrentVersionTransition
107 err := json.Unmarshal(b, &nt)
108 if err != nil {
109 return err
110 }
111
112 if nt.StorageClass == "" {
113 return errMissingStorageClass
114 }
115 *n = NoncurrentVersionTransition(nt)
116 return nil
117}
118
119// MarshalXML is extended to leave out
120// <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
121func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
122 if n.isNull() {
123 return nil
124 }
125 type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
126 return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start)
127}
128
129// Tag structure key/value pair representing an object tag to apply lifecycle configuration
130type Tag struct {
131 XMLName xml.Name `xml:"Tag,omitempty" json:"-"`
132 Key string `xml:"Key,omitempty" json:"Key,omitempty"`
133 Value string `xml:"Value,omitempty" json:"Value,omitempty"`
134}
135
136// IsEmpty returns whether this tag is empty or not.
137func (tag Tag) IsEmpty() bool {
138 return tag.Key == ""
139}
140
141// Transition structure - transition details of lifecycle configuration
142type Transition struct {
143 XMLName xml.Name `xml:"Transition" json:"-"`
144 Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
145 StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
146 Days ExpirationDays `xml:"Days" json:"Days"`
147}
148
149// UnmarshalJSON returns an error if storage-class is empty.
150func (t *Transition) UnmarshalJSON(b []byte) error {
151 type transition Transition
152 var tr transition
153 err := json.Unmarshal(b, &tr)
154 if err != nil {
155 return err
156 }
157
158 if tr.StorageClass == "" {
159 return errMissingStorageClass
160 }
161 *t = Transition(tr)
162 return nil
163}
164
165// MarshalJSON customizes json encoding by omitting empty values
166func (t Transition) MarshalJSON() ([]byte, error) {
167 if t.IsNull() {
168 return nil, nil
169 }
170 type transition struct {
171 Date *ExpirationDate `json:"Date,omitempty"`
172 StorageClass string `json:"StorageClass,omitempty"`
173 Days *ExpirationDays `json:"Days"`
174 }
175
176 newt := transition{
177 StorageClass: t.StorageClass,
178 }
179
180 if !t.IsDateNull() {
181 newt.Date = &t.Date
182 } else {
183 newt.Days = &t.Days
184 }
185 return json.Marshal(newt)
186}
187
188// IsDaysNull returns true if days field is null
189func (t Transition) IsDaysNull() bool {
190 return t.Days == ExpirationDays(0)
191}
192
193// IsDateNull returns true if date field is null
194func (t Transition) IsDateNull() bool {
195 return t.Date.Time.IsZero()
196}
197
198// IsNull returns true if no storage-class is set.
199func (t Transition) IsNull() bool {
200 return t.StorageClass == ""
201}
202
203// MarshalXML is transition is non null
204func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
205 if t.IsNull() {
206 return nil
207 }
208 type transitionWrapper Transition
209 return en.EncodeElement(transitionWrapper(t), startElement)
210}
211
212// And And Rule for LifecycleTag, to be used in LifecycleRuleFilter
213type And struct {
214 XMLName xml.Name `xml:"And" json:"-"`
215 Prefix string `xml:"Prefix" json:"Prefix,omitempty"`
216 Tags []Tag `xml:"Tag" json:"Tags,omitempty"`
217 ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
218 ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
219}
220
221// IsEmpty returns true if Tags field is null
222func (a And) IsEmpty() bool {
223 return len(a.Tags) == 0 && a.Prefix == "" &&
224 a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0
225}
226
227// Filter will be used in selecting rule(s) for lifecycle configuration
228type Filter struct {
229 XMLName xml.Name `xml:"Filter" json:"-"`
230 And And `xml:"And,omitempty" json:"And,omitempty"`
231 Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
232 Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
233 ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
234 ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
235}
236
237// IsNull returns true if all Filter fields are empty.
238func (f Filter) IsNull() bool {
239 return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" &&
240 f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0
241}
242
243// MarshalJSON customizes json encoding by removing empty values.
244func (f Filter) MarshalJSON() ([]byte, error) {
245 type filter struct {
246 And *And `json:"And,omitempty"`
247 Prefix string `json:"Prefix,omitempty"`
248 Tag *Tag `json:"Tag,omitempty"`
249 ObjectSizeLessThan int64 `json:"ObjectSizeLessThan,omitempty"`
250 ObjectSizeGreaterThan int64 `json:"ObjectSizeGreaterThan,omitempty"`
251 }
252
253 newf := filter{
254 Prefix: f.Prefix,
255 }
256 if !f.Tag.IsEmpty() {
257 newf.Tag = &f.Tag
258 }
259 if !f.And.IsEmpty() {
260 newf.And = &f.And
261 }
262 newf.ObjectSizeLessThan = f.ObjectSizeLessThan
263 newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan
264 return json.Marshal(newf)
265}
266
267// MarshalXML - produces the xml representation of the Filter struct
268// only one of Prefix, And and Tag should be present in the output.
269func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
270 if err := e.EncodeToken(start); err != nil {
271 return err
272 }
273
274 switch {
275 case !f.And.IsEmpty():
276 if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil {
277 return err
278 }
279 case !f.Tag.IsEmpty():
280 if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
281 return err
282 }
283 default:
284 if f.ObjectSizeLessThan > 0 {
285 if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil {
286 return err
287 }
288 break
289 }
290 if f.ObjectSizeGreaterThan > 0 {
291 if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil {
292 return err
293 }
294 break
295 }
296 // Print empty Prefix field only when everything else is empty
297 if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
298 return err
299 }
300 }
301
302 return e.EncodeToken(xml.EndElement{Name: start.Name})
303}
304
305// ExpirationDays is a type alias to unmarshal Days in Expiration
306type ExpirationDays int
307
308// MarshalXML encodes number of days to expire if it is non-zero and
309// encodes empty string otherwise
310func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
311 if eDays == 0 {
312 return nil
313 }
314 return e.EncodeElement(int(eDays), startElement)
315}
316
317// ExpirationDate is a embedded type containing time.Time to unmarshal
318// Date in Expiration
319type ExpirationDate struct {
320 time.Time
321}
322
323// MarshalXML encodes expiration date if it is non-zero and encodes
324// empty string otherwise
325func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
326 if eDate.Time.IsZero() {
327 return nil
328 }
329 return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
330}
331
332// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
333type ExpireDeleteMarker ExpirationBoolean
334
335// IsEnabled returns true if the auto delete-marker expiration is enabled
336func (e ExpireDeleteMarker) IsEnabled() bool {
337 return bool(e)
338}
339
340// ExpirationBoolean represents an XML version of 'bool' type
341type ExpirationBoolean bool
342
343// MarshalXML encodes delete marker boolean into an XML form.
344func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
345 if !b {
346 return nil
347 }
348 type booleanWrapper ExpirationBoolean
349 return e.EncodeElement(booleanWrapper(b), startElement)
350}
351
352// IsEnabled returns true if the expiration boolean is enabled
353func (b ExpirationBoolean) IsEnabled() bool {
354 return bool(b)
355}
356
357// Expiration structure - expiration details of lifecycle configuration
358type Expiration struct {
359 XMLName xml.Name `xml:"Expiration,omitempty" json:"-"`
360 Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
361 Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
362 DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"`
363 DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"`
364}
365
366// MarshalJSON customizes json encoding by removing empty day/date specification.
367func (e Expiration) MarshalJSON() ([]byte, error) {
368 type expiration struct {
369 Date *ExpirationDate `json:"Date,omitempty"`
370 Days *ExpirationDays `json:"Days,omitempty"`
371 DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"`
372 DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"`
373 }
374
375 newexp := expiration{
376 DeleteMarker: e.DeleteMarker,
377 DeleteAll: e.DeleteAll,
378 }
379 if !e.IsDaysNull() {
380 newexp.Days = &e.Days
381 }
382 if !e.IsDateNull() {
383 newexp.Date = &e.Date
384 }
385 return json.Marshal(newexp)
386}
387
388// IsDaysNull returns true if days field is null
389func (e Expiration) IsDaysNull() bool {
390 return e.Days == ExpirationDays(0)
391}
392
393// IsDateNull returns true if date field is null
394func (e Expiration) IsDateNull() bool {
395 return e.Date.Time.IsZero()
396}
397
398// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled
399func (e Expiration) IsDeleteMarkerExpirationEnabled() bool {
400 return e.DeleteMarker.IsEnabled()
401}
402
403// IsNull returns true if both date and days fields are null
404func (e Expiration) IsNull() bool {
405 return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled()
406}
407
408// MarshalXML is expiration is non null
409func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
410 if e.IsNull() {
411 return nil
412 }
413 type expirationWrapper Expiration
414 return en.EncodeElement(expirationWrapper(e), startElement)
415}
416
417// MarshalJSON customizes json encoding by omitting empty values
418func (r Rule) MarshalJSON() ([]byte, error) {
419 type rule struct {
420 AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
421 Expiration *Expiration `json:"Expiration,omitempty"`
422 ID string `json:"ID"`
423 RuleFilter *Filter `json:"Filter,omitempty"`
424 NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"`
425 NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"`
426 Prefix string `json:"Prefix,omitempty"`
427 Status string `json:"Status"`
428 Transition *Transition `json:"Transition,omitempty"`
429 }
430 newr := rule{
431 Prefix: r.Prefix,
432 Status: r.Status,
433 ID: r.ID,
434 }
435
436 if !r.RuleFilter.IsNull() {
437 newr.RuleFilter = &r.RuleFilter
438 }
439 if !r.AbortIncompleteMultipartUpload.IsDaysNull() {
440 newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload
441 }
442 if !r.Expiration.IsNull() {
443 newr.Expiration = &r.Expiration
444 }
445 if !r.Transition.IsNull() {
446 newr.Transition = &r.Transition
447 }
448 if !r.NoncurrentVersionExpiration.isNull() {
449 newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration
450 }
451 if !r.NoncurrentVersionTransition.isNull() {
452 newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
453 }
454
455 return json.Marshal(newr)
456}
457
458// Rule represents a single rule in lifecycle configuration
459type Rule struct {
460 XMLName xml.Name `xml:"Rule,omitempty" json:"-"`
461 AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"`
462 Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"`
463 ID string `xml:"ID" json:"ID"`
464 RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"`
465 NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"`
466 NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"`
467 Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
468 Status string `xml:"Status" json:"Status"`
469 Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"`
470}
471
472// Configuration is a collection of Rule objects.
473type Configuration struct {
474 XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"`
475 Rules []Rule `xml:"Rule"`
476}
477
478// Empty check if lifecycle configuration is empty
479func (c *Configuration) Empty() bool {
480 if c == nil {
481 return true
482 }
483 return len(c.Rules) == 0
484}
485
486// NewConfiguration initializes a fresh lifecycle configuration
487// for manipulation, such as setting and removing lifecycle rules
488// and filters.
489func NewConfiguration() *Configuration {
490 return &Configuration{}
491}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
new file mode 100644
index 0000000..126661a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
@@ -0,0 +1,78 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package notification
19
20// Indentity represents the user id, this is a compliance field.
21type identity struct {
22 PrincipalID string `json:"principalId"`
23}
24
25// event bucket metadata.
26type bucketMeta struct {
27 Name string `json:"name"`
28 OwnerIdentity identity `json:"ownerIdentity"`
29 ARN string `json:"arn"`
30}
31
32// event object metadata.
33type objectMeta struct {
34 Key string `json:"key"`
35 Size int64 `json:"size,omitempty"`
36 ETag string `json:"eTag,omitempty"`
37 ContentType string `json:"contentType,omitempty"`
38 UserMetadata map[string]string `json:"userMetadata,omitempty"`
39 VersionID string `json:"versionId,omitempty"`
40 Sequencer string `json:"sequencer"`
41}
42
43// event server specific metadata.
44type eventMeta struct {
45 SchemaVersion string `json:"s3SchemaVersion"`
46 ConfigurationID string `json:"configurationId"`
47 Bucket bucketMeta `json:"bucket"`
48 Object objectMeta `json:"object"`
49}
50
51// sourceInfo represents information on the client that
52// triggered the event notification.
53type sourceInfo struct {
54 Host string `json:"host"`
55 Port string `json:"port"`
56 UserAgent string `json:"userAgent"`
57}
58
59// Event represents an Amazon an S3 bucket notification event.
60type Event struct {
61 EventVersion string `json:"eventVersion"`
62 EventSource string `json:"eventSource"`
63 AwsRegion string `json:"awsRegion"`
64 EventTime string `json:"eventTime"`
65 EventName string `json:"eventName"`
66 UserIdentity identity `json:"userIdentity"`
67 RequestParameters map[string]string `json:"requestParameters"`
68 ResponseElements map[string]string `json:"responseElements"`
69 S3 eventMeta `json:"s3"`
70 Source sourceInfo `json:"source"`
71}
72
73// Info - represents the collection of notification events, additionally
74// also reports errors if any while listening on bucket notifications.
75type Info struct {
76 Records []Event
77 Err error
78}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
new file mode 100644
index 0000000..a44799d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -0,0 +1,440 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package notification
19
20import (
21 "encoding/xml"
22 "errors"
23 "fmt"
24 "strings"
25
26 "github.com/minio/minio-go/v7/pkg/set"
27)
28
// EventType is a S3 notification event associated to the bucket notification configuration
type EventType string

// The role of all event types are described in :
//
//	http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
//
// Entries suffixed with "*" are wildcards matching every event of that
// family. The Scanner/Replication/Transition families are MinIO extensions.
const (
	ObjectCreatedAll                                   EventType = "s3:ObjectCreated:*"
	ObjectCreatedPut                                   EventType = "s3:ObjectCreated:Put"
	ObjectCreatedPost                                  EventType = "s3:ObjectCreated:Post"
	ObjectCreatedCopy                                  EventType = "s3:ObjectCreated:Copy"
	ObjectCreatedDeleteTagging                         EventType = "s3:ObjectCreated:DeleteTagging"
	ObjectCreatedCompleteMultipartUpload               EventType = "s3:ObjectCreated:CompleteMultipartUpload"
	ObjectCreatedPutLegalHold                          EventType = "s3:ObjectCreated:PutLegalHold"
	ObjectCreatedPutRetention                          EventType = "s3:ObjectCreated:PutRetention"
	ObjectCreatedPutTagging                            EventType = "s3:ObjectCreated:PutTagging"
	ObjectAccessedGet                                  EventType = "s3:ObjectAccessed:Get"
	ObjectAccessedHead                                 EventType = "s3:ObjectAccessed:Head"
	ObjectAccessedGetRetention                         EventType = "s3:ObjectAccessed:GetRetention"
	ObjectAccessedGetLegalHold                         EventType = "s3:ObjectAccessed:GetLegalHold"
	ObjectAccessedAll                                  EventType = "s3:ObjectAccessed:*"
	ObjectRemovedAll                                   EventType = "s3:ObjectRemoved:*"
	ObjectRemovedDelete                                EventType = "s3:ObjectRemoved:Delete"
	ObjectRemovedDeleteMarkerCreated                   EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
	ObjectReducedRedundancyLostObject                  EventType = "s3:ReducedRedundancyLostObject"
	ObjectTransitionAll                                EventType = "s3:ObjectTransition:*"
	ObjectTransitionFailed                             EventType = "s3:ObjectTransition:Failed"
	ObjectTransitionComplete                           EventType = "s3:ObjectTransition:Complete"
	ObjectTransitionPost                               EventType = "s3:ObjectRestore:Post"
	ObjectTransitionCompleted                          EventType = "s3:ObjectRestore:Completed"
	ObjectReplicationAll                               EventType = "s3:Replication:*"
	ObjectReplicationOperationCompletedReplication     EventType = "s3:Replication:OperationCompletedReplication"
	ObjectReplicationOperationFailedReplication        EventType = "s3:Replication:OperationFailedReplication"
	ObjectReplicationOperationMissedThreshold          EventType = "s3:Replication:OperationMissedThreshold"
	ObjectReplicationOperationNotTracked               EventType = "s3:Replication:OperationNotTracked"
	ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold"
	ObjectScannerManyVersions                          EventType = "s3:Scanner:ManyVersions"
	ObjectScannerBigPrefix                             EventType = "s3:Scanner:BigPrefix"
	ObjectScannerAll                                   EventType = "s3:Scanner:*"
	BucketCreatedAll                                   EventType = "s3:BucketCreated:*"
	BucketRemovedAll                                   EventType = "s3:BucketRemoved:*"
)
71
// FilterRule - child of S3Key, a tag in the notification xml which
// carries suffix/prefix filters. Name is either "suffix" or "prefix".
type FilterRule struct {
	Name  string `xml:"Name"`
	Value string `xml:"Value"`
}

// S3Key - child of Filter, a tag in the notification xml which
// carries suffix/prefix filters
type S3Key struct {
	FilterRules []FilterRule `xml:"FilterRule,omitempty"`
}

// Filter - a tag in the notification xml structure which carries
// suffix/prefix filters
type Filter struct {
	S3Key S3Key `xml:"S3Key,omitempty"`
}
90
// Arn - holds ARN information that will be sent to the web service,
// ARN description can be found in
// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
type Arn struct {
	Partition string
	Service   string
	Region    string
	AccountID string
	Resource  string
}

// NewArn creates new ARN based on the given partition, service, region,
// account id and resource.
func NewArn(partition, service, region, accountID, resource string) Arn {
	return Arn{
		Partition: partition,
		Service:   service,
		Region:    region,
		AccountID: accountID,
		Resource:  resource,
	}
}

var (
	// ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn'
	ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'")
	// ErrInvalidArnFormat is returned when ARN string format is not valid
	ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:<partition>:<service>:<region>:<accountID>:<resource>'")
)

// NewArnFromString parses string representation of ARN into Arn object.
// The resource part may itself contain ':' characters; everything after
// the fifth separator is kept as the resource, so any value produced by
// Arn.String round-trips. Returns ErrInvalidArnFormat when fewer than six
// fields are present and ErrInvalidArnPrefix when the string does not
// start with "arn".
func NewArnFromString(arn string) (Arn, error) {
	// SplitN with a limit of 6 keeps colons inside the resource segment
	// intact (strings.Split would reject such ARNs with a format error).
	parts := strings.SplitN(arn, ":", 6)
	if len(parts) != 6 {
		return Arn{}, ErrInvalidArnFormat
	}
	if parts[0] != "arn" {
		return Arn{}, ErrInvalidArnPrefix
	}

	return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil
}

// String returns the string format of the ARN
func (arn Arn) String() string {
	return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
}
137
// Config - represents one single notification configuration
// such as topic, queue or lambda configuration.
// Arn is kept out of the XML encoding; it is rendered into the
// Topic/Queue/CloudFunction element by the wrapping config types.
type Config struct {
	ID     string      `xml:"Id,omitempty"`
	Arn    Arn         `xml:"-"`
	Events []EventType `xml:"Event"`
	Filter *Filter     `xml:"Filter,omitempty"`
}

// NewConfig creates one notification config and sets the given ARN
func NewConfig(arn Arn) Config {
	return Config{Arn: arn, Filter: &Filter{}}
}
151
152// AddEvents adds one event to the current notification config
153func (t *Config) AddEvents(events ...EventType) {
154 t.Events = append(t.Events, events...)
155}
156
157// AddFilterSuffix sets the suffix configuration to the current notification config
158func (t *Config) AddFilterSuffix(suffix string) {
159 if t.Filter == nil {
160 t.Filter = &Filter{}
161 }
162 newFilterRule := FilterRule{Name: "suffix", Value: suffix}
163 // Replace any suffix rule if existing and add to the list otherwise
164 for index := range t.Filter.S3Key.FilterRules {
165 if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
166 t.Filter.S3Key.FilterRules[index] = newFilterRule
167 return
168 }
169 }
170 t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
171}
172
173// AddFilterPrefix sets the prefix configuration to the current notification config
174func (t *Config) AddFilterPrefix(prefix string) {
175 if t.Filter == nil {
176 t.Filter = &Filter{}
177 }
178 newFilterRule := FilterRule{Name: "prefix", Value: prefix}
179 // Replace any prefix rule if existing and add to the list otherwise
180 for index := range t.Filter.S3Key.FilterRules {
181 if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
182 t.Filter.S3Key.FilterRules[index] = newFilterRule
183 return
184 }
185 }
186 t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
187}
188
189// EqualEventTypeList tells whether a and b contain the same events
190func EqualEventTypeList(a, b []EventType) bool {
191 if len(a) != len(b) {
192 return false
193 }
194 setA := set.NewStringSet()
195 for _, i := range a {
196 setA.Add(string(i))
197 }
198
199 setB := set.NewStringSet()
200 for _, i := range b {
201 setB.Add(string(i))
202 }
203
204 return setA.Difference(setB).IsEmpty()
205}
206
207// EqualFilterRuleList tells whether a and b contain the same filters
208func EqualFilterRuleList(a, b []FilterRule) bool {
209 if len(a) != len(b) {
210 return false
211 }
212
213 setA := set.NewStringSet()
214 for _, i := range a {
215 setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
216 }
217
218 setB := set.NewStringSet()
219 for _, i := range b {
220 setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
221 }
222
223 return setA.Difference(setB).IsEmpty()
224}
225
226// Equal returns whether this `Config` is equal to another defined by the passed parameters
227func (t *Config) Equal(events []EventType, prefix, suffix string) bool {
228 if t == nil {
229 return false
230 }
231
232 // Compare events
233 passEvents := EqualEventTypeList(t.Events, events)
234
235 // Compare filters
236 var newFilterRules []FilterRule
237 if prefix != "" {
238 newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix})
239 }
240 if suffix != "" {
241 newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix})
242 }
243
244 var currentFilterRules []FilterRule
245 if t.Filter != nil {
246 currentFilterRules = t.Filter.S3Key.FilterRules
247 }
248
249 passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules)
250 return passEvents && passFilters
251}
252
// TopicConfig carries one single topic notification configuration.
// Topic holds the string form of the embedded Config's ARN.
type TopicConfig struct {
	Config
	Topic string `xml:"Topic"`
}

// QueueConfig carries one single queue notification configuration.
// Queue holds the string form of the embedded Config's ARN.
type QueueConfig struct {
	Config
	Queue string `xml:"Queue"`
}

// LambdaConfig carries one single cloudfunction notification configuration.
// Lambda holds the string form of the embedded Config's ARN.
type LambdaConfig struct {
	Config
	Lambda string `xml:"CloudFunction"`
}

// Configuration - the struct that represents the whole XML to be sent to the web service
type Configuration struct {
	XMLName       xml.Name       `xml:"NotificationConfiguration"`
	LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
	TopicConfigs  []TopicConfig  `xml:"TopicConfiguration"`
	QueueConfigs  []QueueConfig  `xml:"QueueConfiguration"`
}
278
// AddTopic adds a given topic config to the general bucket notification config.
// Returns false (and does not add) when an existing config with the same ARN
// and the same Filter already listens for any of the new config's events;
// otherwise the config is appended and true is returned.
func (b *Configuration) AddTopic(topicConfig Config) bool {
	newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()}
	for _, n := range b.TopicConfigs {
		// If new config matches existing one.
		// NOTE(review): Filter is a *Filter, so this compares pointer
		// identity, not filter contents — two configs with equal but
		// distinct Filter values are treated as different. Confirm intended.
		if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {

			existingConfig := set.NewStringSet()
			for _, v := range n.Events {
				existingConfig.Add(string(v))
			}

			newConfig := set.NewStringSet()
			for _, v := range topicConfig.Events {
				newConfig.Add(string(v))
			}

			// Reject only when the event sets overlap.
			if !newConfig.Intersection(existingConfig).IsEmpty() {
				return false
			}
		}
	}
	b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
	return true
}
304
// AddQueue adds a given queue config to the general bucket notification config.
// Returns false (and does not add) when an existing config with the same ARN
// and the same Filter already listens for any of the new config's events;
// otherwise the config is appended and true is returned.
func (b *Configuration) AddQueue(queueConfig Config) bool {
	newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()}
	for _, n := range b.QueueConfigs {
		// NOTE(review): Filter comparison is pointer identity (see AddTopic).
		if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {

			existingConfig := set.NewStringSet()
			for _, v := range n.Events {
				existingConfig.Add(string(v))
			}

			newConfig := set.NewStringSet()
			for _, v := range queueConfig.Events {
				newConfig.Add(string(v))
			}

			// Reject only when the event sets overlap.
			if !newConfig.Intersection(existingConfig).IsEmpty() {
				return false
			}
		}
	}
	b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
	return true
}
329
// AddLambda adds a given lambda config to the general bucket notification config.
// Returns false (and does not add) when an existing config with the same ARN
// and the same Filter already listens for any of the new config's events;
// otherwise the config is appended and true is returned.
func (b *Configuration) AddLambda(lambdaConfig Config) bool {
	newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
	for _, n := range b.LambdaConfigs {
		// NOTE(review): Filter comparison is pointer identity (see AddTopic).
		if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {

			existingConfig := set.NewStringSet()
			for _, v := range n.Events {
				existingConfig.Add(string(v))
			}

			newConfig := set.NewStringSet()
			for _, v := range lambdaConfig.Events {
				newConfig.Add(string(v))
			}

			// Reject only when the event sets overlap.
			if !newConfig.Intersection(existingConfig).IsEmpty() {
				return false
			}
		}
	}
	b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
	return true
}
354
355// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
356func (b *Configuration) RemoveTopicByArn(arn Arn) {
357 var topics []TopicConfig
358 for _, topic := range b.TopicConfigs {
359 if topic.Topic != arn.String() {
360 topics = append(topics, topic)
361 }
362 }
363 b.TopicConfigs = topics
364}
365
// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete
var ErrNoConfigMatch = errors.New("no notification configuration matched")
368
369// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix
370func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
371 removeIndex := -1
372 for i, v := range b.TopicConfigs {
373 // if it matches events and filters, mark the index for deletion
374 if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) {
375 removeIndex = i
376 break // since we have at most one matching config
377 }
378 }
379 if removeIndex >= 0 {
380 b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...)
381 return nil
382 }
383 return ErrNoConfigMatch
384}
385
386// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
387func (b *Configuration) RemoveQueueByArn(arn Arn) {
388 var queues []QueueConfig
389 for _, queue := range b.QueueConfigs {
390 if queue.Queue != arn.String() {
391 queues = append(queues, queue)
392 }
393 }
394 b.QueueConfigs = queues
395}
396
397// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that match the exact specified ARN, events, prefix and suffix
398func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
399 removeIndex := -1
400 for i, v := range b.QueueConfigs {
401 // if it matches events and filters, mark the index for deletion
402 if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) {
403 removeIndex = i
404 break // since we have at most one matching config
405 }
406 }
407 if removeIndex >= 0 {
408 b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...)
409 return nil
410 }
411 return ErrNoConfigMatch
412}
413
414// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
415func (b *Configuration) RemoveLambdaByArn(arn Arn) {
416 var lambdas []LambdaConfig
417 for _, lambda := range b.LambdaConfigs {
418 if lambda.Lambda != arn.String() {
419 lambdas = append(lambdas, lambda)
420 }
421 }
422 b.LambdaConfigs = lambdas
423}
424
425// RemoveLambdaByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix
426func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
427 removeIndex := -1
428 for i, v := range b.LambdaConfigs {
429 // if it matches events and filters, mark the index for deletion
430 if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) {
431 removeIndex = i
432 break // since we have at most one matching config
433 }
434 }
435 if removeIndex >= 0 {
436 b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...)
437 return nil
438 }
439 return ErrNoConfigMatch
440}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
new file mode 100644
index 0000000..0abbf6e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -0,0 +1,971 @@
1/*
2 * MinIO Client (C) 2020 MinIO, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package replication
18
19import (
20 "bytes"
21 "encoding/xml"
22 "fmt"
23 "math"
24 "strconv"
25 "strings"
26 "time"
27 "unicode/utf8"
28
29 "github.com/rs/xid"
30)
31
// errInvalidFilter is returned by Filter.Validate when more than one of
// Prefix, Tag or And is set on the same filter.
var errInvalidFilter = fmt.Errorf("invalid filter")

// OptionType specifies operation to be performed on config
type OptionType string

const (
	// AddOption specifies addition of rule to config
	AddOption OptionType = "Add"
	// SetOption specifies modification of existing rule to config
	SetOption OptionType = "Set"

	// RemoveOption specifies rule options are for removing a rule
	RemoveOption OptionType = "Remove"
	// ImportOption is for getting current config
	ImportOption OptionType = "Import"
)
48
// Options represents options to set a replication configuration rule.
// The string-valued toggle fields (RuleStatus, ReplicateDeletes, …) accept
// "enable"/"disable"; empty means "leave default / unchanged".
type Options struct {
	Op                      OptionType
	RoleArn                 string
	ID                      string
	Prefix                  string
	RuleStatus              string
	Priority                string // parsed with strconv.Atoi by AddRule/EditRule
	TagString               string // '&'-separated k=v pairs, see Options.Tags
	StorageClass            string
	DestBucket              string
	IsTagSet                bool
	IsSCSet                 bool
	ReplicateDeletes        string // replicate versioned deletes
	ReplicateDeleteMarkers  string // replicate soft deletes
	ReplicaSync             string // replicate replica metadata modifications
	ExistingObjectReplicate string
}
67
68// Tags returns a slice of tags for a rule
69func (opts Options) Tags() ([]Tag, error) {
70 var tagList []Tag
71 tagTokens := strings.Split(opts.TagString, "&")
72 for _, tok := range tagTokens {
73 if tok == "" {
74 break
75 }
76 kv := strings.SplitN(tok, "=", 2)
77 if len(kv) != 2 {
78 return []Tag{}, fmt.Errorf("tags should be entered as comma separated k=v pairs")
79 }
80 tagList = append(tagList, Tag{
81 Key: kv[0],
82 Value: kv[1],
83 })
84 }
85 return tagList, nil
86}
87
// Config - replication configuration specified in
// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
type Config struct {
	XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"`
	Rules   []Rule   `xml:"Rule" json:"Rules"`
	Role    string   `xml:"Role" json:"Role"`
}

// Empty returns true if config is not set (holds no rules).
func (c *Config) Empty() bool {
	return len(c.Rules) == 0
}
100
// AddRule adds a new rule to existing replication config. If a rule exists with the
// same ID, then the rule is replaced.
// opts.Priority must parse as an integer and be unique among existing rules;
// opts.RuleStatus must be "enable" or "disable". Returns an error (leaving
// the config unchanged, apart from a possible Role migration) on invalid input.
func (c *Config) AddRule(opts Options) error {
	priority, err := strconv.Atoi(opts.Priority)
	if err != nil {
		return err
	}
	var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite
	if opts.RoleArn != "" {
		tokens := strings.Split(opts.RoleArn, ":")
		if len(tokens) != 6 {
			return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn)
		}
		switch {
		// Legacy MinIO role ARN is only accepted on an empty config.
		case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0:
			c.Role = opts.RoleArn
			compatSw = true
		case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"):
			c.Role = opts.RoleArn
		default:
			return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn)
		}
	}

	var status Status
	// toggle rule status for edit option
	switch opts.RuleStatus {
	case "enable":
		status = Enabled
	case "disable":
		status = Disabled
	default:
		return fmt.Errorf("rule state should be either [enable|disable]")
	}

	tags, err := opts.Tags()
	if err != nil {
		return err
	}
	andVal := And{
		Tags: tags,
	}
	filter := Filter{Prefix: opts.Prefix}
	// only a single tag is set.
	if opts.Prefix == "" && len(tags) == 1 {
		filter.Tag = tags[0]
	}
	// both prefix and tag are present: move everything under <And> and
	// clear the top-level fields (S3 allows only one of Prefix/Tag/And).
	if len(andVal.Tags) > 1 || opts.Prefix != "" {
		filter.And = andVal
		filter.And.Prefix = opts.Prefix
		filter.Prefix = ""
		filter.Tag = Tag{}
	}
	// Generate a unique rule ID when the caller did not supply one.
	if opts.ID == "" {
		opts.ID = xid.New().String()
	}

	destBucket := opts.DestBucket
	// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
	if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 {
		// A bare bucket name is tolerated only in legacy-compat mode.
		if len(btokens) == 1 && compatSw {
			destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
		} else {
			return fmt.Errorf("destination bucket needs to be in Arn format")
		}
	}
	dmStatus := Disabled
	if opts.ReplicateDeleteMarkers != "" {
		switch opts.ReplicateDeleteMarkers {
		case "enable":
			dmStatus = Enabled
		case "disable":
			dmStatus = Disabled
		default:
			return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable")
		}
	}

	vDeleteStatus := Disabled
	if opts.ReplicateDeletes != "" {
		switch opts.ReplicateDeletes {
		case "enable":
			vDeleteStatus = Enabled
		case "disable":
			vDeleteStatus = Disabled
		default:
			return fmt.Errorf("ReplicateDeletes should be either enable|disable")
		}
	}
	var replicaSync Status
	// replica sync is by default Enabled, unless specified.
	switch opts.ReplicaSync {
	case "enable", "":
		replicaSync = Enabled
	case "disable":
		replicaSync = Disabled
	default:
		return fmt.Errorf("replica metadata sync should be either [enable|disable]")
	}

	var existingStatus Status
	if opts.ExistingObjectReplicate != "" {
		switch opts.ExistingObjectReplicate {
		case "enable":
			existingStatus = Enabled
		case "disable", "":
			existingStatus = Disabled
		default:
			return fmt.Errorf("existingObjectReplicate should be either enable|disable")
		}
	}
	newRule := Rule{
		ID:       opts.ID,
		Priority: priority,
		Status:   status,
		Filter:   filter,
		Destination: Destination{
			Bucket:       destBucket,
			StorageClass: opts.StorageClass,
		},
		DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus},
		DeleteReplication:       DeleteReplication{Status: vDeleteStatus},
		// MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow
		// automatic failover as the expectation in this case is that replica and source should be identical.
		// However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html
		SourceSelectionCriteria: SourceSelectionCriteria{
			ReplicaModifications: ReplicaModifications{
				Status: replicaSync,
			},
		},
		// By default disable existing object replication unless selected
		ExistingObjectReplication: ExistingObjectReplication{
			Status: existingStatus,
		},
	}

	// validate rule after overlaying priority for pre-existing rule being disabled.
	if err := newRule.Validate(); err != nil {
		return err
	}
	// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration
	if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw {
		for i := range c.Rules {
			c.Rules[i].Destination.Bucket = c.Role
		}
		c.Role = ""
	}

	// Reject duplicate priorities and IDs.
	for _, rule := range c.Rules {
		if rule.Priority == newRule.Priority {
			return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
		}
		if rule.ID == newRule.ID {
			return fmt.Errorf("a rule exists with this ID")
		}
	}

	c.Rules = append(c.Rules, newRule)
	return nil
}
262
// EditRule modifies an existing rule in replication config.
// opts.ID selects the rule; only fields whose corresponding option is
// non-empty (or whose Is*Set flag is true) are changed. The modified rule
// is validated before it replaces the original.
func (c *Config) EditRule(opts Options) error {
	if opts.ID == "" {
		return fmt.Errorf("rule ID missing")
	}
	// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS.
	if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 {
		for i := range c.Rules {
			c.Rules[i].Destination.Bucket = c.Role
		}
		c.Role = ""
	}

	// Locate the rule to edit; newRule starts as a copy of it.
	rIdx := -1
	var newRule Rule
	for i, rule := range c.Rules {
		if rule.ID == opts.ID {
			rIdx = i
			newRule = rule
			break
		}
	}
	if rIdx < 0 {
		return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID)
	}
	prefixChg := opts.Prefix != newRule.Prefix()
	// Rebuild the filter only when tags or prefix actually change.
	if opts.IsTagSet || prefixChg {
		prefix := newRule.Prefix()
		// NOTE(review): this conditional always leaves prefix == opts.Prefix;
		// it could be simplified to `prefix := opts.Prefix`.
		if prefix != opts.Prefix {
			prefix = opts.Prefix
		}
		tags := []Tag{newRule.Filter.Tag}
		if len(newRule.Filter.And.Tags) != 0 {
			tags = newRule.Filter.And.Tags
		}
		var err error
		if opts.IsTagSet {
			tags, err = opts.Tags()
			if err != nil {
				return err
			}
		}
		andVal := And{
			Tags: tags,
		}

		filter := Filter{Prefix: prefix}
		// only a single tag is set.
		if prefix == "" && len(tags) == 1 {
			filter.Tag = tags[0]
		}
		// both prefix and tag are present: move everything under <And>.
		if len(andVal.Tags) > 1 || prefix != "" {
			filter.And = andVal
			filter.And.Prefix = prefix
			filter.Prefix = ""
			filter.Tag = Tag{}
		}
		newRule.Filter = filter
	}

	// toggle rule status for edit option
	if opts.RuleStatus != "" {
		switch opts.RuleStatus {
		case "enable":
			newRule.Status = Enabled
		case "disable":
			newRule.Status = Disabled
		default:
			return fmt.Errorf("rule state should be either [enable|disable]")
		}
	}
	// set DeleteMarkerReplication rule status for edit option
	if opts.ReplicateDeleteMarkers != "" {
		switch opts.ReplicateDeleteMarkers {
		case "enable":
			newRule.DeleteMarkerReplication.Status = Enabled
		case "disable":
			newRule.DeleteMarkerReplication.Status = Disabled
		default:
			return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]")
		}
	}

	// set DeleteReplication rule status for edit option. This is a MinIO specific
	// option to replicate versioned deletes
	if opts.ReplicateDeletes != "" {
		switch opts.ReplicateDeletes {
		case "enable":
			newRule.DeleteReplication.Status = Enabled
		case "disable":
			newRule.DeleteReplication.Status = Disabled
		default:
			return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]")
		}
	}

	if opts.ReplicaSync != "" {
		switch opts.ReplicaSync {
		case "enable", "":
			newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled
		case "disable":
			newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled
		default:
			return fmt.Errorf("replica metadata sync should be either [enable|disable]")
		}
	}

	if opts.ExistingObjectReplicate != "" {
		switch opts.ExistingObjectReplicate {
		case "enable":
			newRule.ExistingObjectReplication.Status = Enabled
		case "disable":
			newRule.ExistingObjectReplication.Status = Disabled
		default:
			return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]")
		}
	}
	if opts.IsSCSet {
		newRule.Destination.StorageClass = opts.StorageClass
	}
	if opts.Priority != "" {
		priority, err := strconv.Atoi(opts.Priority)
		if err != nil {
			return err
		}
		newRule.Priority = priority
	}
	if opts.DestBucket != "" {
		destBucket := opts.DestBucket
		// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
		if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 {
			return fmt.Errorf("destination bucket needs to be in Arn format")
		}
		newRule.Destination.Bucket = destBucket
	}
	// validate rule
	if err := newRule.Validate(); err != nil {
		return err
	}
	// ensure priority and destination bucket restrictions are not violated
	for idx, rule := range c.Rules {
		if rule.Priority == newRule.Priority && rIdx != idx {
			return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
		}
		if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID {
			return fmt.Errorf("invalid destination bucket for this rule")
		}
	}

	c.Rules[rIdx] = newRule
	return nil
}
416
417// RemoveRule removes a rule from replication config.
418func (c *Config) RemoveRule(opts Options) error {
419 var newRules []Rule
420 ruleFound := false
421 for _, rule := range c.Rules {
422 if rule.ID != opts.ID {
423 newRules = append(newRules, rule)
424 continue
425 }
426 ruleFound = true
427 }
428 if !ruleFound {
429 return fmt.Errorf("Rule with ID %s not found", opts.ID)
430 }
431 if len(newRules) == 0 {
432 return fmt.Errorf("replication configuration should have at least one rule")
433 }
434 c.Rules = newRules
435 return nil
436}
437
// Rule - a rule for replication configuration.
type Rule struct {
	XMLName                   xml.Name                  `xml:"Rule" json:"-"`
	ID                        string                    `xml:"ID,omitempty"`
	Status                    Status                    `xml:"Status"`
	Priority                  int                       `xml:"Priority"`
	DeleteMarkerReplication   DeleteMarkerReplication   `xml:"DeleteMarkerReplication"`
	DeleteReplication         DeleteReplication         `xml:"DeleteReplication"`
	Destination               Destination               `xml:"Destination"`
	Filter                    Filter                    `xml:"Filter" json:"Filter"`
	SourceSelectionCriteria   SourceSelectionCriteria   `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
	ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"`
}
451
452// Validate validates the rule for correctness
453func (r Rule) Validate() error {
454 if err := r.validateID(); err != nil {
455 return err
456 }
457 if err := r.validateStatus(); err != nil {
458 return err
459 }
460 if err := r.validateFilter(); err != nil {
461 return err
462 }
463
464 if r.Priority < 0 && r.Status == Enabled {
465 return fmt.Errorf("priority must be set for the rule")
466 }
467
468 if err := r.validateStatus(); err != nil {
469 return err
470 }
471 return r.ExistingObjectReplication.Validate()
472}
473
474// validateID - checks if ID is valid or not.
475func (r Rule) validateID() error {
476 // cannot be longer than 255 characters
477 if len(r.ID) > 255 {
478 return fmt.Errorf("ID must be less than 255 characters")
479 }
480 return nil
481}
482
483// validateStatus - checks if status is valid or not.
484func (r Rule) validateStatus() error {
485 // Status can't be empty
486 if len(r.Status) == 0 {
487 return fmt.Errorf("status cannot be empty")
488 }
489
490 // Status must be one of Enabled or Disabled
491 if r.Status != Enabled && r.Status != Disabled {
492 return fmt.Errorf("status must be set to either Enabled or Disabled")
493 }
494 return nil
495}
496
// validateFilter delegates to the rule's Filter validation.
func (r Rule) validateFilter() error {
	return r.Filter.Validate()
}
500
501// Prefix - a rule can either have prefix under <filter></filter> or under
502// <filter><and></and></filter>. This method returns the prefix from the
503// location where it is available
504func (r Rule) Prefix() string {
505 if r.Filter.Prefix != "" {
506 return r.Filter.Prefix
507 }
508 return r.Filter.And.Prefix
509}
510
511// Tags - a rule can either have tag under <filter></filter> or under
512// <filter><and></and></filter>. This method returns all the tags from the
513// rule in the format tag1=value1&tag2=value2
514func (r Rule) Tags() string {
515 ts := []Tag{r.Filter.Tag}
516 if len(r.Filter.And.Tags) != 0 {
517 ts = r.Filter.And.Tags
518 }
519
520 var buf bytes.Buffer
521 for _, t := range ts {
522 if buf.Len() > 0 {
523 buf.WriteString("&")
524 }
525 buf.WriteString(t.String())
526 }
527 return buf.String()
528}
529
// Filter - a filter for a replication configuration Rule.
// At most one of Prefix, Tag or And may be populated (enforced by Validate).
type Filter struct {
	XMLName xml.Name `xml:"Filter" json:"-"`
	// Prefix has no explicit xml tag, so it marshals under its field name.
	Prefix string `json:"Prefix,omitempty"`
	And    And    `xml:"And,omitempty" json:"And,omitempty"`
	Tag    Tag    `xml:"Tag,omitempty" json:"Tag,omitempty"`
}
537
538// Validate - validates the filter element
539func (f Filter) Validate() error {
540 // A Filter must have exactly one of Prefix, Tag, or And specified.
541 if !f.And.isEmpty() {
542 if f.Prefix != "" {
543 return errInvalidFilter
544 }
545 if !f.Tag.IsEmpty() {
546 return errInvalidFilter
547 }
548 }
549 if f.Prefix != "" {
550 if !f.Tag.IsEmpty() {
551 return errInvalidFilter
552 }
553 }
554 if !f.Tag.IsEmpty() {
555 if err := f.Tag.Validate(); err != nil {
556 return err
557 }
558 }
559 return nil
560}
561
// Tag - a tag for a replication configuration Rule filter.
type Tag struct {
	XMLName xml.Name `json:"-"`
	Key     string   `xml:"Key,omitempty" json:"Key,omitempty"`
	Value   string   `xml:"Value,omitempty" json:"Value,omitempty"`
}

// String renders the tag as "key=value", or "" for an empty tag.
func (tag Tag) String() string {
	if tag.IsEmpty() {
		return ""
	}
	return tag.Key + "=" + tag.Value
}

// IsEmpty returns whether this tag is empty or not; a tag without a key
// is considered empty regardless of its value.
func (tag Tag) IsEmpty() bool {
	return tag.Key == ""
}

// Validate checks this tag: the key must be 1-128 runes and the value at
// most 256 runes (limits counted in runes, not bytes).
func (tag Tag) Validate() error {
	switch {
	case len(tag.Key) == 0, utf8.RuneCountInString(tag.Key) > 128:
		return fmt.Errorf("invalid Tag Key")
	case utf8.RuneCountInString(tag.Value) > 256:
		return fmt.Errorf("invalid Tag Value")
	}
	return nil
}
592
// Destination - destination in ReplicationConfiguration.
type Destination struct {
	XMLName xml.Name `xml:"Destination" json:"-"`
	// Bucket identifies the replication target; no omitempty, so it is
	// always serialized.
	Bucket string `xml:"Bucket" json:"Bucket"`
	// StorageClass optionally overrides the storage class for replicas.
	StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
}
599
600// And - a tag to combine a prefix and multiple tags for replication configuration rule.
601type And struct {
602 XMLName xml.Name `xml:"And,omitempty" json:"-"`
603 Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
604 Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
605}
606
607// isEmpty returns true if Tags field is null
608func (a And) isEmpty() bool {
609 return len(a.Tags) == 0 && a.Prefix == ""
610}
611
// Status represents Enabled/Disabled status
type Status string

// Supported status types
const (
	// Enabled marks a rule or feature as active.
	Enabled Status = "Enabled"
	// Disabled marks a rule or feature as inactive.
	Disabled Status = "Disabled"
)
620
621// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
622type DeleteMarkerReplication struct {
623 Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
624}
625
626// IsEmpty returns true if DeleteMarkerReplication is not set
627func (d DeleteMarkerReplication) IsEmpty() bool {
628 return len(d.Status) == 0
629}
630
631// DeleteReplication - whether versioned deletes are replicated - this
632// is a MinIO specific extension
633type DeleteReplication struct {
634 Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
635}
636
637// IsEmpty returns true if DeleteReplication is not set
638func (d DeleteReplication) IsEmpty() bool {
639 return len(d.Status) == 0
640}
641
642// ReplicaModifications specifies if replica modification sync is enabled
643type ReplicaModifications struct {
644 Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default
645}
646
647// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration.
648type SourceSelectionCriteria struct {
649 ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"`
650}
651
652// IsValid - checks whether SourceSelectionCriteria is valid or not.
653func (s SourceSelectionCriteria) IsValid() bool {
654 return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled
655}
656
657// Validate source selection criteria
658func (s SourceSelectionCriteria) Validate() error {
659 if (s == SourceSelectionCriteria{}) {
660 return nil
661 }
662 if !s.IsValid() {
663 return fmt.Errorf("invalid ReplicaModification status")
664 }
665 return nil
666}
667
668// ExistingObjectReplication - whether existing object replication is enabled
669type ExistingObjectReplication struct {
670 Status Status `xml:"Status"` // should be set to "Disabled" by default
671}
672
673// IsEmpty returns true if DeleteMarkerReplication is not set
674func (e ExistingObjectReplication) IsEmpty() bool {
675 return len(e.Status) == 0
676}
677
678// Validate validates whether the status is disabled.
679func (e ExistingObjectReplication) Validate() error {
680 if e.IsEmpty() {
681 return nil
682 }
683 if e.Status != Disabled && e.Status != Enabled {
684 return fmt.Errorf("invalid ExistingObjectReplication status")
685 }
686 return nil
687}
688
// TargetMetrics represents inline replication metrics
// such as pending, failed and completed bytes in total for a bucket remote target
type TargetMetrics struct {
	// Completed count
	ReplicatedCount uint64 `json:"replicationCount,omitempty"`
	// Completed size in bytes
	ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
	// Bandwidth limit in bytes/sec for this target
	// (json key "limitInBits" kept as-is for wire compatibility)
	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
	// Current bandwidth used in bytes/sec for this target
	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
	// errors seen in replication in last minute, hour and total
	Failed TimedErrStats `json:"failed,omitempty"`
	// Deprecated fields
	// Pending size in bytes
	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
	// Total Replica size in bytes
	ReplicaSize uint64 `json:"replicaSize,omitempty"`
	// Failed size in bytes
	FailedSize uint64 `json:"failedReplicationSize,omitempty"`
	// Total number of pending operations including metadata updates
	PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
	// Total number of failed operations including metadata updates
	FailedCount uint64 `json:"failedReplicationCount,omitempty"`
}
714
// Metrics represents inline replication metrics for a bucket.
type Metrics struct {
	// Per-target metrics; presumably keyed by target ARN — verify against caller.
	Stats map[string]TargetMetrics
	// Completed size in bytes  across targets
	ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
	// Total Replica size in bytes  across targets
	ReplicaSize uint64 `json:"replicaSize,omitempty"`
	// Total Replica counts
	ReplicaCount int64 `json:"replicaCount,omitempty"`
	// Total Replicated count
	ReplicatedCount int64 `json:"replicationCount,omitempty"`
	// errors seen in replication in last minute, hour and total
	Errors TimedErrStats `json:"failed,omitempty"`
	// Total number of entries that are queued for replication
	QStats InQueueMetric `json:"queued"`
	// Deprecated fields
	// Total Pending size in bytes across targets
	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
	// Failed size in bytes across targets
	FailedSize uint64 `json:"failedReplicationSize,omitempty"`
	// Total number of pending operations including metadata updates across targets
	PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
	// Total number of failed operations including metadata updates across targets
	FailedCount uint64 `json:"failedReplicationCount,omitempty"`
}
740
// RStat - has count and bytes for replication metrics
type RStat struct {
	Count float64 `json:"count"`
	Bytes int64   `json:"bytes"`
}

// Add returns the field-wise sum of two RStat values.
func (r RStat) Add(r1 RStat) RStat {
	var sum RStat
	sum.Count = r.Count + r1.Count
	sum.Bytes = r.Bytes + r1.Bytes
	return sum
}
754
755// TimedErrStats holds error stats for a time period
756type TimedErrStats struct {
757 LastMinute RStat `json:"lastMinute"`
758 LastHour RStat `json:"lastHour"`
759 Totals RStat `json:"totals"`
760}
761
762// Add two TimedErrStats
763func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats {
764 return TimedErrStats{
765 LastMinute: te.LastMinute.Add(o.LastMinute),
766 LastHour: te.LastHour.Add(o.LastHour),
767 Totals: te.Totals.Add(o.Totals),
768 }
769}
770
// ResyncTargetsInfo provides replication target information to resync replicated data.
type ResyncTargetsInfo struct {
	// Targets lists one entry per replication target taking part in resync.
	Targets []ResyncTarget `json:"target,omitempty"`
}
775
// ResyncTarget provides the replica resources and resetID to initiate resync replication.
type ResyncTarget struct {
	// Arn identifies the remote target; ResetID identifies one resync run.
	Arn       string    `json:"arn"`
	ResetID   string    `json:"resetid"`
	StartTime time.Time `json:"startTime,omitempty"`
	EndTime   time.Time `json:"endTime,omitempty"`
	// Status of resync operation
	ResyncStatus string `json:"resyncStatus,omitempty"`
	// Completed size in bytes
	ReplicatedSize int64 `json:"completedReplicationSize,omitempty"`
	// Failed size in bytes
	FailedSize int64 `json:"failedReplicationSize,omitempty"`
	// Total number of failed operations
	FailedCount int64 `json:"failedReplicationCount,omitempty"`
	// Total number of completed operations
	ReplicatedCount int64 `json:"replicationCount,omitempty"`
	// Last bucket/object replicated.
	Bucket string `json:"bucket,omitempty"`
	Object string `json:"object,omitempty"`
}
796
// XferStats holds transfer rate info for uploads/sec
type XferStats struct {
	AvgRate  float64 `json:"avgRate"`
	PeakRate float64 `json:"peakRate"`
	CurrRate float64 `json:"currRate"`
}

// Merge accumulates another XferStats into the receiver in place.
func (x *XferStats) Merge(x1 XferStats) {
	x.AvgRate, x.PeakRate, x.CurrRate = x.AvgRate+x1.AvgRate, x.PeakRate+x1.PeakRate, x.CurrRate+x1.CurrRate
}
810
// QStat holds count and bytes for objects in replication queue
type QStat struct {
	Count float64 `json:"count"`
	Bytes float64 `json:"bytes"`
}

// Add accumulates another QStat into the receiver in place.
func (q *QStat) Add(q1 QStat) {
	q.Count, q.Bytes = q.Count+q1.Count, q.Bytes+q1.Bytes
}
822
// InQueueMetric holds stats for objects in replication queue:
// current, average and peak queue sizes.
type InQueueMetric struct {
	Curr QStat `json:"curr" msg:"cq"`
	Avg  QStat `json:"avg" msg:"aq"`
	Max  QStat `json:"peak" msg:"pq"`
}
829
// MetricName name of replication metric
type MetricName string

// Supported metric names, bucketing objects by size class.
const (
	// Large is a metric name for large objects >=128MiB
	Large MetricName = "Large"
	// Small is a metric name for objects <128MiB size
	Small MetricName = "Small"
	// Total is a metric name for total objects
	Total MetricName = "Total"
)
841
// WorkerStat has stats on number of replication workers:
// current, average and maximum worker counts.
type WorkerStat struct {
	Curr int32   `json:"curr"`
	Avg  float32 `json:"avg"`
	Max  int32   `json:"max"`
}
848
// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
// and number of entries that failed replication after 3 retries
type ReplMRFStats struct {
	LastFailedCount uint64 `json:"failedCount_last5min"`
	// Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
	TotalDroppedCount uint64 `json:"droppedCount_since_uptime"`
	// Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
	TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"`
}
858
// ReplQNodeStats holds stats for a node in replication queue
type ReplQNodeStats struct {
	NodeName string     `json:"nodeName"`
	Uptime   int64      `json:"uptime"`
	Workers  WorkerStat `json:"activeWorkers"`

	// XferStats aggregates transfer rates per metric class for this node;
	// TgtXferStats breaks the same numbers down per remote target
	// (keyed by target ARN, per usage in QStats below).
	XferStats    map[MetricName]XferStats            `json:"transferSummary"`
	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`

	QStats   InQueueMetric `json:"queueStats"`
	MRFStats ReplMRFStats  `json:"mrfStats"`
}
871
872// ReplQueueStats holds stats for replication queue across nodes
873type ReplQueueStats struct {
874 Nodes []ReplQNodeStats `json:"nodes"`
875}
876
877// Workers returns number of workers across all nodes
878func (q ReplQueueStats) Workers() (tot WorkerStat) {
879 for _, node := range q.Nodes {
880 tot.Avg += node.Workers.Avg
881 tot.Curr += node.Workers.Curr
882 if tot.Max < node.Workers.Max {
883 tot.Max = node.Workers.Max
884 }
885 }
886 if len(q.Nodes) > 0 {
887 tot.Avg /= float32(len(q.Nodes))
888 tot.Curr /= int32(len(q.Nodes))
889 }
890 return tot
891}
892
893// qStatSummary returns cluster level stats for objects in replication queue
894func (q ReplQueueStats) qStatSummary() InQueueMetric {
895 m := InQueueMetric{}
896 for _, v := range q.Nodes {
897 m.Avg.Add(v.QStats.Avg)
898 m.Curr.Add(v.QStats.Curr)
899 if m.Max.Count < v.QStats.Max.Count {
900 m.Max.Add(v.QStats.Max)
901 }
902 }
903 return m
904}
905
// ReplQStats holds stats for objects in replication queue,
// aggregated across all nodes of the cluster.
type ReplQStats struct {
	Uptime  int64      `json:"uptime"`
	Workers WorkerStat `json:"workers"`

	// Cluster-wide transfer rates, total and per remote target.
	XferStats    map[MetricName]XferStats            `json:"xferStats"`
	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`

	QStats   InQueueMetric `json:"qStats"`
	MRFStats ReplMRFStats  `json:"mrfStats"`
}
917
// QStats returns cluster level stats for objects in replication queue.
// It sums transfer rates across nodes (taking the max of peak rates),
// totals the MRF counters, and averages node uptime.
func (q ReplQueueStats) QStats() (r ReplQStats) {
	r.QStats = q.qStatSummary()
	r.XferStats = make(map[MetricName]XferStats)
	r.TgtXferStats = make(map[string]map[MetricName]XferStats)
	r.Workers = q.Workers()

	for _, node := range q.Nodes {
		for arn := range node.TgtXferStats {
			// NOTE(review): ok is always true here since arn comes from
			// ranging over the same map; the !ok branch looks dead.
			xmap, ok := node.TgtXferStats[arn]
			if !ok {
				xmap = make(map[MetricName]XferStats)
			}
			for m, v := range xmap {
				// NOTE(review): st is seeded from the cluster-wide
				// XferStats accumulator, so a target's entry can include
				// rates accumulated from other sources — confirm this
				// mixing is intended upstream.
				st, ok := r.XferStats[m]
				if !ok {
					st = XferStats{}
				}
				st.AvgRate += v.AvgRate
				st.CurrRate += v.CurrRate
				st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
				if _, ok := r.TgtXferStats[arn]; !ok {
					r.TgtXferStats[arn] = make(map[MetricName]XferStats)
				}
				r.TgtXferStats[arn][m] = st
			}
		}
		// Node-level transfer rates accumulate into the cluster totals.
		for k, v := range node.XferStats {
			st, ok := r.XferStats[k]
			if !ok {
				st = XferStats{}
			}
			st.AvgRate += v.AvgRate
			st.CurrRate += v.CurrRate
			st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
			r.XferStats[k] = st
		}
		r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount
		r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount
		r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes
		r.Uptime += node.Uptime
	}
	if len(q.Nodes) > 0 {
		r.Uptime /= int64(len(q.Nodes)) // average uptime
	}
	return
}
965
// MetricsV2 represents replication metrics for a bucket:
// process uptime, current inline metrics and cross-node queue stats.
type MetricsV2 struct {
	Uptime       int64          `json:"uptime"`
	CurrentStats Metrics        `json:"currStats"`
	QueueStats   ReplQueueStats `json:"queueStats"`
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
new file mode 100644
index 0000000..056e78a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -0,0 +1,411 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package s3utils
19
20import (
21 "bytes"
22 "encoding/hex"
23 "errors"
24 "net"
25 "net/url"
26 "regexp"
27 "sort"
28 "strings"
29 "unicode/utf8"
30)
31
// sentinelURL is the zero url.URL value; the endpoint helpers below
// compare against it to detect an unset (and therefore invalid) endpoint.
var sentinelURL = url.URL{}
34
// IsValidDomain validates if input string is a valid domain name.
// The check is deliberately non-exhaustive; names that pass here may
// still fail later during resolution.
func IsValidDomain(host string) bool {
	// See RFC 1035, RFC 3696.
	host = strings.TrimSpace(host)
	if len(host) == 0 || len(host) > 255 {
		return false
	}
	// A host may not begin or end with '-' or '_'.
	if strings.HasPrefix(host, "-") || strings.HasSuffix(host, "-") {
		return false
	}
	if strings.HasPrefix(host, "_") || strings.HasSuffix(host, "_") {
		return false
	}
	// Nor may it begin with '.'.
	if strings.HasPrefix(host, ".") {
		return false
	}
	// Reject obviously invalid characters.
	if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
		return false
	}
	return true
}
62
// IsValidIP parses input string for ip address validity.
// Both IPv4 and IPv6 textual forms are accepted by net.ParseIP.
func IsValidIP(ip string) bool {
	parsed := net.ParseIP(ip)
	return parsed != nil
}
67
68// IsVirtualHostSupported - verifies if bucketName can be part of
69// virtual host. Currently only Amazon S3 and Google Cloud Storage
70// would support this.
71func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
72 if endpointURL == sentinelURL {
73 return false
74 }
75 // bucketName can be valid but '.' in the hostname will fail SSL
76 // certificate validation. So do not use host-style for such buckets.
77 if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
78 return false
79 }
80 // Return true for all other cases
81 return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL)
82}
83
// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
//
// NOTE(review): the '.' characters in these patterns are unescaped regex
// metacharacters (matching any byte), so the patterns are slightly looser
// than the literal hostnames — confirm this is intentional upstream.

// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`)

// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`)

// amazonS3HostFIPS - regular expression used to determine if an arg is s3 FIPS host.
var amazonS3HostFIPS = regexp.MustCompile(`^s3-fips.(.*?).amazonaws.com$`)

// amazonS3HostFIPSDualStack - regular expression used to determine if an arg is s3 FIPS host dualstack.
var amazonS3HostFIPSDualStack = regexp.MustCompile(`^s3-fips.dualstack.(.*?).amazonaws.com$`)

// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`)

// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`)

// amazonS3ChinaHostDualStack - regular expression used to determine if the arg is s3 china host dualstack.
var amazonS3ChinaHostDualStack = regexp.MustCompile(`^s3.dualstack.(cn.*?).amazonaws.com.cn$`)

// Regular expression used to determine if the arg is elb host.
var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`)

// Regular expression used to determine if the arg is elb host in china.
var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`)

// amazonS3HostPrivateLink - regular expression used to determine if an arg is s3 host in AWS PrivateLink interface endpoints style
var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`)
115
116// GetRegionFromURL - returns a region from url host.
117func GetRegionFromURL(endpointURL url.URL) string {
118 if endpointURL == sentinelURL {
119 return ""
120 }
121 if endpointURL.Host == "s3-external-1.amazonaws.com" {
122 return ""
123 }
124
125 // if elb's are used we cannot calculate which region it may be, just return empty.
126 if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
127 return ""
128 }
129
130 // We check for FIPS dualstack matching first to avoid the non-greedy
131 // regex for FIPS non-dualstack matching a dualstack URL
132 parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
133 if len(parts) > 1 {
134 return parts[1]
135 }
136
137 parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
138 if len(parts) > 1 {
139 return parts[1]
140 }
141
142 parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
143 if len(parts) > 1 {
144 return parts[1]
145 }
146
147 parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
148 if len(parts) > 1 {
149 return parts[1]
150 }
151
152 parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
153 if len(parts) > 1 {
154 return parts[1]
155 }
156
157 parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
158 if len(parts) > 1 {
159 return parts[1]
160 }
161
162 parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
163 if len(parts) > 1 {
164 return parts[1]
165 }
166
167 parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
168 if len(parts) > 1 {
169 return parts[1]
170 }
171
172 return ""
173}
174
175// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint.
176func IsAliyunOSSEndpoint(endpointURL url.URL) bool {
177 return strings.HasSuffix(endpointURL.Host, "aliyuncs.com")
178}
179
180// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
181func IsAmazonEndpoint(endpointURL url.URL) bool {
182 if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
183 return true
184 }
185 return GetRegionFromURL(endpointURL) != ""
186}
187
188// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
189func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
190 if endpointURL == sentinelURL {
191 return false
192 }
193 return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
194 endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" ||
195 IsAmazonFIPSGovCloudEndpoint(endpointURL))
196}
197
198// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud.
199func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
200 if endpointURL == sentinelURL {
201 return false
202 }
203 return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-")
204}
205
206// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
207// See https://aws.amazon.com/compliance/fips.
208func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
209 if endpointURL == sentinelURL {
210 return false
211 }
212 return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com")
213}
214
215// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint
216// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html.
217func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool {
218 if endpointURL == sentinelURL {
219 return false
220 }
221 return amazonS3HostPrivateLink.MatchString(endpointURL.Host)
222}
223
224// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
225func IsGoogleEndpoint(endpointURL url.URL) bool {
226 if endpointURL == sentinelURL {
227 return false
228 }
229 return endpointURL.Host == "storage.googleapis.com"
230}
231
// percentEncodeSlash replaces every '/' with "%2F".
// Expects ascii encoded strings - from output of urlEncodePath.
func percentEncodeSlash(s string) string {
	return strings.Join(strings.Split(s, "/"), "%2F")
}
236
237// QueryEncode - encodes query values in their URL encoded form. In
238// addition to the percent encoding performed by urlEncodePath() used
239// here, it also percent encodes '/' (forward slash)
240func QueryEncode(v url.Values) string {
241 if v == nil {
242 return ""
243 }
244 var buf bytes.Buffer
245 keys := make([]string, 0, len(v))
246 for k := range v {
247 keys = append(keys, k)
248 }
249 sort.Strings(keys)
250 for _, k := range keys {
251 vs := v[k]
252 prefix := percentEncodeSlash(EncodePath(k)) + "="
253 for _, v := range vs {
254 if buf.Len() > 0 {
255 buf.WriteByte('&')
256 }
257 buf.WriteString(prefix)
258 buf.WriteString(percentEncodeSlash(EncodePath(v)))
259 }
260 }
261 return buf.String()
262}
263
// TagDecode - decodes canonical tag into map of key and value.
// Any pair lacking '=' invalidates the whole string, yielding an empty
// (but non-nil) map.
func TagDecode(ctag string) map[string]string {
	if ctag == "" {
		return map[string]string{}
	}
	pairs := strings.Split(ctag, "&")
	decoded := make(map[string]string, len(pairs))
	for _, pair := range pairs {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) < 2 {
			return map[string]string{}
		}
		// url.PathUnescape yields "" on error; the (possibly empty)
		// result is stored either way, matching the canonical behavior.
		val, _ := url.PathUnescape(kv[1])
		decoded[kv[0]] = val
	}
	return decoded
}
287
288// TagEncode - encodes tag values in their URL encoded form. In
289// addition to the percent encoding performed by urlEncodePath() used
290// here, it also percent encodes '/' (forward slash)
291func TagEncode(tags map[string]string) string {
292 if tags == nil {
293 return ""
294 }
295 values := url.Values{}
296 for k, v := range tags {
297 values[k] = []string{v}
298 }
299 return QueryEncode(values)
300}
301
// reservedObjectNames matches names built only of characters that never
// need escaping; if an object name matches, EncodePath can return it as-is.
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
304
305// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
306//
307// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
308// non english characters cannot be parsed due to the nature in which url.Encode() is written
309//
310// This function on the other hand is a direct replacement for url.Encode() technique to support
311// pretty much every UTF-8 character.
312func EncodePath(pathName string) string {
313 if reservedObjectNames.MatchString(pathName) {
314 return pathName
315 }
316 var encodedPathname strings.Builder
317 for _, s := range pathName {
318 if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
319 encodedPathname.WriteRune(s)
320 continue
321 }
322 switch s {
323 case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
324 encodedPathname.WriteRune(s)
325 continue
326 default:
327 l := utf8.RuneLen(s)
328 if l < 0 {
329 // if utf8 cannot convert return the same string as is
330 return pathName
331 }
332 u := make([]byte, l)
333 utf8.EncodeRune(u, s)
334 for _, r := range u {
335 hex := hex.EncodeToString([]byte{r})
336 encodedPathname.WriteString("%" + strings.ToUpper(hex))
337 }
338 }
339 }
340 return encodedPathname.String()
341}
342
// We support '.' with bucket names but we fallback to using path
// style requests instead for such buckets.
var (
	// validBucketName is the relaxed pattern; validBucketNameStrict is the
	// S3-conformant lowercase-only pattern; ipAddress spots dotted quads.
	validBucketName       = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
	validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
	ipAddress             = regexp.MustCompile(`^(\d+\.){3}\d+$`)
)
350
351// Common checker for both stricter and basic validation.
352func checkBucketNameCommon(bucketName string, strict bool) (err error) {
353 if strings.TrimSpace(bucketName) == "" {
354 return errors.New("Bucket name cannot be empty")
355 }
356 if len(bucketName) < 3 {
357 return errors.New("Bucket name cannot be shorter than 3 characters")
358 }
359 if len(bucketName) > 63 {
360 return errors.New("Bucket name cannot be longer than 63 characters")
361 }
362 if ipAddress.MatchString(bucketName) {
363 return errors.New("Bucket name cannot be an ip address")
364 }
365 if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") {
366 return errors.New("Bucket name contains invalid characters")
367 }
368 if strict {
369 if !validBucketNameStrict.MatchString(bucketName) {
370 err = errors.New("Bucket name contains invalid characters")
371 }
372 return err
373 }
374 if !validBucketName.MatchString(bucketName) {
375 err = errors.New("Bucket name contains invalid characters")
376 }
377 return err
378}
379
// CheckValidBucketName - checks if we have a valid input bucket name.
// Uses the relaxed (non-strict) character rules.
func CheckValidBucketName(bucketName string) (err error) {
	return checkBucketNameCommon(bucketName, false)
}
384
// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
// This is a stricter version.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func CheckValidBucketNameStrict(bucketName string) (err error) {
	return checkBucketNameCommon(bucketName, true)
}
391
// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func CheckValidObjectNamePrefix(objectName string) error {
	switch {
	case len(objectName) > 1024:
		return errors.New("Object name cannot be longer than 1024 characters")
	case !utf8.ValidString(objectName):
		return errors.New("Object name with non UTF-8 strings are not supported")
	}
	return nil
}
403
404// CheckValidObjectName - checks if we have a valid input object name.
405// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
406func CheckValidObjectName(objectName string) error {
407 if strings.TrimSpace(objectName) == "" {
408 return errors.New("Object name cannot be empty")
409 }
410 return CheckValidObjectNamePrefix(objectName)
411}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
new file mode 100644
index 0000000..c35e58e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
@@ -0,0 +1,200 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package set
19
20import (
21 "fmt"
22 "sort"
23
24 jsoniter "github.com/json-iterator/go"
25)
26
27// StringSet - uses map as set of strings.
28type StringSet map[string]struct{}
29
30var json = jsoniter.ConfigCompatibleWithStandardLibrary
31
32// ToSlice - returns StringSet as string slice.
33func (set StringSet) ToSlice() []string {
34 keys := make([]string, 0, len(set))
35 for k := range set {
36 keys = append(keys, k)
37 }
38 sort.Strings(keys)
39 return keys
40}
41
// IsEmpty - returns whether the set is empty or not
// (true when the set contains no elements).
func (set StringSet) IsEmpty() bool {
	return len(set) == 0
}
46
// Add - adds string to the set. Adding an already-present string is a no-op.
func (set StringSet) Add(s string) {
	set[s] = struct{}{}
}
51
// Remove - removes string in the set. It does nothing if string does not exist in the set.
func (set StringSet) Remove(s string) {
	delete(set, s)
}
56
// Contains - checks if string is in the set; returns true when present.
func (set StringSet) Contains(s string) bool {
	_, ok := set[s]
	return ok
}
62
63// FuncMatch - returns new set containing each value who passes match function.
64// A 'matchFn' should accept element in a set as first argument and
65// 'matchString' as second argument. The function can do any logic to
66// compare both the arguments and should return true to accept element in
67// a set to include in output set else the element is ignored.
68func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
69 nset := NewStringSet()
70 for k := range set {
71 if matchFn(k, matchString) {
72 nset.Add(k)
73 }
74 }
75 return nset
76}
77
78// ApplyFunc - returns new set containing each value processed by 'applyFn'.
79// A 'applyFn' should accept element in a set as a argument and return
80// a processed string. The function can do any logic to return a processed
81// string.
82func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
83 nset := NewStringSet()
84 for k := range set {
85 nset.Add(applyFn(k))
86 }
87 return nset
88}
89
90// Equals - checks whether given set is equal to current set or not.
91func (set StringSet) Equals(sset StringSet) bool {
92 // If length of set is not equal to length of given set, the
93 // set is not equal to given set.
94 if len(set) != len(sset) {
95 return false
96 }
97
98 // As both sets are equal in length, check each elements are equal.
99 for k := range set {
100 if _, ok := sset[k]; !ok {
101 return false
102 }
103 }
104
105 return true
106}
107
108// Intersection - returns the intersection with given set as new set.
109func (set StringSet) Intersection(sset StringSet) StringSet {
110 nset := NewStringSet()
111 for k := range set {
112 if _, ok := sset[k]; ok {
113 nset.Add(k)
114 }
115 }
116
117 return nset
118}
119
120// Difference - returns the difference with given set as new set.
121func (set StringSet) Difference(sset StringSet) StringSet {
122 nset := NewStringSet()
123 for k := range set {
124 if _, ok := sset[k]; !ok {
125 nset.Add(k)
126 }
127 }
128
129 return nset
130}
131
132// Union - returns the union with given set as new set.
133func (set StringSet) Union(sset StringSet) StringSet {
134 nset := NewStringSet()
135 for k := range set {
136 nset.Add(k)
137 }
138
139 for k := range sset {
140 nset.Add(k)
141 }
142
143 return nset
144}
145
// MarshalJSON - converts to JSON data; the set is encoded as a sorted
// JSON string array (via ToSlice).
func (set StringSet) MarshalJSON() ([]byte, error) {
	return json.Marshal(set.ToSlice())
}
150
151// UnmarshalJSON - parses JSON data and creates new set with it.
152// If 'data' contains JSON string array, the set contains each string.
153// If 'data' contains JSON string, the set contains the string as one element.
154// If 'data' contains Other JSON types, JSON parse error is returned.
155func (set *StringSet) UnmarshalJSON(data []byte) error {
156 sl := []string{}
157 var err error
158 if err = json.Unmarshal(data, &sl); err == nil {
159 *set = make(StringSet)
160 for _, s := range sl {
161 set.Add(s)
162 }
163 } else {
164 var s string
165 if err = json.Unmarshal(data, &s); err == nil {
166 *set = make(StringSet)
167 set.Add(s)
168 }
169 }
170
171 return err
172}
173
// String - returns printable string of the set: the sorted elements
// formatted as a slice, e.g. "[a b c]".
func (set StringSet) String() string {
	return fmt.Sprintf("%s", set.ToSlice())
}
178
// NewStringSet - creates new, empty string set.
func NewStringSet() StringSet {
	return make(StringSet)
}
183
184// CreateStringSet - creates new string set with given string values.
185func CreateStringSet(sl ...string) StringSet {
186 set := make(StringSet)
187 for _, k := range sl {
188 set.Add(k)
189 }
190 return set
191}
192
193// CopyStringSet - returns copy of given set.
194func CopyStringSet(set StringSet) StringSet {
195 nset := NewStringSet()
196 for k, v := range set {
197 nset[k] = v
198 }
199 return nset
200}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
new file mode 100644
index 0000000..77540e2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
@@ -0,0 +1,224 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "fmt"
23 "io"
24 "net/http"
25 "strconv"
26 "strings"
27 "time"
28)
29
30// getUnsignedChunkLength - calculates the length of chunk metadata
31func getUnsignedChunkLength(chunkDataSize int64) int64 {
32 return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
33 crlfLen +
34 chunkDataSize +
35 crlfLen
36}
37
38// getUSStreamLength - calculates the length of the overall stream (data + metadata)
39func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
40 if dataLen <= 0 {
41 return 0
42 }
43
44 chunksCount := int64(dataLen / chunkSize)
45 remainingBytes := int64(dataLen % chunkSize)
46 streamLen := int64(0)
47 streamLen += chunksCount * getUnsignedChunkLength(chunkSize)
48 if remainingBytes > 0 {
49 streamLen += getUnsignedChunkLength(remainingBytes)
50 }
51 streamLen += getUnsignedChunkLength(0)
52 if len(trailers) > 0 {
53 for name, placeholder := range trailers {
54 if len(placeholder) > 0 {
55 streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
56 }
57 }
58 streamLen += crlfLen
59 }
60
61 return streamLen
62}
63
// prepareUSStreamingRequest - prepares a request with the appropriate
// headers (aws-chunked transfer encoding, optional session token, date)
// before the unsigned streaming upload starts.
func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
	req.TransferEncoding = []string{"aws-chunked"}
	if sessionToken != "" {
		req.Header.Set("X-Amz-Security-Token", sessionToken)
	}

	req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
	// Content-Length covers the framed stream: payload plus per-chunk
	// metadata and any trailer section.
	req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
}
76
// StreamingUSReader implements unsigned chunked upload ("aws-chunked"
// framing without per-chunk signatures) as a reader on top of
// req.Body's ReadCloser: chunk header;data;... repeat.
type StreamingUSReader struct {
	contentLen     int64         // Content-Length from req header
	baseReadCloser io.ReadCloser // underlying io.Reader
	bytesRead      int64         // bytes read from underlying io.Reader
	buf            bytes.Buffer  // holds the framed chunk served to Read
	chunkBuf       []byte        // holds raw data read from req Body
	chunkBufLen    int           // no. of bytes read so far into chunkBuf
	done           bool          // done reading the underlying reader to EOF
	chunkNum       int           // 1-based index of the chunk being built
	totalChunks    int           // data chunks plus the final zero-length chunk
	lastChunkSize  int           // size in bytes of the final partial data chunk
	trailer        http.Header   // trailer headers emitted after the final chunk
}
92
93// writeChunk - signs a chunk read from s.baseReader of chunkLen size.
94func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) {
95 s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n")
96
97 // Write chunk data into streaming buffer
98 s.buf.Write(s.chunkBuf[:chunkLen])
99
100 // Write the chunk trailer.
101 if addCrLf {
102 s.buf.Write([]byte("\r\n"))
103 }
104
105 // Reset chunkBufLen for next chunk read.
106 s.chunkBufLen = 0
107 s.chunkNum++
108}
109
// addTrailer - writes the given trailer headers as lower-cased
// "name:value\n" lines after the final chunk, followed by the
// terminating blank line. Unlike the signed variant, no trailer
// signature is appended.
func (s *StreamingUSReader) addTrailer(h http.Header) {
	// Temporarily reuse chunkBuf as scratch space for the trailer block.
	olen := len(s.chunkBuf)
	s.chunkBuf = s.chunkBuf[:0]
	for k, v := range h {
		s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
	}

	s.buf.Write(s.chunkBuf)
	s.buf.WriteString("\r\n\r\n")

	// Restore chunkBuf to its original length and reset counters.
	s.chunkBuf = s.chunkBuf[:olen]
	s.chunkBufLen = 0
	s.chunkNum++
}
127
// StreamingUnsignedV4 - provides chunked ("aws-chunked") upload without
// per-chunk signatures by wrapping req.Body in a StreamingUSReader.
func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request {
	// Set headers needed for streaming signature.
	prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)

	if req.Body == nil {
		req.Body = io.NopCloser(bytes.NewReader([]byte("")))
	}

	stReader := &StreamingUSReader{
		baseReadCloser: req.Body,
		chunkBuf:       make([]byte, payloadChunkSize),
		contentLen:     dataLen,
		chunkNum:       1,
		totalChunks:    int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, // data chunks + final zero chunk
		lastChunkSize:  int(dataLen % payloadChunkSize),
	}
	if len(req.Trailer) > 0 {
		stReader.trailer = req.Trailer
		// Remove the trailer from the request itself: the reader emits
		// it inline at the end of the chunked body instead.
		req.Trailer = nil
	}

	req.Body = stReader

	return req
}
155
// Read - serves the underlying stream re-framed into aws-chunked format,
// providing an io.Reader interface over the framed bytes.
func (s *StreamingUSReader) Read(buf []byte) (int, error) {
	switch {
	// After the last chunk is read from underlying reader, we
	// never re-fill s.buf.
	case s.done:

	// s.buf will be (re-)filled with next chunk when has lesser
	// bytes than asked for.
	case s.buf.Len() < len(buf):
		s.chunkBufLen = 0
		for {
			n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
			// Usually we validate `err` first, but in this case
			// we are validating n > 0 for the following reasons.
			//
			// 1. n > 0, err is one of io.EOF, nil (near end of stream)
			// A Reader returning a non-zero number of bytes at the end
			// of the input stream may return either err == EOF or err == nil
			//
			// 2. n == 0, err is io.EOF (actual end of stream)
			//
			// Callers should always process the n > 0 bytes returned
			// before considering the error err.
			if n1 > 0 {
				s.chunkBufLen += n1
				s.bytesRead += int64(n1)

				if s.chunkBufLen == payloadChunkSize ||
					(s.chunkNum == s.totalChunks-1 &&
						s.chunkBufLen == s.lastChunkSize) {
					// A full chunk (or the final partial chunk) is
					// ready - frame it and write it to s.buf.
					s.writeChunk(s.chunkBufLen, true)
					break
				}
			}
			if err != nil {
				if err == io.EOF {
					// No more data left in baseReader - last chunk.
					// Done reading the last chunk from baseReader.
					s.done = true

					// bytes read from baseReader different than
					// content length provided.
					if s.bytesRead != s.contentLen {
						return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
					}

					// Write the terminating zero-length chunk; the
					// closing CRLF is omitted when a trailer follows.
					s.writeChunk(0, len(s.trailer) == 0)
					if len(s.trailer) > 0 {
						// Trailer must be set now.
						s.addTrailer(s.trailer)
					}
					break
				}
				return 0, err
			}

		}
	}
	return s.buf.Read(buf)
}
220
// Close - makes the underlying io.ReadCloser's Close method available.
func (s *StreamingUSReader) Close() error {
	return s.baseReadCloser.Close()
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
new file mode 100644
index 0000000..1c2f1dc
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
@@ -0,0 +1,403 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "encoding/hex"
23 "fmt"
24 "io"
25 "net/http"
26 "strconv"
27 "strings"
28 "time"
29
30 md5simd "github.com/minio/md5-simd"
31)
32
// Reference for constants used below -
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
const (
	streamingSignAlgorithm        = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
	streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
	streamingPayloadHdr           = "AWS4-HMAC-SHA256-PAYLOAD"
	streamingTrailerHdr           = "AWS4-HMAC-SHA256-TRAILER"
	emptySHA256                   = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" // SHA-256 of the empty string
	payloadChunkSize              = 64 * 1024                                                          // payload bytes per chunk on the wire
	chunkSigConstLen              = 17                                                                 // len(";chunk-signature=")
	signatureStrLen               = 64                                                                 // hex SHA-256, e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
	crlfLen                       = 2                                                                  // len("\r\n")
	trailerKVSeparator            = ":"
	trailerSignature              = "x-amz-trailer-signature"
)
48
// Request headers to be ignored while calculating seed signature for
// a request (they are excluded from the signed-headers set).
var ignoredStreamingHeaders = map[string]bool{
	"Authorization": true,
	"User-Agent":    true,
	"Content-Type":  true,
}
56
57// getSignedChunkLength - calculates the length of chunk metadata
58func getSignedChunkLength(chunkDataSize int64) int64 {
59 return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
60 chunkSigConstLen +
61 signatureStrLen +
62 crlfLen +
63 chunkDataSize +
64 crlfLen
65}
66
67// getStreamLength - calculates the length of the overall stream (data + metadata)
68func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
69 if dataLen <= 0 {
70 return 0
71 }
72
73 chunksCount := int64(dataLen / chunkSize)
74 remainingBytes := int64(dataLen % chunkSize)
75 streamLen := int64(0)
76 streamLen += chunksCount * getSignedChunkLength(chunkSize)
77 if remainingBytes > 0 {
78 streamLen += getSignedChunkLength(remainingBytes)
79 }
80 streamLen += getSignedChunkLength(0)
81 if len(trailers) > 0 {
82 for name, placeholder := range trailers {
83 if len(placeholder) > 0 {
84 streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
85 }
86 }
87 streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen
88 }
89
90 return streamLen
91}
92
93// buildChunkStringToSign - returns the string to sign given chunk data
94// and previous signature.
95func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
96 stringToSignParts := []string{
97 streamingPayloadHdr,
98 t.Format(iso8601DateFormat),
99 getScope(region, t, ServiceTypeS3),
100 previousSig,
101 emptySHA256,
102 chunkChecksum,
103 }
104
105 return strings.Join(stringToSignParts, "\n")
106}
107
108// buildTrailerChunkStringToSign - returns the string to sign given chunk data
109// and previous signature.
110func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
111 stringToSignParts := []string{
112 streamingTrailerHdr,
113 t.Format(iso8601DateFormat),
114 getScope(region, t, ServiceTypeS3),
115 previousSig,
116 chunkChecksum,
117 }
118
119 return strings.Join(stringToSignParts, "\n")
120}
121
// prepareStreamingRequest - prepares a request with appropriate
// headers before computing the seed signature.
func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
	// Set x-amz-content-sha256 header; the trailer variant of the
	// algorithm is advertised when trailers are present.
	if len(req.Trailer) == 0 {
		req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
	} else {
		req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm)
		// Announce each trailer header name so the server knows what to expect.
		for k := range req.Trailer {
			req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
		}
		req.TransferEncoding = []string{"aws-chunked"}
	}

	if sessionToken != "" {
		req.Header.Set("X-Amz-Security-Token", sessionToken)
	}

	req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
	// Content-Length is the framed stream length (signature metadata
	// included); the raw payload length goes in its own header.
	req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
	req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
}
145
// buildChunkHeader - returns the chunk header, i.e.
// hex(chunk-size) + ";chunk-signature=" + signature + "\r\n",
// which precedes the chunk payload on the wire.
func buildChunkHeader(chunkLen int64, signature string) []byte {
	hdr := strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n"
	return []byte(hdr)
}
151
// buildChunkSignature - returns the signature for a data chunk: the chunk
// string-to-sign (chained off previousSignature) signed with the key
// derived from secretAccessKey, region and request time.
func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
	previousSignature, secretAccessKey string,
) string {
	chunkStringToSign := buildChunkStringToSign(reqTime, region,
		previousSignature, chunkCheckSum)
	signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
	return getSignature(signingKey, chunkStringToSign)
}
161
// buildTrailerChunkSignature - returns the signature for the trailer
// chunk, chained off the previous chunk's signature.
func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
	previousSignature, secretAccessKey string,
) string {
	chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
		previousSignature, chunkChecksum)
	signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
	return getSignature(signingKey, chunkStringToSign)
}
171
// setSeedSignature - computes the seed (header) signature for the given
// request and stores it on the reader; every chunk signature chains from
// this value.
func (s *StreamingReader) setSeedSignature(req *http.Request) {
	// Get canonical request
	canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req))

	// Get string to sign from canonical request.
	stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3)

	signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3)

	// Calculate signature.
	s.seedSignature = getSignature(signingKey, stringToSign)
}
185
// StreamingReader implements chunked upload signature as a reader on
// top of req.Body's ReadCloser: chunk header;data;... repeat.
type StreamingReader struct {
	accessKeyID     string
	secretAccessKey string
	sessionToken    string
	region          string
	prevSignature   string         // signature of the previous chunk (seed signature for the first)
	seedSignature   string         // signature over the request headers; anchors the chunk chain
	contentLen      int64          // Content-Length from req header
	baseReadCloser  io.ReadCloser  // underlying io.Reader
	bytesRead       int64          // bytes read from underlying io.Reader
	buf             bytes.Buffer   // holds signed chunk
	chunkBuf        []byte         // holds raw data read from req Body
	chunkBufLen     int            // no. of bytes read so far into chunkBuf
	done            bool           // done reading the underlying reader to EOF
	reqTime         time.Time      // request timestamp used in every string-to-sign
	chunkNum        int            // 1-based index of the chunk being built
	totalChunks     int            // data chunks plus the final zero-length chunk
	lastChunkSize   int            // size in bytes of the final partial data chunk
	trailer         http.Header    // trailer headers emitted (and signed) after the final chunk
	sh256           md5simd.Hasher // hasher for per-chunk SHA-256 checksums
}
209
210// signChunk - signs a chunk read from s.baseReader of chunkLen size.
211func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
212 // Compute chunk signature for next header
213 s.sh256.Reset()
214 s.sh256.Write(s.chunkBuf[:chunkLen])
215 chunckChecksum := hex.EncodeToString(s.sh256.Sum(nil))
216
217 signature := buildChunkSignature(chunckChecksum, s.reqTime,
218 s.region, s.prevSignature, s.secretAccessKey)
219
220 // For next chunk signature computation
221 s.prevSignature = signature
222
223 // Write chunk header into streaming buffer
224 chunkHdr := buildChunkHeader(int64(chunkLen), signature)
225 s.buf.Write(chunkHdr)
226
227 // Write chunk data into streaming buffer
228 s.buf.Write(s.chunkBuf[:chunkLen])
229
230 // Write the chunk trailer.
231 if addCrLf {
232 s.buf.Write([]byte("\r\n"))
233 }
234
235 // Reset chunkBufLen for next chunk read.
236 s.chunkBufLen = 0
237 s.chunkNum++
238}
239
// addSignedTrailer - adds a trailer with the provided headers
// (lower-cased "name:value\n" lines), signs it as the trailer chunk and
// appends trailer plus x-amz-trailer-signature to the output.
func (s *StreamingReader) addSignedTrailer(h http.Header) {
	// Temporarily reuse chunkBuf as scratch space for the trailer block.
	olen := len(s.chunkBuf)
	s.chunkBuf = s.chunkBuf[:0]
	for k, v := range h {
		s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
	}

	s.sh256.Reset()
	s.sh256.Write(s.chunkBuf)
	chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
	// Compute chunk signature
	signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
		s.region, s.prevSignature, s.secretAccessKey)

	// For next chunk signature computation
	s.prevSignature = signature

	s.buf.Write(s.chunkBuf)
	s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n")

	// Restore chunkBuf to its original length and reset counters.
	s.chunkBuf = s.chunkBuf[:olen]
	s.chunkBufLen = 0
	s.chunkNum++
}
267
268// setStreamingAuthHeader - builds and sets authorization header value
269// for streaming signature.
270func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
271 credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3)
272 authParts := []string{
273 signV4Algorithm + " Credential=" + credential,
274 "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
275 "Signature=" + s.seedSignature,
276 }
277
278 // Set authorization header.
279 auth := strings.Join(authParts, ",")
280 req.Header.Set("Authorization", auth)
281}
282
// StreamingSignV4 - provides chunked upload signatureV4 support by
// implementing io.Reader.
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
	region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
) *http.Request {
	// Set headers needed for streaming signature.
	prepareStreamingRequest(req, sessionToken, dataLen, reqTime)

	if req.Body == nil {
		req.Body = io.NopCloser(bytes.NewReader([]byte("")))
	}

	stReader := &StreamingReader{
		baseReadCloser:  req.Body,
		accessKeyID:     accessKeyID,
		secretAccessKey: secretAccessKey,
		sessionToken:    sessionToken,
		region:          region,
		reqTime:         reqTime,
		chunkBuf:        make([]byte, payloadChunkSize),
		contentLen:      dataLen,
		chunkNum:        1,
		totalChunks:     int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, // data chunks + final zero chunk
		lastChunkSize:   int(dataLen % payloadChunkSize),
		sh256:           sh256,
	}
	if len(req.Trailer) > 0 {
		stReader.trailer = req.Trailer
		// Remove the trailer from the request itself: the reader emits
		// it inline (signed) at the end of the chunked body instead.
		req.Trailer = nil
	}

	// Compute the seed signature.
	stReader.setSeedSignature(req)

	// Set the authorization header with the seed signature.
	stReader.setStreamingAuthHeader(req)

	// Set seed signature as prevSignature for subsequent
	// streaming signing process.
	stReader.prevSignature = stReader.seedSignature
	req.Body = stReader

	return req
}
330
// Read - performs chunk upload signature providing an io.Reader
// interface over the signed, framed stream.
func (s *StreamingReader) Read(buf []byte) (int, error) {
	switch {
	// After the last chunk is read from underlying reader, we
	// never re-fill s.buf.
	case s.done:

	// s.buf will be (re-)filled with next chunk when has lesser
	// bytes than asked for.
	case s.buf.Len() < len(buf):
		s.chunkBufLen = 0
		for {
			n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
			// Usually we validate `err` first, but in this case
			// we are validating n > 0 for the following reasons.
			//
			// 1. n > 0, err is one of io.EOF, nil (near end of stream)
			// A Reader returning a non-zero number of bytes at the end
			// of the input stream may return either err == EOF or err == nil
			//
			// 2. n == 0, err is io.EOF (actual end of stream)
			//
			// Callers should always process the n > 0 bytes returned
			// before considering the error err.
			if n1 > 0 {
				s.chunkBufLen += n1
				s.bytesRead += int64(n1)

				if s.chunkBufLen == payloadChunkSize ||
					(s.chunkNum == s.totalChunks-1 &&
						s.chunkBufLen == s.lastChunkSize) {
					// A full chunk (or the final partial chunk) is
					// ready - sign it and write it to s.buf.
					s.signChunk(s.chunkBufLen, true)
					break
				}
			}
			if err != nil {
				if err == io.EOF {
					// No more data left in baseReader - last chunk.
					// Done reading the last chunk from baseReader.
					s.done = true

					// bytes read from baseReader different than
					// content length provided.
					if s.bytesRead != s.contentLen {
						return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
					}

					// Sign the terminating zero-length chunk; the
					// closing CRLF is omitted when a trailer follows.
					s.signChunk(0, len(s.trailer) == 0)
					if len(s.trailer) > 0 {
						// Trailer must be set now.
						s.addSignedTrailer(s.trailer)
					}
					break
				}
				return 0, err
			}

		}
	}
	return s.buf.Read(buf)
}
395
// Close - releases the hasher (making Close idempotent with respect to
// it) and makes the underlying io.ReadCloser's Close method available.
func (s *StreamingReader) Close() error {
	if s.sh256 != nil {
		s.sh256.Close()
		s.sh256 = nil
	}
	return s.baseReadCloser.Close()
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
new file mode 100644
index 0000000..fa4f8c9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
@@ -0,0 +1,319 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "crypto/hmac"
23 "crypto/sha1"
24 "encoding/base64"
25 "fmt"
26 "net/http"
27 "net/url"
28 "sort"
29 "strconv"
30 "strings"
31 "time"
32
33 "github.com/minio/minio-go/v7/pkg/s3utils"
34)
35
// Signature and API related constants.
const (
	// signV2Algorithm is the scheme token that prefixes a V2
	// Authorization header value ("AWS <AccessKeyID>:<Signature>").
	signV2Algorithm = "AWS"
)
40
41// Encode input URL path to URL encoded path.
42func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
43 if virtualHost {
44 reqHost := getHostAddr(req)
45 dotPos := strings.Index(reqHost, ".")
46 if dotPos > -1 {
47 bucketName := reqHost[:dotPos]
48 path = "/" + bucketName
49 path += req.URL.Path
50 path = s3utils.EncodePath(path)
51 return
52 }
53 }
54 path = s3utils.EncodePath(req.URL.Path)
55 return
56}
57
// PreSignV2 - presign the request in following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
//
// expires is the validity window in seconds from now; the resulting
// epoch timestamp is carried in both the Expires header (for signing)
// and the Expires query parameter.
func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
	// Presign is not needed for anonymous credentials.
	if accessKeyID == "" || secretAccessKey == "" {
		return &req
	}

	d := time.Now().UTC()
	// Find epoch expires when the request will expire.
	epochExpires := d.Unix() + expires

	// Add expires header if not present (a caller-supplied Expires wins,
	// since it is part of the string-to-sign).
	if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
		req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
	}

	// Get presigned string to sign and HMAC-SHA1 it.
	stringToSign := preStringToSignV2(req, virtualHost)
	hm := hmac.New(sha1.New, []byte(secretAccessKey))
	hm.Write([]byte(stringToSign))

	// Calculate signature (base64 of the HMAC-SHA1 digest).
	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))

	query := req.URL.Query()
	// Handle specially for Google Cloud Storage.
	if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
		query.Set("GoogleAccessId", accessKeyID)
	} else {
		query.Set("AWSAccessKeyId", accessKeyID)
	}

	// Fill in Expires for presigned query.
	query.Set("Expires", strconv.FormatInt(epochExpires, 10))

	// Encode query and save.
	req.URL.RawQuery = s3utils.QueryEncode(query)

	// Save signature finally; appended raw (path-escaped) so the
	// already-encoded query is not re-encoded by url.Values.
	req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)

	// Return.
	return &req
}
103
// PostPresignSignatureV2 - presigned signature for PostPolicy request:
// base64(HMAC-SHA1(secretAccessKey, policyBase64)).
func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
	mac := hmac.New(sha1.New, []byte(secretAccessKey))
	mac.Write([]byte(policyBase64))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}
112
113// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
114// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
115//
116// StringToSign = HTTP-Verb + "\n" +
117// Content-Md5 + "\n" +
118// Content-Type + "\n" +
119// Date + "\n" +
120// CanonicalizedProtocolHeaders +
121// CanonicalizedResource;
122//
123// CanonicalizedResource = [ "/" + Bucket ] +
124// <HTTP-Request-URI, from the protocol name up to the query string> +
125// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
126//
127// CanonicalizedProtocolHeaders = <described below>
128
129// SignV2 sign the request before Do() (AWS Signature Version 2).
130func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
131 // Signature calculation is not needed for anonymous credentials.
132 if accessKeyID == "" || secretAccessKey == "" {
133 return &req
134 }
135
136 // Initial time.
137 d := time.Now().UTC()
138
139 // Add date if not present.
140 if date := req.Header.Get("Date"); date == "" {
141 req.Header.Set("Date", d.Format(http.TimeFormat))
142 }
143
144 // Calculate HMAC for secretAccessKey.
145 stringToSign := stringToSignV2(req, virtualHost)
146 hm := hmac.New(sha1.New, []byte(secretAccessKey))
147 hm.Write([]byte(stringToSign))
148
149 // Prepare auth header.
150 authHeader := new(bytes.Buffer)
151 authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
152 encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
153 encoder.Write(hm.Sum(nil))
154 encoder.Close()
155
156 // Set Authorization header.
157 req.Header.Set("Authorization", authHeader.String())
158
159 return &req
160}
161
162// From the Amazon docs:
163//
164// StringToSign = HTTP-Verb + "\n" +
165//
166// Content-Md5 + "\n" +
167// Content-Type + "\n" +
168// Expires + "\n" +
169// CanonicalizedProtocolHeaders +
170// CanonicalizedResource;
171func preStringToSignV2(req http.Request, virtualHost bool) string {
172 buf := new(bytes.Buffer)
173 // Write standard headers.
174 writePreSignV2Headers(buf, req)
175 // Write canonicalized protocol headers if any.
176 writeCanonicalizedHeaders(buf, req)
177 // Write canonicalized Query resources if any.
178 writeCanonicalizedResource(buf, req, virtualHost)
179 return buf.String()
180}
181
182// writePreSignV2Headers - write preSign v2 required headers.
183func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
184 buf.WriteString(req.Method + "\n")
185 buf.WriteString(req.Header.Get("Content-Md5") + "\n")
186 buf.WriteString(req.Header.Get("Content-Type") + "\n")
187 buf.WriteString(req.Header.Get("Expires") + "\n")
188}
189
190// From the Amazon docs:
191//
192// StringToSign = HTTP-Verb + "\n" +
193//
194// Content-Md5 + "\n" +
195// Content-Type + "\n" +
196// Date + "\n" +
197// CanonicalizedProtocolHeaders +
198// CanonicalizedResource;
199func stringToSignV2(req http.Request, virtualHost bool) string {
200 buf := new(bytes.Buffer)
201 // Write standard headers.
202 writeSignV2Headers(buf, req)
203 // Write canonicalized protocol headers if any.
204 writeCanonicalizedHeaders(buf, req)
205 // Write canonicalized Query resources if any.
206 writeCanonicalizedResource(buf, req, virtualHost)
207 return buf.String()
208}
209
210// writeSignV2Headers - write signV2 required headers.
211func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
212 buf.WriteString(req.Method + "\n")
213 buf.WriteString(req.Header.Get("Content-Md5") + "\n")
214 buf.WriteString(req.Header.Get("Content-Type") + "\n")
215 buf.WriteString(req.Header.Get("Date") + "\n")
216}
217
218// writeCanonicalizedHeaders - write canonicalized headers.
219func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
220 var protoHeaders []string
221 vals := make(map[string][]string)
222 for k, vv := range req.Header {
223 // All the AMZ headers should be lowercase
224 lk := strings.ToLower(k)
225 if strings.HasPrefix(lk, "x-amz") {
226 protoHeaders = append(protoHeaders, lk)
227 vals[lk] = vv
228 }
229 }
230 sort.Strings(protoHeaders)
231 for _, k := range protoHeaders {
232 buf.WriteString(k)
233 buf.WriteByte(':')
234 for idx, v := range vals[k] {
235 if idx > 0 {
236 buf.WriteByte(',')
237 }
238 buf.WriteString(v)
239 }
240 buf.WriteByte('\n')
241 }
242}
243
// AWS S3 Signature V2 calculation rule is given here:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign

// Whitelist resource list that will be used in query string for signature-V2 calculation.
//
// This list should be kept alphabetically sorted, do not hastily edit.
// writeCanonicalizedResource ranges over this slice in order, so the
// sortedness of the list is what yields the spec-mandated ordering of
// sub-resources in the canonicalized resource string.
var resourceList = []string{
	"acl",
	"cors",
	"delete",
	"encryption",
	"legal-hold",
	"lifecycle",
	"location",
	"logging",
	"notification",
	"partNumber",
	"policy",
	"replication",
	"requestPayment",
	"response-cache-control",
	"response-content-disposition",
	"response-content-encoding",
	"response-content-language",
	"response-content-type",
	"response-expires",
	"retention",
	"select",
	"select-type",
	"tagging",
	"torrent",
	"uploadId",
	"uploads",
	"versionId",
	"versioning",
	"versions",
	"website",
}
282
// From the Amazon docs:
//
// CanonicalizedResource = [ "/" + Bucket ] +
//
//	<HTTP-Request-URI, from the protocol name up to the query string> +
//	[ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
	// Save request URL.
	requestURL := req.URL
	// Get encoded URL path (includes the bucket in virtual-host style).
	buf.WriteString(encodeURL2Path(&req, virtualHost))
	if requestURL.RawQuery != "" {
		var n int
		vals, _ := url.ParseQuery(requestURL.RawQuery)
		// Verify if any sub resource queries are present, if yes
		// canonicalize them. Only whitelisted sub-resources are signed;
		// other query parameters are deliberately excluded.
		for _, resource := range resourceList {
			if vv, ok := vals[resource]; ok && len(vv) > 0 {
				n++
				// First element
				switch n {
				case 1:
					buf.WriteByte('?')
				// The rest
				default:
					buf.WriteByte('&')
				}
				buf.WriteString(resource)
				// Request parameters: only the first value is signed,
				// and it is written unescaped per the V2 rules.
				if len(vv[0]) > 0 {
					buf.WriteByte('=')
					buf.WriteString(vv[0])
				}
			}
		}
	}
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
new file mode 100644
index 0000000..ffd2514
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
@@ -0,0 +1,351 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "encoding/hex"
23 "net/http"
24 "sort"
25 "strconv"
26 "strings"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30)
31
// Signature and API related constants.
const (
	// signV4Algorithm is the algorithm identifier used in V4
	// Authorization headers and presigned query parameters.
	signV4Algorithm = "AWS4-HMAC-SHA256"
	// iso8601DateFormat is the timestamp layout used for X-Amz-Date.
	iso8601DateFormat = "20060102T150405Z"
	// yyyymmdd is the date-only layout used in the credential scope.
	yyyymmdd = "20060102"
)

// Different service types
const (
	ServiceTypeS3  = "s3"
	ServiceTypeSTS = "sts"
)
44
// Excerpts from @lsegal -
// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
//
// * User-Agent
// This is ignored from signing because signing this causes problems with generating pre-signed
// URLs (that are executed by other agents) or when customers pass requests through proxies, which
// may modify the user-agent.
//
// * Authorization
// Is skipped for obvious reasons.
//
// * Accept-Encoding
// Some S3 servers like Hitachi Content Platform do not honor this header for signature
// calculation.
//
// Keys are canonical header names; lookups go through
// http.CanonicalHeaderKey before consulting this map.
var v4IgnoredHeaders = map[string]bool{
	"Accept-Encoding": true,
	"Authorization":   true,
	"User-Agent":      true,
}
64
65// getSigningKey hmac seed to calculate final signature.
66func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte {
67 date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
68 location := sumHMAC(date, []byte(loc))
69 service := sumHMAC(location, []byte(serviceType))
70 signingKey := sumHMAC(service, []byte("aws4_request"))
71 return signingKey
72}
73
// getSignature final signature in hexadecimal form: the HMAC-SHA256 of
// stringToSign under the derived signing key, hex-encoded.
func getSignature(signingKey []byte, stringToSign string) string {
	return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
78
79// getScope generate a string of a specific date, an AWS region, and a
80// service.
81func getScope(location string, t time.Time, serviceType string) string {
82 scope := strings.Join([]string{
83 t.Format(yyyymmdd),
84 location,
85 serviceType,
86 "aws4_request",
87 }, "/")
88 return scope
89}
90
// GetCredential generate a credential string of the form
// "<accessKeyID>/<yyyymmdd>/<region>/<service>/aws4_request".
func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string {
	scope := getScope(location, t, serviceType)
	return accessKeyID + "/" + scope
}
96
97// getHashedPayload get the hexadecimal value of the SHA256 hash of
98// the request payload.
99func getHashedPayload(req http.Request) string {
100 hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
101 if hashedPayload == "" {
102 // Presign does not have a payload, use S3 recommended value.
103 hashedPayload = unsignedPayload
104 }
105 return hashedPayload
106}
107
// getCanonicalHeaders generate a list of request headers for
// signature: one "name:value\n" line per signed header, names
// lowercased and sorted, multi-values comma-joined, and each value
// whitespace-normalized via signV4TrimAll. The host header is always
// included even when absent from req.Header.
func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
	var headers []string
	vals := make(map[string][]string)
	for k, vv := range req.Header {
		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
			continue // ignored header
		}
		headers = append(headers, strings.ToLower(k))
		vals[strings.ToLower(k)] = vv
	}
	// host is signed from the request itself (see getHostAddr), so make
	// sure it is present in the sorted name list.
	if !headerExists("host", headers) {
		headers = append(headers, "host")
	}
	sort.Strings(headers)

	var buf bytes.Buffer
	// Save all the headers in canonical form <header>:<value> newline
	// separated for each header.
	for _, k := range headers {
		buf.WriteString(k)
		buf.WriteByte(':')
		switch {
		case k == "host":
			// Host value comes from the request, not req.Header.
			buf.WriteString(getHostAddr(&req))
			buf.WriteByte('\n')
		default:
			for idx, v := range vals[k] {
				if idx > 0 {
					buf.WriteByte(',')
				}
				buf.WriteString(signV4TrimAll(v))
			}
			buf.WriteByte('\n')
		}
	}
	return buf.String()
}
147
// headerExists reports whether key is present in headers.
func headerExists(key string, headers []string) bool {
	for i := range headers {
		if headers[i] == key {
			return true
		}
	}
	return false
}
156
157// getSignedHeaders generate all signed request headers.
158// i.e lexically sorted, semicolon-separated list of lowercase
159// request header names.
160func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
161 var headers []string
162 for k := range req.Header {
163 if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
164 continue // Ignored header found continue.
165 }
166 headers = append(headers, strings.ToLower(k))
167 }
168 if !headerExists("host", headers) {
169 headers = append(headers, "host")
170 }
171 sort.Strings(headers)
172 return strings.Join(headers, ";")
173}
174
// getCanonicalRequest generate a canonical request of style.
//
// canonicalRequest =
//
//	<HTTPMethod>\n
//	<CanonicalURI>\n
//	<CanonicalQueryString>\n
//	<CanonicalHeaders>\n
//	<SignedHeaders>\n
//	<HashedPayload>
func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
	// SigV4 requires spaces encoded as %20, not '+'; re-encoding via
	// url.Values.Encode also sorts the query parameters as required.
	req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
	canonicalRequest := strings.Join([]string{
		req.Method,
		s3utils.EncodePath(req.URL.Path),
		req.URL.RawQuery,
		getCanonicalHeaders(req, ignoredHeaders),
		getSignedHeaders(req, ignoredHeaders),
		hashedPayload,
	}, "\n")
	return canonicalRequest
}
197
198// getStringToSign a string based on selected query values.
199func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
200 stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
201 stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
202 stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest)))
203 return stringToSign
204}
205
// PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
// expires is the validity window in seconds, carried in X-Amz-Expires.
func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
	// Presign is not needed for anonymous credentials.
	if accessKeyID == "" || secretAccessKey == "" {
		return &req
	}

	// Initial time.
	t := time.Now().UTC()

	// Get credential string.
	credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)

	// Get all signed headers.
	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)

	// Set URL query with every presign parameter except the signature,
	// which is computed over this very query string.
	query := req.URL.Query()
	query.Set("X-Amz-Algorithm", signV4Algorithm)
	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
	query.Set("X-Amz-SignedHeaders", signedHeaders)
	query.Set("X-Amz-Credential", credential)
	// Set session token if available.
	if sessionToken != "" {
		query.Set("X-Amz-Security-Token", sessionToken)
	}
	req.URL.RawQuery = query.Encode()

	// Get canonical request.
	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))

	// Get string to sign from canonical request.
	stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)

	// Get hmac signing key.
	signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)

	// Calculate signature.
	signature := getSignature(signingKey, stringToSign)

	// Add signature header to RawQuery; appended raw (hex is URL-safe)
	// so the already-encoded query is not re-encoded.
	req.URL.RawQuery += "&X-Amz-Signature=" + signature

	return &req
}
253
254// PostPresignSignatureV4 - presigned signature for PostPolicy
255// requests.
256func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
257 // Get signining key.
258 signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
259 // Calculate signature.
260 signature := getSignature(signingkey, policyBase64)
261 return signature
262}
263
// SignV4STS - signature v4 for STS request. No session token is used
// and the credential scope service is "sts".
func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
	return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil)
}
268
// Internal function called for different service types.
//
// Sets X-Amz-Date (and X-Amz-Security-Token when a session token is
// given), computes the SigV4 Authorization header, and — when trailing
// headers are requested — switches the request body to unsigned
// aws-chunked encoding carrying signed trailers.
func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request {
	// Signature calculation is not needed for anonymous credentials.
	if accessKeyID == "" || secretAccessKey == "" {
		return &req
	}

	// Initial time.
	t := time.Now().UTC()

	// Set x-amz-date.
	req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))

	// Set session token if available.
	if sessionToken != "" {
		req.Header.Set("X-Amz-Security-Token", sessionToken)
	}

	if len(trailer) > 0 {
		// Announce each trailing header (lowercased) so it is part of
		// the signed request metadata.
		for k := range trailer {
			req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
		}

		req.Header.Set("Content-Encoding", "aws-chunked")
		req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
	}

	// Capture the payload hash before the STS special case below.
	hashedPayload := getHashedPayload(req)
	if serviceType == ServiceTypeSTS {
		// Content sha256 header is not sent with the request
		// but it is expected to have sha256 of payload for signature
		// in STS service type request.
		req.Header.Del("X-Amz-Content-Sha256")
	}

	// Get canonical request.
	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload)

	// Get string to sign from canonical request.
	stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType)

	// Get hmac signing key.
	signingKey := getSigningKey(secretAccessKey, location, t, serviceType)

	// Get credential string.
	credential := GetCredential(accessKeyID, location, t, serviceType)

	// Get all signed headers.
	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)

	// Calculate signature.
	signature := getSignature(signingKey, stringToSign)

	// If regular request, construct the final authorization header.
	parts := []string{
		signV4Algorithm + " Credential=" + credential,
		"SignedHeaders=" + signedHeaders,
		"Signature=" + signature,
	}

	// Set authorization header.
	auth := strings.Join(parts, ", ")
	req.Header.Set("Authorization", auth)

	if len(trailer) > 0 {
		// Use custom chunked encoding: the body is wrapped so the
		// trailers are emitted (signed) after the payload.
		req.Trailer = trailer
		return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
	}
	return &req
}
340
// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
// Standard S3 header-based signing; no trailing headers.
func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil)
}
346
// SignV4Trailer sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
// Like SignV4, but additionally declares and signs the given trailing headers.
func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request {
	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer)
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
new file mode 100644
index 0000000..87c9939
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
@@ -0,0 +1,62 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "crypto/hmac"
22 "crypto/sha256"
23 "net/http"
24 "strings"
25)
26
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// the request payload hash is not computed (e.g. presigned requests).
const unsignedPayload = "UNSIGNED-PAYLOAD"
29
// sum256 returns the SHA-256 digest of data as a byte slice.
func sum256(data []byte) []byte {
	digest := sha256.Sum256(data)
	return digest[:]
}
36
// sumHMAC returns the HMAC-SHA256 of data under key.
func sumHMAC(key, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data)
	return mac.Sum(nil)
}
43
44// getHostAddr returns host header if available, otherwise returns host from URL
45func getHostAddr(req *http.Request) string {
46 host := req.Header.Get("host")
47 if host != "" && req.Host != host {
48 return host
49 }
50 if req.Host != "" {
51 return req.Host
52 }
53 return req.URL.Host
54}
55
// signV4TrimAll trims leading/trailing whitespace and collapses every
// interior whitespace run to a single space, following Trimall() in
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
func signV4TrimAll(input string) string {
	// strings.Fields splits on runs of unicode whitespace, which both
	// trims the ends and collapses interior runs in one pass.
	fields := strings.Fields(input)
	return strings.Join(fields, " ")
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
new file mode 100644
index 0000000..b5fb956
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
@@ -0,0 +1,66 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package sse
19
20import "encoding/xml"
21
// ApplySSEByDefault defines default encryption configuration, KMS or SSE. To activate
// KMS, SSEAlgorithm needs to be set to "aws:kms".
// Minio currently does not support Kms.
type ApplySSEByDefault struct {
	// KmsMasterKeyID is the KMS key to use; only meaningful with "aws:kms".
	KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
	// SSEAlgorithm is "AES256" (SSE-S3) or "aws:kms" (SSE-KMS).
	SSEAlgorithm string `xml:"SSEAlgorithm"`
}

// Rule layer encapsulates default encryption configuration
type Rule struct {
	Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
}

// Configuration is the default encryption configuration structure,
// matching the S3 ServerSideEncryptionConfiguration XML document.
type Configuration struct {
	XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
	Rules   []Rule   `xml:"Rule"`
}
40
41// NewConfigurationSSES3 initializes a new SSE-S3 configuration
42func NewConfigurationSSES3() *Configuration {
43 return &Configuration{
44 Rules: []Rule{
45 {
46 Apply: ApplySSEByDefault{
47 SSEAlgorithm: "AES256",
48 },
49 },
50 },
51 }
52}
53
54// NewConfigurationSSEKMS initializes a new SSE-KMS configuration
55func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration {
56 return &Configuration{
57 Rules: []Rule{
58 {
59 Apply: ApplySSEByDefault{
60 KmsMasterKeyID: kmsMasterKey,
61 SSEAlgorithm: "aws:kms",
62 },
63 },
64 },
65 }
66}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
new file mode 100644
index 0000000..7a84a6f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -0,0 +1,413 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package tags
19
20import (
21 "encoding/xml"
22 "io"
23 "net/url"
24 "regexp"
25 "sort"
26 "strings"
27 "unicode/utf8"
28)
29
// Error contains tag specific error.
type Error interface {
	error
	Code() string
}

// errTag is the concrete Error implementation used by this package.
type errTag struct {
	code    string
	message string
}

// Code contains error code.
func (err errTag) Code() string {
	return err.code
}

// Error contains error message.
func (err errTag) Error() string {
	return err.message
}

// Sentinel validation errors; callers compare these by identity.
var (
	errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"}
	errTooManyTags       = &errTag{"BadRequest", "Tags cannot be more than 50"}
	errInvalidTagKey     = &errTag{"InvalidTag", "The TagKey you have provided is invalid"}
	errInvalidTagValue   = &errTag{"InvalidTag", "The TagValue you have provided is invalid"}
	errDuplicateTagKey   = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"}
)
58
// Tag comes with limitation as per
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
const (
	maxKeyLength      = 128 // maximum tag key length, in runes
	maxValueLength    = 256 // maximum tag value length, in runes
	maxObjectTagCount = 10  // per-object tag limit
	maxTagCount       = 50  // per-bucket tag limit
)

// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
// borrowed from this article and also testing various ASCII characters following regex
// is supported by AWS S3 for both tags and values.
// NOTE(review): '-' appears twice in the character class ("9-+" and
// "\-"); harmless, but do not "clean up" without re-testing against AWS.
var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
73
74func checkKey(key string) error {
75 if len(key) == 0 {
76 return errInvalidTagKey
77 }
78
79 if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) {
80 return errInvalidTagKey
81 }
82
83 return nil
84}
85
86func checkValue(value string) error {
87 if value != "" {
88 if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) {
89 return errInvalidTagValue
90 }
91 }
92
93 return nil
94}
95
// Tag denotes key and value.
type Tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

// String returns the tag as "key=value" (no escaping).
func (tag Tag) String() string {
	return tag.Key + "=" + tag.Value
}

// IsEmpty returns whether this tag is empty or not.
// A tag with an empty key counts as empty regardless of its value.
func (tag Tag) IsEmpty() bool {
	return tag.Key == ""
}

// Validate checks this tag's key and value against the AWS rules.
func (tag Tag) Validate() error {
	if err := checkKey(tag.Key); err != nil {
		return err
	}

	return checkValue(tag.Value)
}
119
120// MarshalXML encodes to XML data.
121func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
122 if err := tag.Validate(); err != nil {
123 return err
124 }
125
126 type subTag Tag // to avoid recursively calling MarshalXML()
127 return e.EncodeElement(subTag(tag), start)
128}
129
130// UnmarshalXML decodes XML data to tag.
131func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
132 type subTag Tag // to avoid recursively calling UnmarshalXML()
133 var st subTag
134 if err := d.DecodeElement(&st, &start); err != nil {
135 return err
136 }
137
138 if err := Tag(st).Validate(); err != nil {
139 return err
140 }
141
142 *tag = Tag(st)
143 return nil
144}
145
// tagSet represents list of unique tags.
type tagSet struct {
	// tagMap holds key -> value; key uniqueness is inherent to the map.
	tagMap map[string]string
	// isObject selects the object (10) vs bucket (50) tag-count limit.
	isObject bool
}
151
152func (tags tagSet) String() string {
153 if len(tags.tagMap) == 0 {
154 return ""
155 }
156 var buf strings.Builder
157 keys := make([]string, 0, len(tags.tagMap))
158 for k := range tags.tagMap {
159 keys = append(keys, k)
160 }
161 sort.Strings(keys)
162 for _, k := range keys {
163 keyEscaped := url.QueryEscape(k)
164 valueEscaped := url.QueryEscape(tags.tagMap[k])
165 if buf.Len() > 0 {
166 buf.WriteByte('&')
167 }
168 buf.WriteString(keyEscaped)
169 buf.WriteByte('=')
170 buf.WriteString(valueEscaped)
171 }
172 return buf.String()
173}
174
// remove deletes key from the set; removing an absent key is a no-op.
func (tags *tagSet) remove(key string) {
	delete(tags.tagMap, key)
}
178
// set inserts key=value. With failOnExist, an existing key is rejected
// (the duplicate check deliberately runs before validation). The
// object/bucket tag-count limit is enforced before inserting.
func (tags *tagSet) set(key, value string, failOnExist bool) error {
	if failOnExist {
		if _, found := tags.tagMap[key]; found {
			return errDuplicateTagKey
		}
	}

	if err := checkKey(key); err != nil {
		return err
	}

	if err := checkValue(value); err != nil {
		return err
	}

	// NOTE(review): once the map is at the limit this also rejects
	// overwrites of existing keys (which would not grow the map) —
	// confirm that is intended.
	if tags.isObject {
		if len(tags.tagMap) == maxObjectTagCount {
			return errTooManyObjectTags
		}
	} else if len(tags.tagMap) == maxTagCount {
		return errTooManyTags
	}

	tags.tagMap[key] = value
	return nil
}
205
// count returns the number of tags in the set.
func (tags tagSet) count() int {
	return len(tags.tagMap)
}
209
210func (tags tagSet) toMap() map[string]string {
211 m := make(map[string]string, len(tags.tagMap))
212 for key, value := range tags.tagMap {
213 m[key] = value
214 }
215 return m
216}
217
218// MarshalXML encodes to XML data.
219func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
220 tagList := struct {
221 Tags []Tag `xml:"Tag"`
222 }{}
223
224 tagList.Tags = make([]Tag, 0, len(tags.tagMap))
225 for key, value := range tags.tagMap {
226 tagList.Tags = append(tagList.Tags, Tag{key, value})
227 }
228
229 return e.EncodeElement(tagList, start)
230}
231
232// UnmarshalXML decodes XML data to tag list.
233func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
234 tagList := struct {
235 Tags []Tag `xml:"Tag"`
236 }{}
237
238 if err := d.DecodeElement(&tagList, &start); err != nil {
239 return err
240 }
241
242 if tags.isObject {
243 if len(tagList.Tags) > maxObjectTagCount {
244 return errTooManyObjectTags
245 }
246 } else if len(tagList.Tags) > maxTagCount {
247 return errTooManyTags
248 }
249
250 m := make(map[string]string, len(tagList.Tags))
251 for _, tag := range tagList.Tags {
252 if _, found := m[tag.Key]; found {
253 return errDuplicateTagKey
254 }
255
256 m[tag.Key] = tag.Value
257 }
258
259 tags.tagMap = m
260 return nil
261}
262
// tagging is the XML wrapper (<Tagging><TagSet>…</TagSet></Tagging>)
// exchanged with the S3 bucket/object tagging APIs.
type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  *tagSet  `xml:"TagSet"`
}
267
// Tags is list of tags of XML request/response as per
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody
//
// It is a named variant of the internal tagging wrapper, exposing the
// exported API around it.
type Tags tagging
271
// String returns the tags as a URL-encoded query string, e.g.
// "key1=value1&key2=value2", with keys in sorted order.
func (tags Tags) String() string {
	return tags.TagSet.String()
}
275
// Remove removes a tag by its key.
// Removing an absent key is a no-op.
func (tags *Tags) Remove(key string) {
	tags.TagSet.remove(key)
}
280
// Set sets new tag.
// An existing key is overwritten; the key/value are validated and the
// applicable tag-count limit is enforced.
func (tags *Tags) Set(key, value string) error {
	return tags.TagSet.set(key, value, false)
}
285
// Count - return number of tags accounted for
func (tags Tags) Count() int {
	return tags.TagSet.count()
}
290
// ToMap returns copy of tags.
// The returned map is independent of the internal state.
func (tags Tags) ToMap() map[string]string {
	return tags.TagSet.toMap()
}
295
// MapToObjectTags converts an input map of key and value into
// *Tags data structure with validation.
// Object-tag limits apply (isObject = true).
func MapToObjectTags(tagMap map[string]string) (*Tags, error) {
	return NewTags(tagMap, true)
}
301
// MapToBucketTags converts an input map of key and value into
// *Tags data structure with validation.
// Bucket-tag limits apply (isObject = false).
func MapToBucketTags(tagMap map[string]string) (*Tags, error) {
	return NewTags(tagMap, false)
}
307
308// NewTags creates Tags from tagMap, If isObject is set, it validates for object tags.
309func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) {
310 tagging := &Tags{
311 TagSet: &tagSet{
312 tagMap: make(map[string]string),
313 isObject: isObject,
314 },
315 }
316
317 for key, value := range tagMap {
318 if err := tagging.TagSet.set(key, value, true); err != nil {
319 return nil, err
320 }
321 }
322
323 return tagging, nil
324}
325
326func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) {
327 tagging := &Tags{
328 TagSet: &tagSet{
329 tagMap: make(map[string]string),
330 isObject: isObject,
331 },
332 }
333
334 if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
335 return nil, err
336 }
337
338 return tagging, nil
339}
340
// ParseBucketXML decodes XML data of tags in reader specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax.
// Bucket-tag limits apply to the decoded set.
func ParseBucketXML(reader io.Reader) (*Tags, error) {
	return unmarshalXML(reader, false)
}
346
// ParseObjectXML decodes XML data of tags in reader specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax
// Object-tag limits apply to the decoded set.
func ParseObjectXML(reader io.Reader) (*Tags, error) {
	return unmarshalXML(reader, true)
}
352
// stringsCut slices s around the first instance of sep,
// returning the text before and after sep.
// The found result reports whether sep appears in s.
// If sep does not appear in s, cut returns s, "", false.
// (Local equivalent of strings.Cut, kept so the package does not
// require Go 1.18.)
func stringsCut(s, sep string) (before, after string, found bool) {
	i := strings.Index(s, sep)
	if i < 0 {
		return s, "", false
	}
	return s[:i], s[i+len(sep):], true
}
363
364func (tags *tagSet) parseTags(tgs string) (err error) {
365 for tgs != "" {
366 var key string
367 key, tgs, _ = stringsCut(tgs, "&")
368 if key == "" {
369 continue
370 }
371 key, value, _ := stringsCut(key, "=")
372 key, err1 := url.QueryUnescape(key)
373 if err1 != nil {
374 if err == nil {
375 err = err1
376 }
377 continue
378 }
379 value, err1 = url.QueryUnescape(value)
380 if err1 != nil {
381 if err == nil {
382 err = err1
383 }
384 continue
385 }
386 if err = tags.set(key, value, true); err != nil {
387 return err
388 }
389 }
390 return err
391}
392
393// Parse decodes HTTP query formatted string into tags which is limited by isObject.
394// A query formatted string is like "key1=value1&key2=value2".
395func Parse(s string, isObject bool) (*Tags, error) {
396 tagging := &Tags{
397 TagSet: &tagSet{
398 tagMap: make(map[string]string),
399 isObject: isObject,
400 },
401 }
402
403 if err := tagging.TagSet.parseTags(s); err != nil {
404 return nil, err
405 }
406
407 return tagging, nil
408}
409
// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2".
// Object-tag limits apply to the decoded set.
func ParseObjectTags(s string) (*Tags, error) {
	return Parse(s, true)
}