path: root/vendor/github.com/minio/minio-go/v7/api.go
author    Rutger Broekhoff  2023-12-29 21:31:53 +0100
committer Rutger Broekhoff  2023-12-29 21:31:53 +0100
commit    404aeae4545d2426c089a5f8d5e82dae56f5212b (patch)
tree      2d84e00af272b39fc04f3795ae06bc48970e57b5 /vendor/github.com/minio/minio-go/v7/api.go
parent    209d8b0187ed025dec9ac149ebcced3462877bff (diff)
Make Nix builds work
Diffstat (limited to 'vendor/github.com/minio/minio-go/v7/api.go')
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api.go  995
1 file changed, 995 insertions, 0 deletions
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
new file mode 100644
index 0000000..f8a9b34
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -0,0 +1,995 @@
/*
 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
 * Copyright 2015-2023 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bytes"
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"hash/crc32"
	"io"
	"math/rand"
	"net"
	"net/http"
	"net/http/cookiejar"
	"net/http/httptrace"
	"net/http/httputil"
	"net/url"
	"os"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	md5simd "github.com/minio/md5-simd"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/s3utils"
	"github.com/minio/minio-go/v7/pkg/signer"
	"golang.org/x/net/publicsuffix"
)

// Client implements Amazon S3 compatible methods.
type Client struct {
	// Standard options.

	// Parsed endpoint url provided by the user.
	endpointURL *url.URL

	// Holds various credential providers.
	credsProvider *credentials.Credentials

	// Custom signerType value overrides all credentials.
	overrideSignerType credentials.SignatureType

	// User supplied.
	appInfo struct {
		appName    string
		appVersion string
	}

	// Indicate whether we are using https or not
	secure bool

	// Needs allocation.
	httpClient     *http.Client
	httpTrace      *httptrace.ClientTrace
	bucketLocCache *bucketLocationCache

	// Advanced functionality.
	isTraceEnabled  bool
	traceErrorsOnly bool
	traceOutput     io.Writer

	// S3 specific accelerated endpoint.
	s3AccelerateEndpoint string

	// Region endpoint
	region string

	// Random seed.
	random *rand.Rand

	// lookup indicates type of url lookup supported by server. If not specified,
	// default to Auto.
	lookup BucketLookupType

	// Factory for MD5 hash functions.
	md5Hasher    func() md5simd.Hasher
	sha256Hasher func() md5simd.Hasher

	healthStatus int32

	trailingHeaderSupport bool
}

// Options for New method
type Options struct {
	Creds        *credentials.Credentials
	Secure       bool
	Transport    http.RoundTripper
	Trace        *httptrace.ClientTrace
	Region       string
	BucketLookup BucketLookupType

	// Allows setting a custom region lookup based on URL pattern
	// not all URL patterns are covered by this library so if you
	// have custom endpoints with many regions you can use this
	// function to perform region lookups appropriately.
	CustomRegionViaURL func(u url.URL) string

	// TrailingHeaders indicates server support of trailing headers.
	// Only supported for v4 signatures.
	TrailingHeaders bool

	// Custom hash routines. Leave nil to use standard.
	CustomMD5    func() md5simd.Hasher
	CustomSHA256 func() md5simd.Hasher
}
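
// Illustrative sketch (not part of the upstream file): an Options value for a
// hypothetical multi-region deployment whose region is encoded in the second
// hostname label. The endpoint pattern, keys and regions are placeholders.
//
//	opts := &minio.Options{
//		Creds:        credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
//		Secure:       true,
//		BucketLookup: minio.BucketLookupPath,
//		CustomRegionViaURL: func(u url.URL) string {
//			// e.g. "s3.eu-west-1.example.com" -> "eu-west-1"
//			if parts := strings.SplitN(u.Hostname(), ".", 3); len(parts) == 3 {
//				return parts[1]
//			}
//			return "us-east-1"
//		},
//	}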

// Global constants.
const (
	libraryName    = "minio-go"
	libraryVersion = "v7.0.66"
)

// User Agent should always follow the below style.
// Please open an issue to discuss any new changes here.
//
//	MinIO (OS; ARCH) LIB/VER APP/VER
const (
	libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)

// BucketLookupType is type of url lookup supported by server.
type BucketLookupType int

// Different types of url lookup supported by the server. Initialized to BucketLookupAuto.
const (
	BucketLookupAuto BucketLookupType = iota
	BucketLookupDNS
	BucketLookupPath
)

// New - instantiate minio client with options
func New(endpoint string, opts *Options) (*Client, error) {
	if opts == nil {
		return nil, errors.New("no options provided")
	}
	clnt, err := privateNew(endpoint, opts)
	if err != nil {
		return nil, err
	}
	// If Amazon S3 set to signature v4.
	if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
		clnt.overrideSignerType = credentials.SignatureV4
	}

	return clnt, nil
}
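
// Illustrative usage sketch (not part of the upstream file): constructing a
// client from application code; the endpoint and credentials are placeholders.
//
//	package main
//
//	import (
//		"log"
//
//		"github.com/minio/minio-go/v7"
//		"github.com/minio/minio-go/v7/pkg/credentials"
//	)
//
//	func main() {
//		client, err := minio.New("s3.example.com:9000", &minio.Options{
//			Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
//			Secure: true,
//		})
//		if err != nil {
//			log.Fatalln(err)
//		}
//		log.Println("endpoint:", client.EndpointURL())
//	}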

// EndpointURL returns the URL of the S3 endpoint.
func (c *Client) EndpointURL() *url.URL {
	endpoint := *c.endpointURL // copy to prevent callers from modifying internal state
	return &endpoint
}

// lockedRandSource provides protected rand source, implements rand.Source interface.
type lockedRandSource struct {
	lk  sync.Mutex
	src rand.Source
}

// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
func (r *lockedRandSource) Int63() (n int64) {
	r.lk.Lock()
	n = r.src.Int63()
	r.lk.Unlock()
	return
}

// Seed uses the provided seed value to initialize the generator to a
// deterministic state.
func (r *lockedRandSource) Seed(seed int64) {
	r.lk.Lock()
	r.src.Seed(seed)
	r.lk.Unlock()
}

func privateNew(endpoint string, opts *Options) (*Client, error) {
	// construct endpoint.
	endpointURL, err := getEndpointURL(endpoint, opts.Secure)
	if err != nil {
		return nil, err
	}

	// Initialize cookies to preserve server sent cookies if any and replay
	// them upon each request.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		return nil, err
	}

	// instantiate new Client.
	clnt := new(Client)

	// Save the credentials.
	clnt.credsProvider = opts.Creds

	// Remember whether we are using https or not
	clnt.secure = opts.Secure

	// Save endpoint URL, user agent for future uses.
	clnt.endpointURL = endpointURL

	transport := opts.Transport
	if transport == nil {
		transport, err = DefaultTransport(opts.Secure)
		if err != nil {
			return nil, err
		}
	}

	clnt.httpTrace = opts.Trace

	// Instantiate http client and bucket location cache.
	clnt.httpClient = &http.Client{
		Jar:       jar,
		Transport: transport,
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}

	// Set custom region; if region is empty, the bucket location cache is used automatically.
	if opts.Region == "" {
		if opts.CustomRegionViaURL != nil {
			opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL)
		} else {
			opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
		}
	}
	clnt.region = opts.Region

	// Instantiate bucket location cache.
	clnt.bucketLocCache = newBucketLocationCache()

	// Introduce a new locked random seed.
	clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})

	// Add default md5 hasher.
	clnt.md5Hasher = opts.CustomMD5
	clnt.sha256Hasher = opts.CustomSHA256
	if clnt.md5Hasher == nil {
		clnt.md5Hasher = newMd5Hasher
	}
	if clnt.sha256Hasher == nil {
		clnt.sha256Hasher = newSHA256Hasher
	}

	clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4()

	// Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
	// by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
	clnt.lookup = opts.BucketLookup

	// healthcheck is not initialized
	clnt.healthStatus = unknown

	// Return.
	return clnt, nil
}

// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName, appVersion string) {
	// if app name and version are not set, we do not set a new user agent.
	if appName != "" && appVersion != "" {
		c.appInfo.appName = appName
		c.appInfo.appVersion = appVersion
	}
}
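
// Illustrative sketch (not part of the upstream file): tagging requests with
// application details; the name and version are placeholders. With
// setUserAgent below this yields a header of the form
// "MinIO (<os>; <arch>) minio-go/v7.0.66 my-app/1.0.2".
//
//	client.SetAppInfo("my-app", "1.0.2")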

// TraceOn - enable HTTP tracing.
func (c *Client) TraceOn(outputStream io.Writer) {
	// if outputStream is nil then default to os.Stdout.
	if outputStream == nil {
		outputStream = os.Stdout
	}
	// Sets a new output stream.
	c.traceOutput = outputStream

	// Enable tracing.
	c.isTraceEnabled = true
}

// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced.
func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) {
	c.TraceOn(outputStream)
	c.traceErrorsOnly = true
}

// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call.
// If all tracing needs to be turned off, call TraceOff().
func (c *Client) TraceErrorsOnlyOff() {
	c.traceErrorsOnly = false
}

// TraceOff - disable HTTP tracing.
func (c *Client) TraceOff() {
	// Disable tracing.
	c.isTraceEnabled = false
	c.traceErrorsOnly = false
}
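
// Illustrative sketch (not part of the upstream file): tracing requests to
// stderr while debugging, then restricting the dump to failed requests only.
//
//	client.TraceOn(os.Stderr)
//	// ... issue a few requests ...
//	client.TraceErrorsOnlyOn(os.Stderr)
//	defer client.TraceOff()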

// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
// requests. This feature is specific to S3; for all other endpoints this
// function does nothing. To read further details on s3 transfer acceleration
// please visit:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
		c.s3AccelerateEndpoint = accelerateEndpoint
	}
}
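
// Illustrative sketch (not part of the upstream file): opting in to S3
// Transfer Acceleration; "s3-accelerate.amazonaws.com" is the endpoint AWS
// documents for this feature, and the call is a no-op for non-Amazon
// endpoints.
//
//	client.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")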

// hashMaterials provides relevant initialized hash algo writers
// based on the expected signature type.
//
//   - For signature v4 request if the connection is insecure compute only sha256.
//   - For signature v4 request if the connection is secure compute only md5.
//   - For anonymous request compute md5.
func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
	hashSums = make(map[string][]byte)
	hashAlgos = make(map[string]md5simd.Hasher)
	if c.overrideSignerType.IsV4() {
		if c.secure {
			hashAlgos["md5"] = c.md5Hasher()
		} else {
			if isSha256Requested {
				hashAlgos["sha256"] = c.sha256Hasher()
			}
		}
	} else {
		if c.overrideSignerType.IsAnonymous() {
			hashAlgos["md5"] = c.md5Hasher()
		}
	}
	if isMd5Requested {
		hashAlgos["md5"] = c.md5Hasher()
	}
	return hashAlgos, hashSums
}
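
// Illustrative sketch of the decision above for a Signature V4 client: over
// TLS only an MD5 hasher is handed out, over plain HTTP a SHA-256 hasher is
// used when requested.
//
//	algos, _ := c.hashMaterials(false, true)
//	// c.secure == true  -> algos contains "md5"
//	// c.secure == false -> algos contains "sha256"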

const (
	unknown = -1
	offline = 0
	online  = 1
)

// IsOnline returns true if healthcheck is enabled and the client is online.
// If the HealthCheck function has not been called this will always return true.
func (c *Client) IsOnline() bool {
	return !c.IsOffline()
}

// sets online healthStatus to offline
func (c *Client) markOffline() {
	atomic.CompareAndSwapInt32(&c.healthStatus, online, offline)
}

// IsOffline returns true if healthcheck is enabled and the client is offline.
// If the HealthCheck function has not been called this will always return false.
func (c *Client) IsOffline() bool {
	return atomic.LoadInt32(&c.healthStatus) == offline
}

// HealthCheck starts a healthcheck to see if endpoint is up.
// Returns a context cancellation function, to stop the health check,
// and an error if health check is already started.
func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
	if atomic.LoadInt32(&c.healthStatus) != unknown {
		return nil, fmt.Errorf("health check is running")
	}
	if hcDuration < 1*time.Second {
		return nil, fmt.Errorf("health check duration should be at least 1 second")
	}
	probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
	ctx, cancelFn := context.WithCancel(context.Background())
	atomic.StoreInt32(&c.healthStatus, offline)
	{
		// Change to online, if we can connect.
		gctx, gcancel := context.WithTimeout(ctx, 3*time.Second)
		_, err := c.getBucketLocation(gctx, probeBucketName)
		gcancel()
		if !IsNetworkOrHostDown(err, false) {
			switch ToErrorResponse(err).Code {
			case "NoSuchBucket", "AccessDenied", "":
				atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
			}
		}
	}

	go func(duration time.Duration) {
		timer := time.NewTimer(duration)
		defer timer.Stop()
		for {
			select {
			case <-ctx.Done():
				atomic.StoreInt32(&c.healthStatus, unknown)
				return
			case <-timer.C:
				// Do health check the first time and ONLY if the connection is marked offline
				if c.IsOffline() {
					gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second)
					_, err := c.getBucketLocation(gctx, probeBucketName)
					gcancel()
					if !IsNetworkOrHostDown(err, false) {
						switch ToErrorResponse(err).Code {
						case "NoSuchBucket", "AccessDenied", "":
							atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
						}
					}
				}

				timer.Reset(duration)
			}
		}
	}(hcDuration)
	return cancelFn, nil
}
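
// Illustrative usage sketch (not part of the upstream file): probing the
// endpoint every 5 seconds and consulting the cached status before issuing
// requests.
//
//	cancel, err := client.HealthCheck(5 * time.Second)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer cancel()
//
//	if client.IsOnline() {
//		// safe to issue requests
//	}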

// requestMetadata - is a container for all the values to make a request.
type requestMetadata struct {
	// If set newRequest presigns the URL.
	presignURL bool

	// User supplied.
	bucketName         string
	objectName         string
	queryValues        url.Values
	customHeader       http.Header
	extraPresignHeader http.Header
	expires            int64

	// Generated by our internal code.
	bucketLocation   string
	contentBody      io.Reader
	contentLength    int64
	contentMD5Base64 string // carries base64 encoded md5sum
	contentSHA256Hex string // carries hex encoded sha256sum
	streamSha256     bool
	addCrc           bool
	trailer          http.Header // (http.Request).Trailer. Requires v4 signature.
}
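
// Illustrative sketch (not part of the upstream file) of roughly how a simple
// object download is described internally before being handed to
// executeMethod; the bucket and object names are placeholders, and
// emptySHA256Hex is assumed to be the package's constant for the SHA-256 of
// an empty body.
//
//	metadata := requestMetadata{
//		bucketName:       "my-bucket",
//		objectName:       "my-object",
//		queryValues:      url.Values{},
//		contentSHA256Hex: emptySHA256Hex,
//	}
//	resp, err := c.executeMethod(ctx, http.MethodGet, metadata)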

// dumpHTTP - dump HTTP request and response.
func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error {
	// Starts http dump.
	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
	if err != nil {
		return err
	}

	// Filter out Signature field from Authorization header.
	origAuth := req.Header.Get("Authorization")
	if origAuth != "" {
		req.Header.Set("Authorization", redactSignature(origAuth))
	}

	// Only display request header.
	reqTrace, err := httputil.DumpRequestOut(req, false)
	if err != nil {
		return err
	}

	// Write request to trace output.
	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
	if err != nil {
		return err
	}

	// Only display response header.
	var respTrace []byte

	// For errors we make sure to dump response body as well.
	if resp.StatusCode != http.StatusOK &&
		resp.StatusCode != http.StatusPartialContent &&
		resp.StatusCode != http.StatusNoContent {
		respTrace, err = httputil.DumpResponse(resp, true)
		if err != nil {
			return err
		}
	} else {
		respTrace, err = httputil.DumpResponse(resp, false)
		if err != nil {
			return err
		}
	}

	// Write response to trace output.
	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
	if err != nil {
		return err
	}

	// Ends the http dump.
	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
	if err != nil {
		return err
	}

	// Returns success.
	return nil
}

// do - execute http request.
func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
	defer func() {
		if IsNetworkOrHostDown(err, false) {
			c.markOffline()
		}
	}()

	resp, err = c.httpClient.Do(req)
	if err != nil {
		// Handle this specifically for now until future Golang versions fix this issue properly.
		if urlErr, ok := err.(*url.Error); ok {
			if strings.Contains(urlErr.Err.Error(), "EOF") {
				return nil, &url.Error{
					Op:  urlErr.Op,
					URL: urlErr.URL,
					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
				}
			}
		}
		return nil, err
	}

	// Response cannot be nil; report an error if that is the case.
	if resp == nil {
		msg := "Response is empty. " + reportIssue
		return nil, errInvalidArgument(msg)
	}

	// If trace is enabled, dump http request and response,
	// except when traceErrorsOnly is enabled and the response's status code is OK.
	if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
		err = c.dumpHTTP(req, resp)
		if err != nil {
			return nil, err
		}
	}

	return resp, nil
}

// List of success status.
var successStatus = []int{
	http.StatusOK,
	http.StatusNoContent,
	http.StatusPartialContent,
}

// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
	if c.IsOffline() {
		return nil, errors.New(c.endpointURL.String() + " is offline.")
	}

	var retryable bool       // Indicates if request can be retried.
	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
	reqRetry := MaxRetry     // Indicates how many times we can retry the request

	if metadata.contentBody != nil {
		// Check if body is seekable then it is retryable.
		bodySeeker, retryable = metadata.contentBody.(io.Seeker)
		switch bodySeeker {
		case os.Stdin, os.Stdout, os.Stderr:
			retryable = false
		}
		// Retry only when reader is seekable
		if !retryable {
			reqRetry = 1
		}

		// Figure out if the body can be closed - if yes
		// we will definitely close it upon the function
		// return.
		bodyCloser, ok := metadata.contentBody.(io.Closer)
		if ok {
			defer bodyCloser.Close()
		}
	}

	// Create cancel context to control 'newRetryTimer' go routine.
	retryCtx, cancel := context.WithCancel(ctx)

	// Indicate to our routine to exit cleanly upon return.
	defer cancel()

	for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
		// Retry executes the following function body if request has an
		// error until maxRetries have been exhausted, retry attempts are
		// performed after waiting for a given period of time in a
		// binomial fashion.
		if retryable {
			// Seek back to beginning for each attempt.
			if _, err = bodySeeker.Seek(0, 0); err != nil {
				// If seek failed, no need to retry.
				return nil, err
			}
		}

		if metadata.addCrc {
			if metadata.trailer == nil {
				metadata.trailer = make(http.Header, 1)
			}
			crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
			metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) {
				// Update trailer when done.
				metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
			})
			metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
		}
		// Instantiate a new request.
		var req *http.Request
		req, err = c.newRequest(ctx, method, metadata)
		if err != nil {
			errResponse := ToErrorResponse(err)
			if isS3CodeRetryable(errResponse.Code) {
				continue // Retry.
			}

			return nil, err
		}

		// Initiate the request.
		res, err = c.do(req)
		if err != nil {
			if isRequestErrorRetryable(err) {
				// Retry the request
				continue
			}
			return nil, err
		}

		// For any known successful http status, return quickly.
		for _, httpStatus := range successStatus {
			if httpStatus == res.StatusCode {
				return res, nil
			}
		}

		// Read the body to be saved later.
		errBodyBytes, err := io.ReadAll(res.Body)
		// res.Body should be closed
		closeResponse(res)
		if err != nil {
			return nil, err
		}

		// Save the body.
		errBodySeeker := bytes.NewReader(errBodyBytes)
		res.Body = io.NopCloser(errBodySeeker)

		// For errors verify if it's retryable otherwise fail quickly.
		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))

		// Save the body back again.
		errBodySeeker.Seek(0, 0) // Seek back to starting point.
		res.Body = io.NopCloser(errBodySeeker)

		// If the bucket region is set in the error response and the error
		// code indicates an invalid region, we can retry the request
		// with the new region.
		//
		// Additionally, we should only retry if bucketLocation and custom
		// region is empty.
		if c.region == "" {
			switch errResponse.Code {
			case "AuthorizationHeaderMalformed":
				fallthrough
			case "InvalidRegion":
				fallthrough
			case "AccessDenied":
				if errResponse.Region == "" {
					// Region is empty we simply return the error.
					return res, err
				}
				// Region is not empty figure out a way to
				// handle this appropriately.
				if metadata.bucketName != "" {
					// Gather Cached location only if bucketName is present.
					if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region {
						c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
						continue // Retry.
					}
				} else {
					// This is for ListBuckets() fallback.
					if errResponse.Region != metadata.bucketLocation {
						// Retry if the error response has a different region
						// than the request we just made.
						metadata.bucketLocation = errResponse.Region
						continue // Retry
					}
				}
			}
		}

		// Verify if error response code is retryable.
		if isS3CodeRetryable(errResponse.Code) {
			continue // Retry.
		}

		// Verify if http status code is retryable.
		if isHTTPStatusRetryable(res.StatusCode) {
			continue // Retry.
		}

		// For all other cases break out of the retry loop.
		break
	}

	// Return an error when the retry context is canceled or its deadline is exceeded.
	if e := retryCtx.Err(); e != nil {
		return nil, e
	}

	return res, err
}

// newRequest - instantiate a new HTTP request for a given method.
func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) {
	// If no method is supplied default to 'POST'.
	if method == "" {
		method = http.MethodPost
	}

	location := metadata.bucketLocation
	if location == "" {
		if metadata.bucketName != "" {
			// Gather location only if bucketName is present.
			location, err = c.getBucketLocation(ctx, metadata.bucketName)
			if err != nil {
				return nil, err
			}
		}
		if location == "" {
			location = getDefaultLocation(*c.endpointURL, c.region)
		}
	}

	// Look if target url supports virtual host.
	// We explicitly disallow MakeBucket calls to not use virtual DNS style,
	// since the resolution may fail.
	isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0)
	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket

	// Construct a new target URL.
	targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location,
		isVirtualHost, metadata.queryValues)
	if err != nil {
		return nil, err
	}

	if c.httpTrace != nil {
		ctx = httptrace.WithClientTrace(ctx, c.httpTrace)
	}

	// Initialize a new HTTP request for the method.
	req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil)
	if err != nil {
		return nil, err
	}

	// Get credentials from the configured credentials provider.
	value, err := c.credsProvider.Get()
	if err != nil {
		return nil, err
	}

	var (
		signerType      = value.SignerType
		accessKeyID     = value.AccessKeyID
		secretAccessKey = value.SecretAccessKey
		sessionToken    = value.SessionToken
	)

	// Custom signer set then override the behavior.
	if c.overrideSignerType != credentials.SignatureDefault {
		signerType = c.overrideSignerType
	}

	// If signerType returned by credentials helper is anonymous,
	// then do not sign regardless of signerType override.
	if value.SignerType == credentials.SignatureAnonymous {
		signerType = credentials.SignatureAnonymous
	}

	// Generate presign url if needed, return right here.
	if metadata.expires != 0 && metadata.presignURL {
		if signerType.IsAnonymous() {
			return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
		}
		if metadata.extraPresignHeader != nil {
			if signerType.IsV2() {
				return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.")
			}
			for k, v := range metadata.extraPresignHeader {
				req.Header.Set(k, v[0])
			}
		}
		if signerType.IsV2() {
			// Presign URL with signature v2.
			req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
		} else if signerType.IsV4() {
			// Presign URL with signature v4.
			req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
		}
		return req, nil
	}

	// Set 'User-Agent' header for the request.
	c.setUserAgent(req)

	// Set all headers.
	for k, v := range metadata.customHeader {
		req.Header.Set(k, v[0])
	}

	// Go net/http notoriously closes the request body.
	// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
	// This can cause underlying *os.File seekers to fail, avoid that
	// by making sure to wrap the closer as a nop.
	if metadata.contentLength == 0 {
		req.Body = nil
	} else {
		req.Body = io.NopCloser(metadata.contentBody)
	}

	// Set incoming content-length.
	req.ContentLength = metadata.contentLength
	if req.ContentLength <= -1 {
		// For unknown content length, we upload using transfer-encoding: chunked.
		req.TransferEncoding = []string{"chunked"}
	}

	// set md5Sum for content protection.
	if len(metadata.contentMD5Base64) > 0 {
		req.Header.Set("Content-Md5", metadata.contentMD5Base64)
	}

	// For anonymous requests just return.
	if signerType.IsAnonymous() {
		return req, nil
	}

	switch {
	case signerType.IsV2():
		// Add signature version '2' authorization header.
		req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
	case metadata.streamSha256 && !c.secure:
		if len(metadata.trailer) > 0 {
			req.Trailer = metadata.trailer
		}
		// Streaming signature is used by default for a PUT object request.
		// Additionally, we also look if the initialized client is secure,
		// if yes then we don't need to perform streaming signature.
		req = signer.StreamingSignV4(req, accessKeyID,
			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher())
	default:
		// Set sha256 sum for signature calculation only with signature version '4'.
		shaHeader := unsignedPayload
		if metadata.contentSHA256Hex != "" {
			shaHeader = metadata.contentSHA256Hex
			if len(metadata.trailer) > 0 {
				// Sanity check, we should not end up here if upstream is sane.
				return nil, errors.New("internal error: contentSHA256Hex with trailer not supported")
			}
		} else if len(metadata.trailer) > 0 {
			shaHeader = unsignedPayloadTrailer
		}
		req.Header.Set("X-Amz-Content-Sha256", shaHeader)

		// Add signature version '4' authorization header.
		req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer)
	}

	// Return request.
	return req, nil
}

// set User agent.
func (c *Client) setUserAgent(req *http.Request) {
	req.Header.Set("User-Agent", libraryUserAgent)
	if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
		req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
	}
}

// makeTargetURL makes a new target url.
func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
	host := c.endpointURL.Host
	// For Amazon S3 endpoint, try to fetch location based endpoint.
	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
		if c.s3AccelerateEndpoint != "" && bucketName != "" {
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			// Disable transfer acceleration for non-compliant bucket names.
			if strings.Contains(bucketName, ".") {
				return nil, errTransferAccelerationBucket(bucketName)
			}
			// If transfer acceleration is requested set new host.
			// For more details about enabling transfer acceleration read here.
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			host = c.s3AccelerateEndpoint
		} else {
			// Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint
			if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) {
				// Fetch new host based on the bucket location.
				host = getS3Endpoint(bucketLocation)
			}
		}
	}

	// Save scheme.
	scheme := c.endpointURL.Scheme

	// Strip port 80 and 443 so we won't send these ports in Host header.
	// The reason is that browsers and curl automatically remove :80 and :443
	// from generated presigned urls, which would otherwise cause a signature mismatch error.
	if h, p, err := net.SplitHostPort(host); err == nil {
		if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
			host = h
			if ip := net.ParseIP(h); ip != nil && ip.To4() == nil {
				host = "[" + h + "]"
			}
		}
	}

	urlStr := scheme + "://" + host + "/"

	// Make URL only if bucketName is available, otherwise use the
	// endpoint URL.
	if bucketName != "" {
		// If endpoint supports virtual host style use that always.
		// Currently only S3 and Google Cloud Storage would support
		// virtual host style.
		if isVirtualHostStyle {
			urlStr = scheme + "://" + bucketName + "." + host + "/"
			if objectName != "" {
				urlStr += s3utils.EncodePath(objectName)
			}
		} else {
			// If not fall back to using path style.
			urlStr = urlStr + bucketName + "/"
			if objectName != "" {
				urlStr += s3utils.EncodePath(objectName)
			}
		}
	}

	// If there are any query values, add them to the end.
	if len(queryValues) > 0 {
		urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
	}

	return url.Parse(urlStr)
}
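
// Illustrative sketch of the two URL shapes produced above for bucket
// "my-bucket" and object "my-object" on endpoint "s3.example.com"
// (placeholder names):
//
//	virtual-host style: https://my-bucket.s3.example.com/my-object
//	path style:         https://s3.example.com/my-bucket/my-object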

// returns true if virtual hosted style requests are to be used.
func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
	if bucketName == "" {
		return false
	}

	if c.lookup == BucketLookupDNS {
		return true
	}
	if c.lookup == BucketLookupPath {
		return false
	}

	// default to virtual only for Amazon/Google storage. In all other cases use
	// path style requests
	return s3utils.IsVirtualHostSupported(url, bucketName)
}