Diffstat (limited to 'vendor/github.com/minio/minio-go/v7/utils.go')
-rw-r--r--  vendor/github.com/minio/minio-go/v7/utils.go  693
1 file changed, 693 insertions, 0 deletions
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
new file mode 100644
index 0000000..e39eba0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -0,0 +1,693 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "crypto/md5"
23 fipssha256 "crypto/sha256"
24 "encoding/base64"
25 "encoding/hex"
26 "encoding/xml"
27 "errors"
28 "fmt"
29 "hash"
30 "io"
31 "math/rand"
32 "net"
33 "net/http"
34 "net/url"
35 "regexp"
36 "strconv"
37 "strings"
38 "sync"
39 "time"
40
41 md5simd "github.com/minio/md5-simd"
42 "github.com/minio/minio-go/v7/pkg/encrypt"
43 "github.com/minio/minio-go/v7/pkg/s3utils"
44 "github.com/minio/sha256-simd"
45)
46
47func trimEtag(etag string) string {
48 etag = strings.TrimPrefix(etag, "\"")
49 return strings.TrimSuffix(etag, "\"")
50}
51
52var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)
53
54func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
55 if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
56 expTime, err := parseRFC7231Time(matches[1])
57 if err != nil {
58 return time.Time{}, ""
59 }
60 return expTime, matches[2]
61 }
62 return time.Time{}, ""
63}
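
// A minimal usage sketch (not part of the upstream file): decoding a sample
// x-amz-expiration header value with the helper above. The header value and
// rule ID shown are hypothetical.
func exampleAmzExpiration() {
    hdr := `expiry-date="Fri, 21 Dec 2012 00:00:00 GMT", rule-id="picture-deletion-rule"`
    expTime, ruleID := amzExpirationToExpiryDateRuleID(hdr)
    fmt.Println(expTime, ruleID)
}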
64
65var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)
66
67func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) {
68 matches := restoreRegex.FindStringSubmatch(restore)
69 if len(matches) != 4 {
70 return false, time.Time{}, errors.New("unexpected restore header")
71 }
72 ongoing, err = strconv.ParseBool(matches[1])
73 if err != nil {
74 return false, time.Time{}, err
75 }
76 if matches[3] != "" {
77 expTime, err = parseRFC7231Time(matches[3])
78 if err != nil {
79 return false, time.Time{}, err
80 }
81 }
82 return
83}
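
// A minimal usage sketch (not part of the upstream file): decoding a sample
// x-amz-restore header value with the helper above. The value shown is
// hypothetical.
func exampleAmzRestore() {
    ongoing, expTime, err := amzRestoreToStruct(`ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`)
    if err != nil {
        fmt.Println("unexpected restore header:", err)
        return
    }
    fmt.Println(ongoing, expTime)
}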
84
85// xmlDecoder decodes an XML body into the provided value.
86func xmlDecoder(body io.Reader, v interface{}) error {
87 d := xml.NewDecoder(body)
88 return d.Decode(v)
89}
90
91// sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded.
92func sum256Hex(data []byte) string {
93 hash := newSHA256Hasher()
94 defer hash.Close()
95 hash.Write(data)
96 return hex.EncodeToString(hash.Sum(nil))
97}
98
99// sumMD5Base64 calculates the md5sum of an input byte array and returns it base64 encoded.
100func sumMD5Base64(data []byte) string {
101 hash := newMd5Hasher()
102 defer hash.Close()
103 hash.Write(data)
104 return base64.StdEncoding.EncodeToString(hash.Sum(nil))
105}
106
107// getEndpointURL - construct a new endpoint.
108func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
109 // If secure is false, use 'http' scheme.
110 scheme := "https"
111 if !secure {
112 scheme = "http"
113 }
114
115 // Construct a secured endpoint URL.
116 endpointURLStr := scheme + "://" + endpoint
117 endpointURL, err := url.Parse(endpointURLStr)
118 if err != nil {
119 return nil, err
120 }
121
122 // Validate incoming endpoint URL.
123 if err := isValidEndpointURL(*endpointURL); err != nil {
124 return nil, err
125 }
126 return endpointURL, nil
127}
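
// A minimal usage sketch (not part of the upstream file): constructing a
// validated endpoint URL for a hypothetical host with TLS enabled.
func exampleGetEndpointURL() {
    endpointURL, err := getEndpointURL("s3.example.com", true)
    if err != nil {
        fmt.Println("invalid endpoint:", err)
        return
    }
    fmt.Println(endpointURL.String()) // https://s3.example.com
}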
128
129// closeResponse closes a non-nil response after draining any remaining
130// data from its Body; a convenient wrapper around both steps.
131//
132// Subsequently this allows the golang http RoundTripper
133// to re-use the same connection for future requests.
134func closeResponse(resp *http.Response) {
135 // Callers should close resp.Body when done reading from it.
136 // If resp.Body is not closed, the Client's underlying RoundTripper
137 // (typically Transport) may not be able to re-use a persistent TCP
138 // connection to the server for a subsequent "keep-alive" request.
139 if resp != nil && resp.Body != nil {
140 // Drain any remaining Body and then close the connection.
141 // Without draining the Body first, the underlying connection
142 // cannot be re-used for future requests.
143 // - http://stackoverflow.com/a/17961593/4465767
144 io.Copy(io.Discard, resp.Body)
145 resp.Body.Close()
146 }
147}
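
// A minimal usage sketch (not part of the upstream file): draining and closing
// a response through closeResponse so the Transport can keep the connection
// alive for subsequent requests. The URL is hypothetical.
func exampleCloseResponse() error {
    resp, err := http.Get("https://example.com/")
    if err != nil {
        return err
    }
    defer closeResponse(resp)
    _, err = io.ReadAll(resp.Body) // consume whatever the caller needs
    return err
}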
148
149var (
150 // Hex encoded string of nil sha256sum bytes.
151 emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
152
153 // Sentinel URL is the default url value which is invalid.
154 sentinelURL = url.URL{}
155)
156
157// Verify if input endpoint URL is valid.
158func isValidEndpointURL(endpointURL url.URL) error {
159 if endpointURL == sentinelURL {
160 return errInvalidArgument("Endpoint url cannot be empty.")
161 }
162 if endpointURL.Path != "/" && endpointURL.Path != "" {
163 return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
164 }
165 host := endpointURL.Hostname()
166 if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
167 msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards."
168 return errInvalidArgument(msg)
169 }
170
171 if strings.Contains(host, ".s3.amazonaws.com") {
172 if !s3utils.IsAmazonEndpoint(endpointURL) {
173 return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
174 }
175 }
176 if strings.Contains(host, ".googleapis.com") {
177 if !s3utils.IsGoogleEndpoint(endpointURL) {
178 return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
179 }
180 }
181 return nil
182}
183
184// Verify if input expires value is valid.
185func isValidExpiry(expires time.Duration) error {
186 expireSeconds := int64(expires / time.Second)
187 if expireSeconds < 1 {
188 return errInvalidArgument("Expires cannot be lesser than 1 second.")
189 }
190 if expireSeconds > 604800 {
191 return errInvalidArgument("Expires cannot be greater than 7 days.")
192 }
193 return nil
194}
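
// A minimal usage sketch (not part of the upstream file): expiry values must
// lie between one second and seven days (604800 seconds).
func exampleIsValidExpiry() {
    fmt.Println(isValidExpiry(15 * time.Minute))   // <nil>
    fmt.Println(isValidExpiry(8 * 24 * time.Hour)) // error: greater than 7 days
}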
195
196// Extract only the necessary metadata header key/values by
197// filtering against the list of preserved header keys below.
198func extractObjMetadata(header http.Header) http.Header {
199 preserveKeys := []string{
200 "Content-Type",
201 "Cache-Control",
202 "Content-Encoding",
203 "Content-Language",
204 "Content-Disposition",
205 "X-Amz-Storage-Class",
206 "X-Amz-Object-Lock-Mode",
207 "X-Amz-Object-Lock-Retain-Until-Date",
208 "X-Amz-Object-Lock-Legal-Hold",
209 "X-Amz-Website-Redirect-Location",
210 "X-Amz-Server-Side-Encryption",
211 "X-Amz-Tagging-Count",
212 "X-Amz-Meta-",
213 // Add new headers to be preserved.
214 // if you add new headers here, please extend
215 // PutObjectOptions{} to preserve them
216 // upon upload as well.
217 }
218 filteredHeader := make(http.Header)
219 for k, v := range header {
220 var found bool
221 for _, prefix := range preserveKeys {
222 if !strings.HasPrefix(k, prefix) {
223 continue
224 }
225 found = true
226 break
227 }
228 if found {
229 filteredHeader[k] = v
230 }
231 }
232 return filteredHeader
233}
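
// A minimal usage sketch (not part of the upstream file): only headers whose
// canonical names match one of the preserved keys or prefixes above survive
// the filtering. The header values are hypothetical.
func exampleExtractObjMetadata() {
    h := make(http.Header)
    h.Set("Content-Type", "text/plain")
    h.Set("X-Amz-Meta-Owner", "team-a") // kept: "X-Amz-Meta-" prefix
    h.Set("X-Amz-Request-Id", "abc123") // dropped: not in preserveKeys
    filtered := extractObjMetadata(h)
    fmt.Println(len(filtered), filtered.Get("X-Amz-Meta-Owner"))
}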
234
235const (
236 // RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
237 rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
238 rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
239 rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
240)
241
242func parseTime(t string, formats ...string) (time.Time, error) {
243 for _, format := range formats {
244 tt, err := time.Parse(format, t)
245 if err == nil {
246 return tt, nil
247 }
248 }
249 return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats)
250}
251
252func parseRFC7231Time(lastModified string) (time.Time, error) {
253 return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear)
254}
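
// A minimal usage sketch (not part of the upstream file): parsing a typical
// Last-Modified value in the RFC 7231 format documented above.
func exampleParseRFC7231Time() {
    mtime, err := parseRFC7231Time("Tue, 29 Apr 2014 18:30:38 GMT")
    if err != nil {
        fmt.Println("unparsable time:", err)
        return
    }
    fmt.Println(mtime.UTC())
}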
255
256// ToObjectInfo converts http header values into ObjectInfo type,
257// extracts metadata and fills in all the necessary fields in ObjectInfo.
258func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, error) {
259 var err error
260 // Trim off the odd double quotes from ETag in the beginning and end.
261 etag := trimEtag(h.Get("ETag"))
262
263 // Parse content length if it exists.
264 var size int64 = -1
265 contentLengthStr := h.Get("Content-Length")
266 if contentLengthStr != "" {
267 size, err = strconv.ParseInt(contentLengthStr, 10, 64)
268 if err != nil {
269 // Content-Length is not valid
270 return ObjectInfo{}, ErrorResponse{
271 Code: "InternalError",
272 Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
273 BucketName: bucketName,
274 Key: objectName,
275 RequestID: h.Get("x-amz-request-id"),
276 HostID: h.Get("x-amz-id-2"),
277 Region: h.Get("x-amz-bucket-region"),
278 }
279 }
280 }
281
282 // Parse Last-Modified, which has the http time format.
283 mtime, err := parseRFC7231Time(h.Get("Last-Modified"))
284 if err != nil {
285 return ObjectInfo{}, ErrorResponse{
286 Code: "InternalError",
287 Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err),
288 BucketName: bucketName,
289 Key: objectName,
290 RequestID: h.Get("x-amz-request-id"),
291 HostID: h.Get("x-amz-id-2"),
292 Region: h.Get("x-amz-bucket-region"),
293 }
294 }
295
296 // Fetch content type if any present.
297 contentType := strings.TrimSpace(h.Get("Content-Type"))
298 if contentType == "" {
299 contentType = "application/octet-stream"
300 }
301
302 expiryStr := h.Get("Expires")
303 var expiry time.Time
304 if expiryStr != "" {
305 expiry, err = parseRFC7231Time(expiryStr)
306 if err != nil {
307 return ObjectInfo{}, ErrorResponse{
308 Code: "InternalError",
309 Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err),
310 BucketName: bucketName,
311 Key: objectName,
312 RequestID: h.Get("x-amz-request-id"),
313 HostID: h.Get("x-amz-id-2"),
314 Region: h.Get("x-amz-bucket-region"),
315 }
316 }
317 }
318
319 metadata := extractObjMetadata(h)
320 userMetadata := make(map[string]string)
321 for k, v := range metadata {
322 if strings.HasPrefix(k, "X-Amz-Meta-") {
323 userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
324 }
325 }
326 userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))
327
328 var tagCount int
329 if count := h.Get(amzTaggingCount); count != "" {
330 tagCount, err = strconv.Atoi(count)
331 if err != nil {
332 return ObjectInfo{}, ErrorResponse{
333 Code: "InternalError",
334 Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
335 BucketName: bucketName,
336 Key: objectName,
337 RequestID: h.Get("x-amz-request-id"),
338 HostID: h.Get("x-amz-id-2"),
339 Region: h.Get("x-amz-bucket-region"),
340 }
341 }
342 }
343
344 // Nil if not found
345 var restore *RestoreInfo
346 if restoreHdr := h.Get(amzRestore); restoreHdr != "" {
347 ongoing, expTime, err := amzRestoreToStruct(restoreHdr)
348 if err != nil {
349 return ObjectInfo{}, err
350 }
351 restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime}
352 }
353
354 // extract lifecycle expiry date and rule ID
355 expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))
356
357 deleteMarker := h.Get(amzDeleteMarker) == "true"
358
359 // Save object metadata info.
360 return ObjectInfo{
361 ETag: etag,
362 Key: objectName,
363 Size: size,
364 LastModified: mtime,
365 ContentType: contentType,
366 Expires: expiry,
367 VersionID: h.Get(amzVersionID),
368 IsDeleteMarker: deleteMarker,
369 ReplicationStatus: h.Get(amzReplicationStatus),
370 Expiration: expTime,
371 ExpirationRuleID: ruleID,
372 // Extract only the relevant header keys describing the object.
373 // The filtering above keeps only a standard set of keys,
374 // dropping headers which are not part of object metadata.
375 Metadata: metadata,
376 UserMetadata: userMetadata,
377 UserTags: userTags,
378 UserTagCount: tagCount,
379 Restore: restore,
380
381 // Checksum values
382 ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
383 ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
384 ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
385 ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
386 }, nil
387}
388
389var readFull = func(r io.Reader, buf []byte) (n int, err error) {
390 // ReadFull reads exactly len(buf) bytes from r into buf.
391 // It returns the number of bytes copied and an error if
392 // fewer bytes were read. The error is EOF only if no bytes
393 // were read. If an EOF happens after reading some but not
394 // all the bytes, ReadFull returns ErrUnexpectedEOF.
395 // On return, n == len(buf) if and only if err == nil.
396 // If r returns an error having read at least len(buf) bytes,
397 // the error is dropped.
398 for n < len(buf) && err == nil {
399 var nn int
400 nn, err = r.Read(buf[n:])
401 // Some spurious io.Readers return
402 // io.ErrUnexpectedEOF when nn == 0;
403 // this behavior is undocumented.
404 // We therefore do not use io.ReadFull
405 // directly, since that would force
406 // custom handling in callers. Instead
407 // we use a modified copy of the
408 // io.ReadFull implementation which
409 // treats io.ErrUnexpectedEOF with
410 // nn == 0 as what it really means: io.EOF.
411 if err == io.ErrUnexpectedEOF && nn == 0 {
412 err = io.EOF
413 }
414 n += nn
415 }
416 if n >= len(buf) {
417 err = nil
418 } else if n > 0 && err == io.EOF {
419 err = io.ErrUnexpectedEOF
420 }
421 return
422}
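
// A minimal usage sketch (not part of the upstream file): like io.ReadFull,
// a source shorter than the buffer yields io.ErrUnexpectedEOF once the
// available bytes have been read.
func exampleReadFull() {
    buf := make([]byte, 8)
    n, err := readFull(strings.NewReader("abc"), buf)
    fmt.Println(n, err) // prints: 3 unexpected EOF
}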
423
424// regCred matches credential string in HTTP header
425var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
426
427// regSign matches signature string in HTTP header
428var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
429
430// Redact out signature value from authorization string.
431func redactSignature(origAuth string) string {
432 if !strings.HasPrefix(origAuth, signV4Algorithm) {
433 // Set a temporary redacted auth
434 return "AWS **REDACTED**:**REDACTED**"
435 }
436
437 // Signature V4 authorization header.
438
439 // Strip out accessKeyID from:
440 // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
441 newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
442
443 // Strip out 256-bit signature from: Signature=<256-bit signature>
444 return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
445}
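
// A minimal usage sketch (not part of the upstream file): scrubbing a made-up
// Signature V4 Authorization header before it is logged. The credential and
// signature values below are fabricated examples.
func exampleRedactSignature() {
    auth := signV4Algorithm + " Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, " +
        "SignedHeaders=host;x-amz-date, Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024"
    // Both the access key ID and the signature come back as **REDACTED**.
    fmt.Println(redactSignature(auth))
}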
446
447// getDefaultLocation returns the location based on the input
448// URL `u`. If a region override is provided, the location
449// always defaults to regionOverride.
450//
451// If no other cases match then the location is set to `us-east-1`
452// as a last resort.
453func getDefaultLocation(u url.URL, regionOverride string) (location string) {
454 if regionOverride != "" {
455 return regionOverride
456 }
457 region := s3utils.GetRegionFromURL(u)
458 if region == "" {
459 region = "us-east-1"
460 }
461 return region
462}
463
464var supportedHeaders = map[string]bool{
465 "content-type": true,
466 "cache-control": true,
467 "content-encoding": true,
468 "content-disposition": true,
469 "content-language": true,
470 "x-amz-website-redirect-location": true,
471 "x-amz-object-lock-mode": true,
472 "x-amz-metadata-directive": true,
473 "x-amz-object-lock-retain-until-date": true,
474 "expires": true,
475 "x-amz-replication-status": true,
476 // Add more supported headers here.
477 // Must be lower case.
478}
479
480// isStorageClassHeader returns true if the header is a supported storage class header
481func isStorageClassHeader(headerKey string) bool {
482 return strings.EqualFold(amzStorageClass, headerKey)
483}
484
485// isStandardHeader returns true if header is a supported header and not a custom header
486func isStandardHeader(headerKey string) bool {
487 return supportedHeaders[strings.ToLower(headerKey)]
488}
489
490// sseHeaders is the list of server side encryption headers
491var sseHeaders = map[string]bool{
492 "x-amz-server-side-encryption": true,
493 "x-amz-server-side-encryption-aws-kms-key-id": true,
494 "x-amz-server-side-encryption-context": true,
495 "x-amz-server-side-encryption-customer-algorithm": true,
496 "x-amz-server-side-encryption-customer-key": true,
497 "x-amz-server-side-encryption-customer-key-md5": true,
498 // Add more supported headers here.
499 // Must be lower case.
500}
501
502// isSSEHeader returns true if header is a server side encryption header.
503func isSSEHeader(headerKey string) bool {
504 return sseHeaders[strings.ToLower(headerKey)]
505}
506
507// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
508func isAmzHeader(headerKey string) bool {
509 key := strings.ToLower(headerKey)
510
511 return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
512}
513
514// supportedQueryValues is a list of query strings that can be passed in when using GetObject.
515var supportedQueryValues = map[string]bool{
516 "partNumber": true,
517 "versionId": true,
518 "response-cache-control": true,
519 "response-content-disposition": true,
520 "response-content-encoding": true,
521 "response-content-language": true,
522 "response-content-type": true,
523 "response-expires": true,
524}
525
526// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized.
527func isStandardQueryValue(qsKey string) bool {
528 return supportedQueryValues[qsKey]
529}
530
531// Per the documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom, the
532// set of query params starting with "x-" is ignored by S3.
533const allowedCustomQueryPrefix = "x-"
534
535func isCustomQueryValue(qsKey string) bool {
536 return strings.HasPrefix(qsKey, allowedCustomQueryPrefix)
537}
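
// A minimal usage sketch (not part of the upstream file): a known response
// override parameter is standard, while a parameter with the "x-" prefix is
// treated as an allowed custom value. The parameter names are hypothetical.
func exampleQueryValueChecks() {
    fmt.Println(isStandardQueryValue("versionId")) // true
    fmt.Println(isCustomQueryValue("x-trace-id"))  // true
    fmt.Println(isCustomQueryValue("trace-id"))    // false
}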
538
539var (
540 md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
541 sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
542)
543
544func newMd5Hasher() md5simd.Hasher {
545 return &hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true}
546}
547
548func newSHA256Hasher() md5simd.Hasher {
549 if encrypt.FIPS {
550 return &hashWrapper{Hash: fipssha256.New(), isSHA256: true}
551 }
552 return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true}
553}
554
555// hashWrapper implements the md5simd.Hasher interface.
556type hashWrapper struct {
557 hash.Hash
558 isMD5 bool
559 isSHA256 bool
560}
561
562// Close will put the hasher back into the pool.
563func (m *hashWrapper) Close() {
564 if m.isMD5 && m.Hash != nil {
565 m.Reset()
566 md5Pool.Put(m.Hash)
567 }
568 if m.isSHA256 && m.Hash != nil {
569 m.Reset()
570 sha256Pool.Put(m.Hash)
571 }
572 m.Hash = nil
573}
574
575const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
576const (
577 letterIdxBits = 6 // 6 bits to represent a letter index
578 letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
579 letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
580)
581
582// randString generates random names and prepends them with a known prefix.
583func randString(n int, src rand.Source, prefix string) string {
584 b := make([]byte, n)
585 // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
586 for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
587 if remain == 0 {
588 cache, remain = src.Int63(), letterIdxMax
589 }
590 if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
591 b[i] = letterBytes[idx]
592 i--
593 }
594 cache >>= letterIdxBits
595 remain--
596 }
597 return prefix + string(b[0:30-len(prefix)])
598}
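
// A minimal usage sketch (not part of the upstream file): generating a
// prefixed random name of 30 characters, e.g. for a temporary object key.
// The prefix is hypothetical.
func exampleRandString() {
    src := rand.NewSource(time.Now().UnixNano())
    name := randString(60, src, "tmp-")
    fmt.Println(name) // "tmp-" followed by 26 random characters
}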
599
600// IsNetworkOrHostDown - if there was a network error or if the host is down.
601// expectTimeouts indicates that *context* timeouts are expected and do not
602// indicate a downed host. Other timeouts are still reported as down.
603func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
604 if err == nil {
605 return false
606 }
607
608 if errors.Is(err, context.Canceled) {
609 return false
610 }
611
612 if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
613 return false
614 }
615
616 if errors.Is(err, context.DeadlineExceeded) {
617 return true
618 }
619
620 // We need to figure out whether the error is either a timeout
621 // or a non-temporary error.
622 urlErr := &url.Error{}
623 if errors.As(err, &urlErr) {
624 switch urlErr.Err.(type) {
625 case *net.DNSError, *net.OpError, net.UnknownNetworkError:
626 return true
627 }
628 }
629 var e net.Error
630 if errors.As(err, &e) {
631 if e.Timeout() {
632 return true
633 }
634 }
635
636 // Fallback to other mechanisms.
637 switch {
638 case strings.Contains(err.Error(), "Connection closed by foreign host"):
639 return true
640 case strings.Contains(err.Error(), "TLS handshake timeout"):
641 // If error is - tlsHandshakeTimeoutError.
642 return true
643 case strings.Contains(err.Error(), "i/o timeout"):
644 // If error is - tcp timeoutError.
645 return true
646 case strings.Contains(err.Error(), "connection timed out"):
647 // If err is a net.Dial timeout.
648 return true
649 case strings.Contains(err.Error(), "connection refused"):
650 // If err is connection refused
651 return true
652
653 case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
654 // Denial errors
655 return true
656 }
657 return false
658}
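
// A minimal usage sketch (not part of the upstream file): classifying errors
// without touching the network. A *net.OpError wrapped in a *url.Error counts
// as "network or host down", while a caller-cancelled context does not. The
// host name is hypothetical.
func exampleIsNetworkOrHostDown() {
    err := &url.Error{
        Op:  "Get",
        URL: "http://minio.example.internal:9000",
        Err: &net.OpError{Op: "dial", Err: errors.New("connection refused")},
    }
    fmt.Println(IsNetworkOrHostDown(err, false))              // true
    fmt.Println(IsNetworkOrHostDown(context.Canceled, false)) // false
}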
659
660// newHashReaderWrapper will hash all reads done through r.
661// When r returns io.EOF the done function will be called with the sum.
662func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper {
663 return &hashReaderWrapper{
664 r: r,
665 h: h,
666 done: done,
667 }
668}
669
670type hashReaderWrapper struct {
671 r io.Reader
672 h hash.Hash
673 done func(hash []byte)
674}
675
676// Read implements the io.Reader interface.
677func (h *hashReaderWrapper) Read(p []byte) (n int, err error) {
678 n, err = h.r.Read(p)
679 if n > 0 {
680 n2, err := h.h.Write(p[:n])
681 if err != nil {
682 return 0, err
683 }
684 if n2 != n {
685 return 0, io.ErrShortWrite
686 }
687 }
688 if err == io.EOF {
689 // Call back
690 h.done(h.h.Sum(nil))
691 }
692 return n, err
693}
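
// A minimal usage sketch (not part of the upstream file): hashing a stream
// while it is being consumed; the done callback fires once the wrapped reader
// returns io.EOF.
func exampleHashReaderWrapper() {
    h := newSHA256Hasher()
    defer h.Close()
    hr := newHashReaderWrapper(strings.NewReader("hello world"), h, func(sum []byte) {
        fmt.Println("sha256:", hex.EncodeToString(sum))
    })
    if _, err := io.Copy(io.Discard, hr); err != nil {
        fmt.Println("read failed:", err)
    }
}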