aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com')
-rw-r--r--vendor/github.com/dustin/go-humanize/.travis.yml21
-rw-r--r--vendor/github.com/dustin/go-humanize/LICENSE21
-rw-r--r--vendor/github.com/dustin/go-humanize/README.markdown124
-rw-r--r--vendor/github.com/dustin/go-humanize/big.go31
-rw-r--r--vendor/github.com/dustin/go-humanize/bigbytes.go189
-rw-r--r--vendor/github.com/dustin/go-humanize/bytes.go143
-rw-r--r--vendor/github.com/dustin/go-humanize/comma.go116
-rw-r--r--vendor/github.com/dustin/go-humanize/commaf.go41
-rw-r--r--vendor/github.com/dustin/go-humanize/ftoa.go49
-rw-r--r--vendor/github.com/dustin/go-humanize/humanize.go8
-rw-r--r--vendor/github.com/dustin/go-humanize/number.go192
-rw-r--r--vendor/github.com/dustin/go-humanize/ordinals.go25
-rw-r--r--vendor/github.com/dustin/go-humanize/si.go127
-rw-r--r--vendor/github.com/dustin/go-humanize/times.go117
-rw-r--r--vendor/github.com/google/uuid/CHANGELOG.md28
-rw-r--r--vendor/github.com/google/uuid/CONTRIBUTING.md26
-rw-r--r--vendor/github.com/google/uuid/CONTRIBUTORS9
-rw-r--r--vendor/github.com/google/uuid/LICENSE27
-rw-r--r--vendor/github.com/google/uuid/README.md21
-rw-r--r--vendor/github.com/google/uuid/dce.go80
-rw-r--r--vendor/github.com/google/uuid/doc.go12
-rw-r--r--vendor/github.com/google/uuid/hash.go53
-rw-r--r--vendor/github.com/google/uuid/marshal.go38
-rw-r--r--vendor/github.com/google/uuid/node.go90
-rw-r--r--vendor/github.com/google/uuid/node_js.go12
-rw-r--r--vendor/github.com/google/uuid/node_net.go33
-rw-r--r--vendor/github.com/google/uuid/null.go118
-rw-r--r--vendor/github.com/google/uuid/sql.go59
-rw-r--r--vendor/github.com/google/uuid/time.go134
-rw-r--r--vendor/github.com/google/uuid/util.go43
-rw-r--r--vendor/github.com/google/uuid/uuid.go365
-rw-r--r--vendor/github.com/google/uuid/version1.go44
-rw-r--r--vendor/github.com/google/uuid/version4.go76
-rw-r--r--vendor/github.com/google/uuid/version6.go56
-rw-r--r--vendor/github.com/google/uuid/version7.go75
-rw-r--r--vendor/github.com/json-iterator/go/.codecov.yml3
-rw-r--r--vendor/github.com/json-iterator/go/.gitignore4
-rw-r--r--vendor/github.com/json-iterator/go/.travis.yml14
-rw-r--r--vendor/github.com/json-iterator/go/Gopkg.lock21
-rw-r--r--vendor/github.com/json-iterator/go/Gopkg.toml26
-rw-r--r--vendor/github.com/json-iterator/go/LICENSE21
-rw-r--r--vendor/github.com/json-iterator/go/README.md85
-rw-r--r--vendor/github.com/json-iterator/go/adapter.go150
-rw-r--r--vendor/github.com/json-iterator/go/any.go325
-rw-r--r--vendor/github.com/json-iterator/go/any_array.go278
-rw-r--r--vendor/github.com/json-iterator/go/any_bool.go137
-rw-r--r--vendor/github.com/json-iterator/go/any_float.go83
-rw-r--r--vendor/github.com/json-iterator/go/any_int32.go74
-rw-r--r--vendor/github.com/json-iterator/go/any_int64.go74
-rw-r--r--vendor/github.com/json-iterator/go/any_invalid.go82
-rw-r--r--vendor/github.com/json-iterator/go/any_nil.go69
-rw-r--r--vendor/github.com/json-iterator/go/any_number.go123
-rw-r--r--vendor/github.com/json-iterator/go/any_object.go374
-rw-r--r--vendor/github.com/json-iterator/go/any_str.go166
-rw-r--r--vendor/github.com/json-iterator/go/any_uint32.go74
-rw-r--r--vendor/github.com/json-iterator/go/any_uint64.go74
-rw-r--r--vendor/github.com/json-iterator/go/build.sh12
-rw-r--r--vendor/github.com/json-iterator/go/config.go375
-rw-r--r--vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md7
-rw-r--r--vendor/github.com/json-iterator/go/iter.go349
-rw-r--r--vendor/github.com/json-iterator/go/iter_array.go64
-rw-r--r--vendor/github.com/json-iterator/go/iter_float.go342
-rw-r--r--vendor/github.com/json-iterator/go/iter_int.go346
-rw-r--r--vendor/github.com/json-iterator/go/iter_object.go267
-rw-r--r--vendor/github.com/json-iterator/go/iter_skip.go130
-rw-r--r--vendor/github.com/json-iterator/go/iter_skip_sloppy.go163
-rw-r--r--vendor/github.com/json-iterator/go/iter_skip_strict.go99
-rw-r--r--vendor/github.com/json-iterator/go/iter_str.go215
-rw-r--r--vendor/github.com/json-iterator/go/jsoniter.go18
-rw-r--r--vendor/github.com/json-iterator/go/pool.go42
-rw-r--r--vendor/github.com/json-iterator/go/reflect.go337
-rw-r--r--vendor/github.com/json-iterator/go/reflect_array.go104
-rw-r--r--vendor/github.com/json-iterator/go/reflect_dynamic.go70
-rw-r--r--vendor/github.com/json-iterator/go/reflect_extension.go483
-rw-r--r--vendor/github.com/json-iterator/go/reflect_json_number.go112
-rw-r--r--vendor/github.com/json-iterator/go/reflect_json_raw_message.go76
-rw-r--r--vendor/github.com/json-iterator/go/reflect_map.go346
-rw-r--r--vendor/github.com/json-iterator/go/reflect_marshaler.go225
-rw-r--r--vendor/github.com/json-iterator/go/reflect_native.go453
-rw-r--r--vendor/github.com/json-iterator/go/reflect_optional.go129
-rw-r--r--vendor/github.com/json-iterator/go/reflect_slice.go99
-rw-r--r--vendor/github.com/json-iterator/go/reflect_struct_decoder.go1097
-rw-r--r--vendor/github.com/json-iterator/go/reflect_struct_encoder.go211
-rw-r--r--vendor/github.com/json-iterator/go/stream.go210
-rw-r--r--vendor/github.com/json-iterator/go/stream_float.go111
-rw-r--r--vendor/github.com/json-iterator/go/stream_int.go190
-rw-r--r--vendor/github.com/json-iterator/go/stream_str.go372
-rw-r--r--vendor/github.com/json-iterator/go/test.sh12
-rw-r--r--vendor/github.com/klauspost/compress/LICENSE304
-rw-r--r--vendor/github.com/klauspost/compress/s2/.gitignore15
-rw-r--r--vendor/github.com/klauspost/compress/s2/LICENSE28
-rw-r--r--vendor/github.com/klauspost/compress/s2/README.md1120
-rw-r--r--vendor/github.com/klauspost/compress/s2/decode.go437
-rw-r--r--vendor/github.com/klauspost/compress/s2/decode_amd64.s568
-rw-r--r--vendor/github.com/klauspost/compress/s2/decode_arm64.s574
-rw-r--r--vendor/github.com/klauspost/compress/s2/decode_asm.go17
-rw-r--r--vendor/github.com/klauspost/compress/s2/decode_other.go292
-rw-r--r--vendor/github.com/klauspost/compress/s2/dict.go350
-rw-r--r--vendor/github.com/klauspost/compress/s2/encode.go393
-rw-r--r--vendor/github.com/klauspost/compress/s2/encode_all.go1048
-rw-r--r--vendor/github.com/klauspost/compress/s2/encode_amd64.go148
-rw-r--r--vendor/github.com/klauspost/compress/s2/encode_best.go796
-rw-r--r--vendor/github.com/klauspost/compress/s2/encode_better.go1106
-rw-r--r--vendor/github.com/klauspost/compress/s2/encode_go.go729
-rw-r--r--vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go228
-rw-r--r--vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s21169
-rw-r--r--vendor/github.com/klauspost/compress/s2/index.go596
-rw-r--r--vendor/github.com/klauspost/compress/s2/lz4convert.go585
-rw-r--r--vendor/github.com/klauspost/compress/s2/lz4sconvert.go467
-rw-r--r--vendor/github.com/klauspost/compress/s2/reader.go1062
-rw-r--r--vendor/github.com/klauspost/compress/s2/s2.go143
-rw-r--r--vendor/github.com/klauspost/compress/s2/writer.go1020
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/.gitignore24
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml74
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt35
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/LICENSE22
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/README.md497
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/cpuid.go1473
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/cpuid_386.s47
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s72
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s26
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/detect_arm64.go247
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/detect_ref.go15
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/detect_x86.go37
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/featureid_string.go279
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go121
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go130
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go16
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go8
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go11
-rw-r--r--vendor/github.com/klauspost/cpuid/v2/test-architectures.sh15
-rw-r--r--vendor/github.com/minio/md5-simd/LICENSE202
-rw-r--r--vendor/github.com/minio/md5-simd/LICENSE.Golang27
-rw-r--r--vendor/github.com/minio/md5-simd/README.md198
-rw-r--r--vendor/github.com/minio/md5-simd/block16_amd64.s228
-rw-r--r--vendor/github.com/minio/md5-simd/block8_amd64.s281
-rw-r--r--vendor/github.com/minio/md5-simd/block_amd64.go210
-rw-r--r--vendor/github.com/minio/md5-simd/md5-digest_amd64.go188
-rw-r--r--vendor/github.com/minio/md5-simd/md5-server_amd64.go397
-rw-r--r--vendor/github.com/minio/md5-simd/md5-server_fallback.go12
-rw-r--r--vendor/github.com/minio/md5-simd/md5-util_amd64.go85
-rw-r--r--vendor/github.com/minio/md5-simd/md5.go63
-rw-r--r--vendor/github.com/minio/md5-simd/md5block_amd64.go11
-rw-r--r--vendor/github.com/minio/md5-simd/md5block_amd64.s714
-rw-r--r--vendor/github.com/minio/minio-go/v7/.gitignore6
-rw-r--r--vendor/github.com/minio/minio-go/v7/.golangci.yml27
-rw-r--r--vendor/github.com/minio/minio-go/v7/CNAME1
-rw-r--r--vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md22
-rw-r--r--vendor/github.com/minio/minio-go/v7/LICENSE202
-rw-r--r--vendor/github.com/minio/minio-go/v7/MAINTAINERS.md35
-rw-r--r--vendor/github.com/minio/minio-go/v7/Makefile38
-rw-r--r--vendor/github.com/minio/minio-go/v7/NOTICE9
-rw-r--r--vendor/github.com/minio/minio-go/v7/README.md312
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go134
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go169
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-bucket-notification.go261
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-bucket-policy.go147
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-bucket-replication.go355
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go134
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go146
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-compose-object.go594
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-copy-object.go76
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-datatypes.go254
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-error-response.go284
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-get-object-acl.go152
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-get-object-file.go127
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-get-object.go683
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-get-options.go203
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-list.go1057
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go176
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-object-lock.go241
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-object-retention.go165
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-object-tagging.go177
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-presigned.go228
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-put-bucket.go123
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-put-object-common.go149
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go164
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go64
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go465
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go809
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-put-object.go473
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go246
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-remove.go548
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-restore.go182
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go390
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-select.go757
-rw-r--r--vendor/github.com/minio/minio-go/v7/api-stat.go116
-rw-r--r--vendor/github.com/minio/minio-go/v7/api.go995
-rw-r--r--vendor/github.com/minio/minio-go/v7/bucket-cache.go256
-rw-r--r--vendor/github.com/minio/minio-go/v7/checksum.go210
-rw-r--r--vendor/github.com/minio/minio-go/v7/code_of_conduct.md80
-rw-r--r--vendor/github.com/minio/minio-go/v7/constants.go110
-rw-r--r--vendor/github.com/minio/minio-go/v7/core.go150
-rw-r--r--vendor/github.com/minio/minio-go/v7/functional_tests.go13004
-rw-r--r--vendor/github.com/minio/minio-go/v7/hook-reader.go101
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go242
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go88
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample17
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go193
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json7
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample15
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go60
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go71
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go68
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go95
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go157
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go139
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go433
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go77
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go67
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go182
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go146
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go189
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go211
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go205
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go24
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go24
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go198
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go491
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/notification/info.go78
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go440
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go971
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go411
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go200
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go224
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go403
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go319
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go351
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go62
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go66
-rw-r--r--vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go413
-rw-r--r--vendor/github.com/minio/minio-go/v7/post-policy.go349
-rw-r--r--vendor/github.com/minio/minio-go/v7/retry-continous.go69
-rw-r--r--vendor/github.com/minio/minio-go/v7/retry.go148
-rw-r--r--vendor/github.com/minio/minio-go/v7/s3-endpoints.go64
-rw-r--r--vendor/github.com/minio/minio-go/v7/s3-error.go61
-rw-r--r--vendor/github.com/minio/minio-go/v7/transport.go83
-rw-r--r--vendor/github.com/minio/minio-go/v7/utils.go693
-rw-r--r--vendor/github.com/minio/sha256-simd/.gitignore1
-rw-r--r--vendor/github.com/minio/sha256-simd/LICENSE202
-rw-r--r--vendor/github.com/minio/sha256-simd/README.md137
-rw-r--r--vendor/github.com/minio/sha256-simd/cpuid_other.go50
-rw-r--r--vendor/github.com/minio/sha256-simd/sha256.go468
-rw-r--r--vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm686
-rw-r--r--vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go501
-rw-r--r--vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s267
-rw-r--r--vendor/github.com/minio/sha256-simd/sha256block_amd64.go31
-rw-r--r--vendor/github.com/minio/sha256-simd/sha256block_amd64.s266
-rw-r--r--vendor/github.com/minio/sha256-simd/sha256block_arm64.go37
-rw-r--r--vendor/github.com/minio/sha256-simd/sha256block_arm64.s192
-rw-r--r--vendor/github.com/minio/sha256-simd/sha256block_other.go29
-rw-r--r--vendor/github.com/minio/sha256-simd/test-architectures.sh15
-rw-r--r--vendor/github.com/modern-go/concurrent/.gitignore1
-rw-r--r--vendor/github.com/modern-go/concurrent/.travis.yml14
-rw-r--r--vendor/github.com/modern-go/concurrent/LICENSE201
-rw-r--r--vendor/github.com/modern-go/concurrent/README.md49
-rw-r--r--vendor/github.com/modern-go/concurrent/executor.go14
-rw-r--r--vendor/github.com/modern-go/concurrent/go_above_19.go15
-rw-r--r--vendor/github.com/modern-go/concurrent/go_below_19.go33
-rw-r--r--vendor/github.com/modern-go/concurrent/log.go13
-rw-r--r--vendor/github.com/modern-go/concurrent/test.sh12
-rw-r--r--vendor/github.com/modern-go/concurrent/unbounded_executor.go119
-rw-r--r--vendor/github.com/modern-go/reflect2/.gitignore2
-rw-r--r--vendor/github.com/modern-go/reflect2/.travis.yml15
-rw-r--r--vendor/github.com/modern-go/reflect2/Gopkg.lock9
-rw-r--r--vendor/github.com/modern-go/reflect2/Gopkg.toml31
-rw-r--r--vendor/github.com/modern-go/reflect2/LICENSE201
-rw-r--r--vendor/github.com/modern-go/reflect2/README.md71
-rw-r--r--vendor/github.com/modern-go/reflect2/go_above_118.go23
-rw-r--r--vendor/github.com/modern-go/reflect2/go_above_19.go17
-rw-r--r--vendor/github.com/modern-go/reflect2/go_below_118.go21
-rw-r--r--vendor/github.com/modern-go/reflect2/reflect2.go300
-rw-r--r--vendor/github.com/modern-go/reflect2/reflect2_amd64.s0
-rw-r--r--vendor/github.com/modern-go/reflect2/reflect2_kind.go30
-rw-r--r--vendor/github.com/modern-go/reflect2/relfect2_386.s0
-rw-r--r--vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s0
-rw-r--r--vendor/github.com/modern-go/reflect2/relfect2_arm.s0
-rw-r--r--vendor/github.com/modern-go/reflect2/relfect2_arm64.s0
-rw-r--r--vendor/github.com/modern-go/reflect2/relfect2_mips64x.s0
-rw-r--r--vendor/github.com/modern-go/reflect2/relfect2_mipsx.s0
-rw-r--r--vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s0
-rw-r--r--vendor/github.com/modern-go/reflect2/relfect2_s390x.s0
-rw-r--r--vendor/github.com/modern-go/reflect2/safe_field.go58
-rw-r--r--vendor/github.com/modern-go/reflect2/safe_map.go101
-rw-r--r--vendor/github.com/modern-go/reflect2/safe_slice.go92
-rw-r--r--vendor/github.com/modern-go/reflect2/safe_struct.go29
-rw-r--r--vendor/github.com/modern-go/reflect2/safe_type.go78
-rw-r--r--vendor/github.com/modern-go/reflect2/type_map.go70
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_array.go65
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_eface.go59
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_field.go74
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_iface.go64
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_link.go76
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_map.go130
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_ptr.go46
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_slice.go177
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_struct.go59
-rw-r--r--vendor/github.com/modern-go/reflect2/unsafe_type.go85
-rw-r--r--vendor/github.com/rs/xid/.appveyor.yml27
-rw-r--r--vendor/github.com/rs/xid/.golangci.yml5
-rw-r--r--vendor/github.com/rs/xid/.travis.yml8
-rw-r--r--vendor/github.com/rs/xid/LICENSE19
-rw-r--r--vendor/github.com/rs/xid/README.md119
-rw-r--r--vendor/github.com/rs/xid/error.go11
-rw-r--r--vendor/github.com/rs/xid/hostid_darwin.go9
-rw-r--r--vendor/github.com/rs/xid/hostid_fallback.go9
-rw-r--r--vendor/github.com/rs/xid/hostid_freebsd.go9
-rw-r--r--vendor/github.com/rs/xid/hostid_linux.go13
-rw-r--r--vendor/github.com/rs/xid/hostid_windows.go38
-rw-r--r--vendor/github.com/rs/xid/id.go391
-rw-r--r--vendor/github.com/sirupsen/logrus/.gitignore4
-rw-r--r--vendor/github.com/sirupsen/logrus/.golangci.yml40
-rw-r--r--vendor/github.com/sirupsen/logrus/.travis.yml15
-rw-r--r--vendor/github.com/sirupsen/logrus/CHANGELOG.md259
-rw-r--r--vendor/github.com/sirupsen/logrus/LICENSE21
-rw-r--r--vendor/github.com/sirupsen/logrus/README.md515
-rw-r--r--vendor/github.com/sirupsen/logrus/alt_exit.go76
-rw-r--r--vendor/github.com/sirupsen/logrus/appveyor.yml14
-rw-r--r--vendor/github.com/sirupsen/logrus/buffer_pool.go43
-rw-r--r--vendor/github.com/sirupsen/logrus/doc.go26
-rw-r--r--vendor/github.com/sirupsen/logrus/entry.go442
-rw-r--r--vendor/github.com/sirupsen/logrus/exported.go270
-rw-r--r--vendor/github.com/sirupsen/logrus/formatter.go78
-rw-r--r--vendor/github.com/sirupsen/logrus/hooks.go34
-rw-r--r--vendor/github.com/sirupsen/logrus/json_formatter.go128
-rw-r--r--vendor/github.com/sirupsen/logrus/logger.go417
-rw-r--r--vendor/github.com/sirupsen/logrus/logrus.go186
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_appengine.go11
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_bsd.go13
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_js.go7
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go11
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go17
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_solaris.go11
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_unix.go13
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_windows.go27
-rw-r--r--vendor/github.com/sirupsen/logrus/text_formatter.go339
-rw-r--r--vendor/github.com/sirupsen/logrus/writer.go102
337 files changed, 94610 insertions, 0 deletions
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
new file mode 100644
index 0000000..ac12e48
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/.travis.yml
@@ -0,0 +1,21 @@
1sudo: false
2language: go
3go_import_path: github.com/dustin/go-humanize
4go:
5 - 1.13.x
6 - 1.14.x
7 - 1.15.x
8 - 1.16.x
9 - stable
10 - master
11matrix:
12 allow_failures:
13 - go: master
14 fast_finish: true
15install:
16 - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
17script:
18 - diff -u <(echo -n) <(gofmt -d -s .)
19 - go vet .
20 - go install -v -race ./...
21 - go test -v -race ./...
diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE
new file mode 100644
index 0000000..8d9a94a
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/LICENSE
@@ -0,0 +1,21 @@
1Copyright (c) 2005-2008 Dustin Sallings <[email protected]>
2
3Permission is hereby granted, free of charge, to any person obtaining a copy
4of this software and associated documentation files (the "Software"), to deal
5in the Software without restriction, including without limitation the rights
6to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7copies of the Software, and to permit persons to whom the Software is
8furnished to do so, subject to the following conditions:
9
10The above copyright notice and this permission notice shall be included in
11all copies or substantial portions of the Software.
12
13THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19SOFTWARE.
20
21<http://www.opensource.org/licenses/mit-license.php>
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
new file mode 100644
index 0000000..7d0b16b
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/README.markdown
@@ -0,0 +1,124 @@
1# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
2
3Just a few functions for helping humanize times and sizes.
4
5`go get` it as `github.com/dustin/go-humanize`, import it as
6`"github.com/dustin/go-humanize"`, use it as `humanize`.
7
8See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
9complete documentation.
10
11## Sizes
12
13This lets you take numbers like `82854982` and convert them to useful
14strings like, `83 MB` or `79 MiB` (whichever you prefer).
15
16Example:
17
18```go
19fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
20```
21
22## Times
23
24This lets you take a `time.Time` and spit it out in relative terms.
25For example, `12 seconds ago` or `3 days from now`.
26
27Example:
28
29```go
30fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
31```
32
33Thanks to Kyle Lemons for the time implementation from an IRC
34conversation one day. It's pretty neat.
35
36## Ordinals
37
38From a [mailing list discussion][odisc] where a user wanted to be able
39to label ordinals.
40
41 0 -> 0th
42 1 -> 1st
43 2 -> 2nd
44 3 -> 3rd
45 4 -> 4th
46 [...]
47
48Example:
49
50```go
51fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
52```
53
54## Commas
55
56Want to shove commas into numbers? Be my guest.
57
58 0 -> 0
59 100 -> 100
60 1000 -> 1,000
61 1000000000 -> 1,000,000,000
62 -100000 -> -100,000
63
64Example:
65
66```go
67fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
68```
69
70## Ftoa
71
72Nicer float64 formatter that removes trailing zeros.
73
74```go
75fmt.Printf("%f", 2.24) // 2.240000
76fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
77fmt.Printf("%f", 2.0) // 2.000000
78fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
79```
80
81## SI notation
82
83Format numbers with [SI notation][sinotation].
84
85Example:
86
87```go
88humanize.SI(0.00000000223, "M") // 2.23 nM
89```
90
91## English-specific functions
92
93The following functions are in the `humanize/english` subpackage.
94
95### Plurals
96
97Simple English pluralization
98
99```go
100english.PluralWord(1, "object", "") // object
101english.PluralWord(42, "object", "") // objects
102english.PluralWord(2, "bus", "") // buses
103english.PluralWord(99, "locus", "loci") // loci
104
105english.Plural(1, "object", "") // 1 object
106english.Plural(42, "object", "") // 42 objects
107english.Plural(2, "bus", "") // 2 buses
108english.Plural(99, "locus", "loci") // 99 loci
109```
110
111### Word series
112
113Format comma-separated words lists with conjuctions:
114
115```go
116english.WordSeries([]string{"foo"}, "and") // foo
117english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
118english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
119
120english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
121```
122
123[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
124[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
new file mode 100644
index 0000000..f49dc33
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/big.go
@@ -0,0 +1,31 @@
1package humanize
2
3import (
4 "math/big"
5)
6
7// order of magnitude (to a max order)
8func oomm(n, b *big.Int, maxmag int) (float64, int) {
9 mag := 0
10 m := &big.Int{}
11 for n.Cmp(b) >= 0 {
12 n.DivMod(n, b, m)
13 mag++
14 if mag == maxmag && maxmag >= 0 {
15 break
16 }
17 }
18 return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
19}
20
21// total order of magnitude
22// (same as above, but with no upper limit)
23func oom(n, b *big.Int) (float64, int) {
24 mag := 0
25 m := &big.Int{}
26 for n.Cmp(b) >= 0 {
27 n.DivMod(n, b, m)
28 mag++
29 }
30 return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
31}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
new file mode 100644
index 0000000..3b015fd
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bigbytes.go
@@ -0,0 +1,189 @@
1package humanize
2
3import (
4 "fmt"
5 "math/big"
6 "strings"
7 "unicode"
8)
9
10var (
11 bigIECExp = big.NewInt(1024)
12
13 // BigByte is one byte in bit.Ints
14 BigByte = big.NewInt(1)
15 // BigKiByte is 1,024 bytes in bit.Ints
16 BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
17 // BigMiByte is 1,024 k bytes in bit.Ints
18 BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
19 // BigGiByte is 1,024 m bytes in bit.Ints
20 BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
21 // BigTiByte is 1,024 g bytes in bit.Ints
22 BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
23 // BigPiByte is 1,024 t bytes in bit.Ints
24 BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
25 // BigEiByte is 1,024 p bytes in bit.Ints
26 BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
27 // BigZiByte is 1,024 e bytes in bit.Ints
28 BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
29 // BigYiByte is 1,024 z bytes in bit.Ints
30 BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
31 // BigRiByte is 1,024 y bytes in bit.Ints
32 BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
33 // BigQiByte is 1,024 r bytes in bit.Ints
34 BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
35)
36
37var (
38 bigSIExp = big.NewInt(1000)
39
40 // BigSIByte is one SI byte in big.Ints
41 BigSIByte = big.NewInt(1)
42 // BigKByte is 1,000 SI bytes in big.Ints
43 BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
44 // BigMByte is 1,000 SI k bytes in big.Ints
45 BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
46 // BigGByte is 1,000 SI m bytes in big.Ints
47 BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
48 // BigTByte is 1,000 SI g bytes in big.Ints
49 BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
50 // BigPByte is 1,000 SI t bytes in big.Ints
51 BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
52 // BigEByte is 1,000 SI p bytes in big.Ints
53 BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
54 // BigZByte is 1,000 SI e bytes in big.Ints
55 BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
56 // BigYByte is 1,000 SI z bytes in big.Ints
57 BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
58 // BigRByte is 1,000 SI y bytes in big.Ints
59 BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
60 // BigQByte is 1,000 SI r bytes in big.Ints
61 BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
62)
63
64var bigBytesSizeTable = map[string]*big.Int{
65 "b": BigByte,
66 "kib": BigKiByte,
67 "kb": BigKByte,
68 "mib": BigMiByte,
69 "mb": BigMByte,
70 "gib": BigGiByte,
71 "gb": BigGByte,
72 "tib": BigTiByte,
73 "tb": BigTByte,
74 "pib": BigPiByte,
75 "pb": BigPByte,
76 "eib": BigEiByte,
77 "eb": BigEByte,
78 "zib": BigZiByte,
79 "zb": BigZByte,
80 "yib": BigYiByte,
81 "yb": BigYByte,
82 "rib": BigRiByte,
83 "rb": BigRByte,
84 "qib": BigQiByte,
85 "qb": BigQByte,
86 // Without suffix
87 "": BigByte,
88 "ki": BigKiByte,
89 "k": BigKByte,
90 "mi": BigMiByte,
91 "m": BigMByte,
92 "gi": BigGiByte,
93 "g": BigGByte,
94 "ti": BigTiByte,
95 "t": BigTByte,
96 "pi": BigPiByte,
97 "p": BigPByte,
98 "ei": BigEiByte,
99 "e": BigEByte,
100 "z": BigZByte,
101 "zi": BigZiByte,
102 "y": BigYByte,
103 "yi": BigYiByte,
104 "r": BigRByte,
105 "ri": BigRiByte,
106 "q": BigQByte,
107 "qi": BigQiByte,
108}
109
110var ten = big.NewInt(10)
111
112func humanateBigBytes(s, base *big.Int, sizes []string) string {
113 if s.Cmp(ten) < 0 {
114 return fmt.Sprintf("%d B", s)
115 }
116 c := (&big.Int{}).Set(s)
117 val, mag := oomm(c, base, len(sizes)-1)
118 suffix := sizes[mag]
119 f := "%.0f %s"
120 if val < 10 {
121 f = "%.1f %s"
122 }
123
124 return fmt.Sprintf(f, val, suffix)
125
126}
127
128// BigBytes produces a human readable representation of an SI size.
129//
130// See also: ParseBigBytes.
131//
132// BigBytes(82854982) -> 83 MB
133func BigBytes(s *big.Int) string {
134 sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
135 return humanateBigBytes(s, bigSIExp, sizes)
136}
137
138// BigIBytes produces a human readable representation of an IEC size.
139//
140// See also: ParseBigBytes.
141//
142// BigIBytes(82854982) -> 79 MiB
143func BigIBytes(s *big.Int) string {
144 sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
145 return humanateBigBytes(s, bigIECExp, sizes)
146}
147
148// ParseBigBytes parses a string representation of bytes into the number
149// of bytes it represents.
150//
151// See also: BigBytes, BigIBytes.
152//
153// ParseBigBytes("42 MB") -> 42000000, nil
154// ParseBigBytes("42 mib") -> 44040192, nil
155func ParseBigBytes(s string) (*big.Int, error) {
156 lastDigit := 0
157 hasComma := false
158 for _, r := range s {
159 if !(unicode.IsDigit(r) || r == '.' || r == ',') {
160 break
161 }
162 if r == ',' {
163 hasComma = true
164 }
165 lastDigit++
166 }
167
168 num := s[:lastDigit]
169 if hasComma {
170 num = strings.Replace(num, ",", "", -1)
171 }
172
173 val := &big.Rat{}
174 _, err := fmt.Sscanf(num, "%f", val)
175 if err != nil {
176 return nil, err
177 }
178
179 extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
180 if m, ok := bigBytesSizeTable[extra]; ok {
181 mv := (&big.Rat{}).SetInt(m)
182 val.Mul(val, mv)
183 rv := &big.Int{}
184 rv.Div(val.Num(), val.Denom())
185 return rv, nil
186 }
187
188 return nil, fmt.Errorf("unhandled size name: %v", extra)
189}
diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go
new file mode 100644
index 0000000..0b498f4
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bytes.go
@@ -0,0 +1,143 @@
1package humanize
2
3import (
4 "fmt"
5 "math"
6 "strconv"
7 "strings"
8 "unicode"
9)
10
11// IEC Sizes.
12// kibis of bits
13const (
14 Byte = 1 << (iota * 10)
15 KiByte
16 MiByte
17 GiByte
18 TiByte
19 PiByte
20 EiByte
21)
22
23// SI Sizes.
24const (
25 IByte = 1
26 KByte = IByte * 1000
27 MByte = KByte * 1000
28 GByte = MByte * 1000
29 TByte = GByte * 1000
30 PByte = TByte * 1000
31 EByte = PByte * 1000
32)
33
34var bytesSizeTable = map[string]uint64{
35 "b": Byte,
36 "kib": KiByte,
37 "kb": KByte,
38 "mib": MiByte,
39 "mb": MByte,
40 "gib": GiByte,
41 "gb": GByte,
42 "tib": TiByte,
43 "tb": TByte,
44 "pib": PiByte,
45 "pb": PByte,
46 "eib": EiByte,
47 "eb": EByte,
48 // Without suffix
49 "": Byte,
50 "ki": KiByte,
51 "k": KByte,
52 "mi": MiByte,
53 "m": MByte,
54 "gi": GiByte,
55 "g": GByte,
56 "ti": TiByte,
57 "t": TByte,
58 "pi": PiByte,
59 "p": PByte,
60 "ei": EiByte,
61 "e": EByte,
62}
63
64func logn(n, b float64) float64 {
65 return math.Log(n) / math.Log(b)
66}
67
68func humanateBytes(s uint64, base float64, sizes []string) string {
69 if s < 10 {
70 return fmt.Sprintf("%d B", s)
71 }
72 e := math.Floor(logn(float64(s), base))
73 suffix := sizes[int(e)]
74 val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
75 f := "%.0f %s"
76 if val < 10 {
77 f = "%.1f %s"
78 }
79
80 return fmt.Sprintf(f, val, suffix)
81}
82
83// Bytes produces a human readable representation of an SI size.
84//
85// See also: ParseBytes.
86//
87// Bytes(82854982) -> 83 MB
88func Bytes(s uint64) string {
89 sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
90 return humanateBytes(s, 1000, sizes)
91}
92
93// IBytes produces a human readable representation of an IEC size.
94//
95// See also: ParseBytes.
96//
97// IBytes(82854982) -> 79 MiB
98func IBytes(s uint64) string {
99 sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
100 return humanateBytes(s, 1024, sizes)
101}
102
103// ParseBytes parses a string representation of bytes into the number
104// of bytes it represents.
105//
106// See Also: Bytes, IBytes.
107//
108// ParseBytes("42 MB") -> 42000000, nil
109// ParseBytes("42 mib") -> 44040192, nil
110func ParseBytes(s string) (uint64, error) {
111 lastDigit := 0
112 hasComma := false
113 for _, r := range s {
114 if !(unicode.IsDigit(r) || r == '.' || r == ',') {
115 break
116 }
117 if r == ',' {
118 hasComma = true
119 }
120 lastDigit++
121 }
122
123 num := s[:lastDigit]
124 if hasComma {
125 num = strings.Replace(num, ",", "", -1)
126 }
127
128 f, err := strconv.ParseFloat(num, 64)
129 if err != nil {
130 return 0, err
131 }
132
133 extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
134 if m, ok := bytesSizeTable[extra]; ok {
135 f *= float64(m)
136 if f >= math.MaxUint64 {
137 return 0, fmt.Errorf("too large: %v", s)
138 }
139 return uint64(f), nil
140 }
141
142 return 0, fmt.Errorf("unhandled size name: %v", extra)
143}
diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go
new file mode 100644
index 0000000..520ae3e
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/comma.go
@@ -0,0 +1,116 @@
1package humanize
2
3import (
4 "bytes"
5 "math"
6 "math/big"
7 "strconv"
8 "strings"
9)
10
// Comma produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Comma(834142) -> 834,142
func Comma(v int64) string {
	// math.MinInt64 has no positive counterpart in int64, so negating it
	// would overflow; return its known rendering directly.
	if v == math.MinInt64 {
		return "-9,223,372,036,854,775,808"
	}

	sign := ""
	if v < 0 {
		sign = "-"
		v = -v
	}

	// Render the absolute value, then peel groups of three digits off
	// the right-hand end. Slicing the decimal string preserves any
	// leading zeros inside a group automatically.
	digits := strconv.FormatInt(v, 10)
	var groups []string
	for len(digits) > 3 {
		groups = append([]string{digits[len(digits)-3:]}, groups...)
		digits = digits[:len(digits)-3]
	}
	groups = append([]string{digits}, groups...)

	return sign + strings.Join(groups, ",")
}
45
// Commaf produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Commaf(834142.32) -> 834,142.32
func Commaf(v float64) string {
	buf := &bytes.Buffer{}
	if v < 0 {
		buf.WriteByte('-')
		v = -v
	}

	// Shortest decimal form that round-trips, e.g. "834142.32".
	s := strconv.FormatFloat(v, 'f', -1, 64)
	intPart, fracPart := s, ""
	if i := strings.IndexByte(s, '.'); i >= 0 {
		intPart, fracPart = s[:i], s[i+1:]
	}

	// Write a leading group of 1-3 digits, then full groups of three,
	// each preceded by a comma.
	lead := len(intPart) % 3
	if lead == 0 {
		lead = 3
	}
	buf.WriteString(intPart[:lead])
	for i := lead; i < len(intPart); i += 3 {
		buf.WriteByte(',')
		buf.WriteString(intPart[i : i+3])
	}

	if fracPart != "" {
		buf.WriteByte('.')
		buf.WriteString(fracPart)
	}
	return buf.String()
}
78
// CommafWithDigits works like the Commaf but limits the resulting
// string to the given number of decimal places (truncating, not
// rounding).
//
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
func CommafWithDigits(f float64, decimals int) string {
	return stripTrailingDigits(Commaf(f), decimals)
}
86
// BigComma produces a string form of the given big.Int in base 10
// with commas after every three orders of magnitude.
//
// Note that b is modified in place: its sign is stripped and its value
// consumed by repeated division.
func BigComma(b *big.Int) string {
	sign := ""
	if b.Sign() < 0 {
		sign = "-"
		b.Abs(b)
	}

	athousand := big.NewInt(1000)
	// oom (defined elsewhere in this package) apparently yields the
	// order of magnitude in base 1000, which bounds the number of
	// comma-separated groups — TODO confirm against oom's definition.
	c := (&big.Int{}).Set(b)
	_, m := oom(c, athousand)
	parts := make([]string, m+1)
	j := len(parts) - 1

	mod := &big.Int{}
	for b.Cmp(athousand) >= 0 {
		b.DivMod(b, athousand, mod)
		parts[j] = strconv.FormatInt(mod.Int64(), 10)
		// Inner groups are zero-padded to exactly three digits.
		switch len(parts[j]) {
		case 2:
			parts[j] = "0" + parts[j]
		case 1:
			parts[j] = "00" + parts[j]
		}
		j--
	}
	parts[j] = strconv.Itoa(int(b.Int64()))
	return sign + strings.Join(parts[j:], ",")
}
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
new file mode 100644
index 0000000..2bc83a0
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/commaf.go
@@ -0,0 +1,41 @@
1//go:build go1.6
2// +build go1.6
3
4package humanize
5
6import (
7 "bytes"
8 "math/big"
9 "strings"
10)
11
12// BigCommaf produces a string form of the given big.Float in base 10
13// with commas after every three orders of magnitude.
14func BigCommaf(v *big.Float) string {
15 buf := &bytes.Buffer{}
16 if v.Sign() < 0 {
17 buf.Write([]byte{'-'})
18 v.Abs(v)
19 }
20
21 comma := []byte{','}
22
23 parts := strings.Split(v.Text('f', -1), ".")
24 pos := 0
25 if len(parts[0])%3 != 0 {
26 pos += len(parts[0]) % 3
27 buf.WriteString(parts[0][:pos])
28 buf.Write(comma)
29 }
30 for ; pos < len(parts[0]); pos += 3 {
31 buf.WriteString(parts[0][pos : pos+3])
32 buf.Write(comma)
33 }
34 buf.Truncate(buf.Len() - 1)
35
36 if len(parts) > 1 {
37 buf.Write([]byte{'.'})
38 buf.WriteString(parts[1])
39 }
40 return buf.String()
41}
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
new file mode 100644
index 0000000..bce923f
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ftoa.go
@@ -0,0 +1,49 @@
1package humanize
2
3import (
4 "strconv"
5 "strings"
6)
7
// stripTrailingZeros removes zeros trailing the (last) decimal point of
// s, dropping the point itself when nothing remains after it. Strings
// without a decimal point are returned unchanged.
func stripTrailingZeros(s string) string {
	dot := strings.LastIndexByte(s, '.')
	if dot < 0 {
		return s
	}
	end := len(s)
	for end > dot+1 && s[end-1] == '0' {
		end--
	}
	if end == dot+1 {
		// Every fractional digit was a zero; drop the dot too.
		end = dot
	}
	if end == 0 {
		// Degenerate input like ".000": the first character is always
		// retained, matching the original index walk.
		end = 1
	}
	return s[:end]
}
25
// stripTrailingDigits truncates s to at most digits characters after
// the decimal point; digits <= 0 removes the decimal point as well.
// Strings without a decimal point are returned unchanged.
func stripTrailingDigits(s string, digits int) string {
	dot := strings.Index(s, ".")
	switch {
	case dot < 0:
		return s
	case digits <= 0:
		return s[:dot]
	}
	// Keep the dot plus up to `digits` fractional characters.
	if cut := dot + 1 + digits; cut < len(s) {
		return s[:cut]
	}
	return s
}
39
// Ftoa converts a float to a string with no trailing zeros.
//
// The value is first rendered with 6 decimal places, so at most 6
// fractional digits survive the trim.
func Ftoa(num float64) string {
	return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
}
44
// FtoaWithDigits converts a float to a string but limits the resulting
// string to the given number of decimal places, and no trailing zeros.
// Truncation happens before the zero-trim; digits beyond 6 have no
// effect because the initial rendering uses 6 decimal places.
func FtoaWithDigits(num float64, digits int) string {
	return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
}
diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go
new file mode 100644
index 0000000..a2c2da3
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/humanize.go
@@ -0,0 +1,8 @@
1/*
2Package humanize converts boring ugly numbers to human-friendly strings and back.
3
4Durations can be turned into strings such as "3 days ago", numbers
5representing sizes like 82854982 into useful strings like, "83 MB" or
6"79 MiB" (whichever you prefer).
7*/
8package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
new file mode 100644
index 0000000..6470d0d
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/number.go
@@ -0,0 +1,192 @@
1package humanize
2
3/*
4Slightly adapted from the source to fit go-humanize.
5
6Author: https://github.com/gorhill
7Source: https://gist.github.com/gorhill/5285193
8
9*/
10
11import (
12 "math"
13 "strconv"
14)
15
var (
	// renderFloatPrecisionMultipliers[p] is 10^p, used by FormatFloat
	// to scale the fractional part up to p integer digits.
	renderFloatPrecisionMultipliers = [...]float64{
		1,
		10,
		100,
		1000,
		10000,
		100000,
		1000000,
		10000000,
		100000000,
		1000000000,
	}

	// renderFloatPrecisionRounders[p] is half a unit in the last place
	// at precision p, added before truncation so values round half-up.
	renderFloatPrecisionRounders = [...]float64{
		0.5,
		0.05,
		0.005,
		0.0005,
		0.00005,
		0.000005,
		0.0000005,
		0.00000005,
		0.000000005,
		0.0000000005,
	}
)
43
// FormatFloat produces a formatted number as string based on the following user-specified criteria:
// * thousands separator
// * decimal separator
// * decimal precision
//
// Usage: s := RenderFloat(format, n)
// The format parameter tells how to render the number n.
//
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
//
// Examples of format strings, given n = 12345.6789:
// "#,###.##" => "12,345.67"
// "#,###." => "12,345"
// "#,###" => "12345,678"
// "#\u202F###,##" => "12 345,68"
// "#.###,######" => "12.345,678900"
// "" (aka default format) => 12,345.67
//
// The highest precision allowed is 9 digits after the decimal symbol.
// There is also a version for integer number, FormatInteger(),
// which is convenient for calls within template.
func FormatFloat(format string, n float64) string {
	// Special cases:
	//   NaN = "NaN"
	//   +Inf = "+Infinity"
	//   -Inf = "-Infinity"
	if math.IsNaN(n) {
		return "NaN"
	}
	if n > math.MaxFloat64 {
		return "Infinity"
	}
	if n < (0.0 - math.MaxFloat64) {
		return "-Infinity"
	}

	// default format
	precision := 2
	decimalStr := "."
	thousandStr := ","
	positiveStr := ""
	negativeStr := "-"

	if len(format) > 0 {
		// Work on runes so multi-byte separators (e.g. \u202F) count as
		// one directive position.
		format := []rune(format)

		// If there is an explicit format directive,
		// then default values are these:
		precision = 9
		thousandStr = ""

		// collect indices of meaningful formatting directives
		// (anything that is not a '#' or '0' digit-specifier)
		formatIndx := []int{}
		for i, char := range format {
			if char != '#' && char != '0' {
				formatIndx = append(formatIndx, i)
			}
		}

		if len(formatIndx) > 0 {
			// Directive at index 0:
			//   Must be a '+'
			//   Raise an error if not the case
			// index: 0123456789
			//        +0.000,000
			//        +000,000.0
			//        +0000.00
			//        +0000
			if formatIndx[0] == 0 {
				if format[formatIndx[0]] != '+' {
					panic("RenderFloat(): invalid positive sign directive")
				}
				positiveStr = "+"
				formatIndx = formatIndx[1:]
			}

			// Two directives:
			//   First is thousands separator
			//   Raise an error if not followed by 3-digit
			// 0123456789
			// 0.000,000
			// 000,000.00
			if len(formatIndx) == 2 {
				if (formatIndx[1] - formatIndx[0]) != 4 {
					panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
				}
				thousandStr = string(format[formatIndx[0]])
				formatIndx = formatIndx[1:]
			}

			// One directive:
			//   Directive is decimal separator
			//   The number of digit-specifier following the separator indicates wanted precision
			// 0123456789
			// 0.00
			// 000,0000
			// NOTE(review): more than 9 digit-specifiers after the
			// separator makes precision exceed 9 and will panic when
			// indexing the rounders table below — confirm callers obey
			// the documented limit.
			if len(formatIndx) == 1 {
				decimalStr = string(format[formatIndx[0]])
				precision = len(format) - formatIndx[0] - 1
			}
		}
	}

	// generate sign part; magnitudes below 1e-9 collapse to zero with
	// no sign.
	var signStr string
	if n >= 0.000000001 {
		signStr = positiveStr
	} else if n <= -0.000000001 {
		signStr = negativeStr
		n = -n
	} else {
		signStr = ""
		n = 0.0
	}

	// split number into integer and fractional parts
	intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])

	// generate integer part string
	// NOTE(review): int64(intf) overflows for |n| >= 2^63 — confirm
	// callers stay in range.
	intStr := strconv.FormatInt(int64(intf), 10)

	// add thousand separator if required, inserting right-to-left every
	// three digits
	if len(thousandStr) > 0 {
		for i := len(intStr); i > 3; {
			i -= 3
			intStr = intStr[:i] + thousandStr + intStr[i:]
		}
	}

	// no fractional part, we can leave now
	if precision == 0 {
		return signStr + intStr
	}

	// generate fractional part
	fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
	// may need padding with leading zeros up to the requested precision
	if len(fracStr) < precision {
		fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
	}

	return signStr + intStr + decimalStr + fracStr
}
187
// FormatInteger produces a formatted number as string.
// See FormatFloat.
//
// NOTE(review): the int is converted through float64, which is exact
// only up to 2^53.
func FormatInteger(format string, n int) string {
	return FormatFloat(format, float64(n))
}
diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go
new file mode 100644
index 0000000..43d88a8
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ordinals.go
@@ -0,0 +1,25 @@
1package humanize
2
3import "strconv"
4
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
	suffix := "th"
	// 11th, 12th and 13th are irregular; everything else keys off the
	// final digit.
	if m := x % 100; m < 11 || m > 13 {
		switch x % 10 {
		case 1:
			suffix = "st"
		case 2:
			suffix = "nd"
		case 3:
			suffix = "rd"
		}
	}
	return strconv.Itoa(x) + suffix
}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
new file mode 100644
index 0000000..8b85019
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/si.go
@@ -0,0 +1,127 @@
1package humanize
2
3import (
4 "errors"
5 "math"
6 "regexp"
7 "strconv"
8)
9
// siPrefixTable maps powers of ten (as float64 exponents, in steps of
// three) to their SI prefix symbols, from quecto (1e-30) to quetta
// (1e30).
var siPrefixTable = map[float64]string{
	-30: "q", // quecto
	-27: "r", // ronto
	-24: "y", // yocto
	-21: "z", // zepto
	-18: "a", // atto
	-15: "f", // femto
	-12: "p", // pico
	-9:  "n", // nano
	-6:  "µ", // micro
	-3:  "m", // milli
	0:   "",
	3:   "k", // kilo
	6:   "M", // mega
	9:   "G", // giga
	12:  "T", // tera
	15:  "P", // peta
	18:  "E", // exa
	21:  "Z", // zetta
	24:  "Y", // yotta
	27:  "R", // ronna
	30:  "Q", // quetta
}
33
34var revSIPrefixTable = revfmap(siPrefixTable)
35
// revfmap inverts the exponent→prefix table into prefix→multiplier,
// e.g. the entry 3:"k" becomes "k":1000.
func revfmap(in map[float64]string) map[string]float64 {
	out := make(map[string]float64, len(in))
	for exp, prefix := range in {
		out[prefix] = math.Pow(10, exp)
	}
	return out
}
44
// riParseRegex matches an SI string: a number, an optional space, an
// optional one-rune SI prefix, then the unit. It is built in init from
// siPrefixTable so the prefix character class stays in sync with the
// table.
var riParseRegex *regexp.Regexp

func init() {
	// Character-class order is irrelevant, so the random map iteration
	// order is harmless here.
	ri := `^([\-0-9.]+)\s?([`
	for _, v := range siPrefixTable {
		ri += v
	}
	ri += `]?)(.*)`

	riParseRegex = regexp.MustCompile(ri)
}
56
// ComputeSI finds the most appropriate SI prefix for the given number
// and returns the prefix along with the value adjusted to be within
// that prefix.
//
// See also: SI, ParseSI.
//
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
func ComputeSI(input float64) (float64, string) {
	if input == 0 {
		return 0, ""
	}
	mag := math.Abs(input)
	// Snap the decimal exponent down to the nearest multiple of 3,
	// matching the SI prefix steps.
	exponent := math.Floor(logn(mag, 10))
	exponent = math.Floor(exponent/3) * 3

	value := mag / math.Pow(10, exponent)

	// Handle special case where value is exactly 1000.0
	// Should return 1 M instead of 1000 k
	if value == 1000.0 {
		exponent += 3
		value = mag / math.Pow(10, exponent)
	}

	// Restore the sign of the original input.
	value = math.Copysign(value, input)

	// Exponents outside the table fall back to the empty prefix.
	prefix := siPrefixTable[exponent]
	return value, prefix
}
86
// SI returns a string with default formatting.
//
// SI uses Ftoa to format float value, removing trailing zeros.
//
// See also: ComputeSI, ParseSI.
//
// e.g. SI(1000000, "B") -> 1 MB
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
func SI(input float64, unit string) string {
	value, prefix := ComputeSI(input)
	// A single space separates the number from prefix+unit.
	return Ftoa(value) + " " + prefix + unit
}
99
// SIWithDigits works like SI but limits the resulting string to the
// given number of decimal places.
//
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
func SIWithDigits(input float64, decimals int, unit string) string {
	value, prefix := ComputeSI(input)
	return FtoaWithDigits(value, decimals) + " " + prefix + unit
}
109
// errInvalid is returned by ParseSI for input that does not match the
// expected "number [prefix]unit" shape.
var errInvalid = errors.New("invalid input")

// ParseSI parses an SI string back into the number and unit.
//
// See also: SI, ComputeSI.
//
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
func ParseSI(input string) (float64, string, error) {
	// found[1] = number, found[2] = optional SI prefix, found[3] = unit.
	found := riParseRegex.FindStringSubmatch(input)
	if len(found) != 4 {
		return 0, "", errInvalid
	}
	// An empty prefix maps to multiplier 1 via revSIPrefixTable.
	mag := revSIPrefixTable[found[2]]
	unit := found[3]

	base, err := strconv.ParseFloat(found[1], 64)
	return base * mag, unit, err
}
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
new file mode 100644
index 0000000..dd3fbf5
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/times.go
@@ -0,0 +1,117 @@
1package humanize
2
3import (
4 "fmt"
5 "math"
6 "sort"
7 "time"
8)
9
// Seconds-based time units used by the relative-time tables. Month and
// Year are calendar approximations (30-day months, 360-day years).
const (
	Day = 24 * time.Hour
	Week = 7 * Day
	Month = 30 * Day
	Year = 12 * Month
	LongTime = 37 * Year
)
18
// Time formats a time into a relative string, measured against
// time.Now at the moment of the call.
//
// Time(someT) -> "3 weeks ago"
func Time(then time.Time) string {
	return RelTime(then, time.Now(), "ago", "from now")
}
25
// A RelTimeMagnitude struct contains a relative time point at which
// the relative format of time will switch to a new format string. A
// slice of these in ascending order by their "D" field is passed to
// CustomRelTime to format durations.
//
// The Format field is a string that may contain a "%s" which will be
// replaced with the appropriate signed label (e.g. "ago" or "from
// now") and a "%d" that will be replaced by the quantity.
//
// The DivBy field is the amount of time the time difference must be
// divided by in order to display correctly.
//
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
// DivBy should be time.Minute so whatever the duration is will be
// expressed in minutes.
type RelTimeMagnitude struct {
	D time.Duration
	Format string
	DivBy time.Duration
}
46
// defaultMagnitudes is the standard format table used by Time and
// RelTime, ordered by ascending threshold D as CustomRelTime requires.
// The final math.MaxInt64 entry guarantees a match for any duration.
var defaultMagnitudes = []RelTimeMagnitude{
	{time.Second, "now", time.Second},
	{2 * time.Second, "1 second %s", 1},
	{time.Minute, "%d seconds %s", time.Second},
	{2 * time.Minute, "1 minute %s", 1},
	{time.Hour, "%d minutes %s", time.Minute},
	{2 * time.Hour, "1 hour %s", 1},
	{Day, "%d hours %s", time.Hour},
	{2 * Day, "1 day %s", 1},
	{Week, "%d days %s", Day},
	{2 * Week, "1 week %s", 1},
	{Month, "%d weeks %s", Week},
	{2 * Month, "1 month %s", 1},
	{Year, "%d months %s", Month},
	{18 * Month, "1 year %s", 1},
	{2 * Year, "2 years %s", 1},
	{LongTime, "%d years %s", Year},
	{math.MaxInt64, "a long while %s", 1},
}
66
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), the labels are used applied so that
// the label corresponding to the smaller time is applied.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
	return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}
77
// CustomRelTime formats a time into a relative string.
//
// It takes two times, two labels and a table of relative time formats.
// In addition to the generic time delta string (e.g. 5 minutes), the
// labels are used applied so that the label corresponding to the
// smaller time is applied.
//
// The magnitudes table must be sorted in ascending order by D.
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
	lbl := albl
	diff := b.Sub(a)

	// If a is the later time, swap the label and use the absolute
	// difference.
	if a.After(b) {
		lbl = blbl
		diff = a.Sub(b)
	}

	// Binary-search for the first magnitude whose threshold exceeds the
	// difference.
	n := sort.Search(len(magnitudes), func(i int) bool {
		return magnitudes[i].D > diff
	})

	if n >= len(magnitudes) {
		n = len(magnitudes) - 1
	}
	mag := magnitudes[n]
	// Scan the format string for %s/%d verbs so arguments can be
	// supplied in whatever order the format uses; other verbs are
	// ignored.
	args := []interface{}{}
	escaped := false
	for _, ch := range mag.Format {
		if escaped {
			switch ch {
			case 's':
				args = append(args, lbl)
			case 'd':
				args = append(args, diff/mag.DivBy)
			}
			escaped = false
		} else {
			escaped = ch == '%'
		}
	}
	return fmt.Sprintf(mag.Format, args...)
}
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 0000000..c9fb829
--- /dev/null
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,28 @@
1# Changelog
2
3## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
4
5
6### Features
7
8* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
9
10## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
11
12
13### Features
14
15* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
16
17### Fixes
18
19* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
20
21## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
22
23
24### Bug Fixes
25
26* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
27
28## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
new file mode 100644
index 0000000..a502fdc
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -0,0 +1,26 @@
1# How to contribute
2
3We definitely welcome patches and contribution to this project!
4
5### Tips
6
7Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
8
9Always try to include a test case! If it is not possible or not necessary,
10please explain why in the pull request description.
11
12### Releasing
13
14Commits that would precipitate a SemVer change, as described in the Conventional
15Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
16to create a release candidate pull request. Once submitted, `release-please`
17will create a release.
18
19For tips on how to work with `release-please`, see its documentation.
20
21### Legal requirements
22
23In order to protect both you and ourselves, you will need to sign the
24[Contributor License Agreement](https://cla.developers.google.com/clas).
25
26You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b4bb97f
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTORS
@@ -0,0 +1,9 @@
1Paul Borman <[email protected]>
2bmatsuo
3shawnps
4theory
5jboverfelt
6dsymonds
7cd1
8wallclockbuilder
9dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/vendor/github.com/google/uuid/LICENSE
@@ -0,0 +1,27 @@
1Copyright (c) 2009,2014 Google Inc. All rights reserved.
2
3Redistribution and use in source and binary forms, with or without
4modification, are permitted provided that the following conditions are
5met:
6
7 * Redistributions of source code must retain the above copyright
8notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above
10copyright notice, this list of conditions and the following disclaimer
11in the documentation and/or other materials provided with the
12distribution.
13 * Neither the name of Google Inc. nor the names of its
14contributors may be used to endorse or promote products derived from
15this software without specific prior written permission.
16
17THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
new file mode 100644
index 0000000..3e9a618
--- /dev/null
+++ b/vendor/github.com/google/uuid/README.md
@@ -0,0 +1,21 @@
1# uuid
2The uuid package generates and inspects UUIDs based on
3[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
4and DCE 1.1: Authentication and Security Services.
5
6This package is based on the github.com/pborman/uuid package (previously named
7code.google.com/p/go-uuid). It differs from these earlier packages in that
8a UUID is a 16 byte array rather than a byte slice. One loss due to this
9change is the ability to represent an invalid UUID (vs a NIL UUID).
10
11###### Install
12```sh
13go get github.com/google/uuid
14```
15
16###### Documentation
17[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
18
19Full `go doc` style documentation for the package can be viewed online without
20installing this package by using the GoDoc site here:
21http://pkg.go.dev/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 0000000..fa820b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "encoding/binary"
9 "fmt"
10 "os"
11)
12
13// A Domain represents a Version 2 domain
14type Domain byte
15
16// Domain constants for DCE Security (Version 2) UUIDs.
17const (
18 Person = Domain(0)
19 Group = Domain(1)
20 Org = Domain(2)
21)
22
23// NewDCESecurity returns a DCE Security (Version 2) UUID.
24//
25// The domain should be one of Person, Group or Org.
26// On a POSIX system the id should be the users UID for the Person
27// domain and the users GID for the Group. The meaning of id for
28// the domain Org or on non-POSIX systems is site defined.
29//
30// For a given domain/id pair the same token may be returned for up to
31// 7 minutes and 10 seconds.
32func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
33 uuid, err := NewUUID()
34 if err == nil {
35 uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
36 uuid[9] = byte(domain)
37 binary.BigEndian.PutUint32(uuid[0:], id)
38 }
39 return uuid, err
40}
41
42// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
43// domain with the id returned by os.Getuid.
44//
45// NewDCESecurity(Person, uint32(os.Getuid()))
46func NewDCEPerson() (UUID, error) {
47 return NewDCESecurity(Person, uint32(os.Getuid()))
48}
49
50// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
51// domain with the id returned by os.Getgid.
52//
53// NewDCESecurity(Group, uint32(os.Getgid()))
54func NewDCEGroup() (UUID, error) {
55 return NewDCESecurity(Group, uint32(os.Getgid()))
56}
57
58// Domain returns the domain for a Version 2 UUID. Domains are only defined
59// for Version 2 UUIDs.
60func (uuid UUID) Domain() Domain {
61 return Domain(uuid[9])
62}
63
64// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
65// UUIDs.
66func (uuid UUID) ID() uint32 {
67 return binary.BigEndian.Uint32(uuid[0:4])
68}
69
70func (d Domain) String() string {
71 switch d {
72 case Person:
73 return "Person"
74 case Group:
75 return "Group"
76 case Org:
77 return "Org"
78 }
79 return fmt.Sprintf("Domain%d", int(d))
80}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 0000000..5b8a4b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package uuid generates and inspects UUIDs.
6//
7// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
8// Services.
9//
10// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
11// maps or compared directly.
12package uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 0000000..b404f4b
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,53 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "crypto/md5"
9 "crypto/sha1"
10 "hash"
11)
12
13// Well known namespace IDs and UUIDs
14var (
15 NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
16 NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
17 NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
18 NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
19 Nil UUID // empty UUID, all zeros
20)
21
22// NewHash returns a new UUID derived from the hash of space concatenated with
23// data generated by h. The hash should be at least 16 byte in length. The
24// first 16 bytes of the hash are used to form the UUID. The version of the
25// UUID will be the lower 4 bits of version. NewHash is used to implement
26// NewMD5 and NewSHA1.
27func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
28 h.Reset()
29 h.Write(space[:]) //nolint:errcheck
30 h.Write(data) //nolint:errcheck
31 s := h.Sum(nil)
32 var uuid UUID
33 copy(uuid[:], s)
34 uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
35 uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
36 return uuid
37}
38
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
//	NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
	return NewHash(md5.New(), space, data, 3)
}

// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
//	NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
	return NewHash(sha1.New(), space, data, 5)
}
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 0000000..14bd340
--- /dev/null
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,38 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import "fmt"
8
9// MarshalText implements encoding.TextMarshaler.
10func (uuid UUID) MarshalText() ([]byte, error) {
11 var js [36]byte
12 encodeHex(js[:], uuid)
13 return js[:], nil
14}
15
16// UnmarshalText implements encoding.TextUnmarshaler.
17func (uuid *UUID) UnmarshalText(data []byte) error {
18 id, err := ParseBytes(data)
19 if err != nil {
20 return err
21 }
22 *uuid = id
23 return nil
24}
25
// MarshalBinary implements encoding.BinaryMarshaler.
// It returns the 16 raw bytes of the UUID; because uuid is a value
// receiver, the returned slice is backed by a copy and never fails.
func (uuid UUID) MarshalBinary() ([]byte, error) {
	return uuid[:], nil
}
30
31// UnmarshalBinary implements encoding.BinaryUnmarshaler.
32func (uuid *UUID) UnmarshalBinary(data []byte) error {
33 if len(data) != 16 {
34 return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
35 }
36 copy(uuid[:], data)
37 return nil
38}
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
new file mode 100644
index 0000000..d651a2b
--- /dev/null
+++ b/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,90 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "sync"
9)
10
var (
	nodeMu sync.Mutex // guards ifname and nodeID
	ifname string  // name of interface being used
	nodeID [6]byte // hardware address used for version 1 UUIDs
	zeroID [6]byte // nodeID with only 0's; sentinel for "not yet set"
)
17
18// NodeInterface returns the name of the interface from which the NodeID was
19// derived. The interface "user" is returned if the NodeID was set by
20// SetNodeID.
21func NodeInterface() string {
22 defer nodeMu.Unlock()
23 nodeMu.Lock()
24 return ifname
25}
26
27// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
28// If name is "" then the first usable interface found will be used or a random
29// Node ID will be generated. If a named interface cannot be found then false
30// is returned.
31//
32// SetNodeInterface never fails when name is "".
33func SetNodeInterface(name string) bool {
34 defer nodeMu.Unlock()
35 nodeMu.Lock()
36 return setNodeInterface(name)
37}
38
// setNodeInterface is the unlocked core of SetNodeInterface; callers must
// hold nodeMu. It stores the chosen interface name and hardware address in
// ifname/nodeID.
func setNodeInterface(name string) bool {
	iname, addr := getHardwareInterface(name) // null implementation for js
	if iname != "" && addr != nil {
		ifname = iname
		copy(nodeID[:], addr)
		return true
	}

	// We found no interfaces with a valid hardware address. If name
	// does not specify a specific interface generate a random Node ID
	// (section 4.1.6)
	if name == "" {
		ifname = "random"
		randomBits(nodeID[:])
		return true
	}
	return false
}
57
58// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
59// if not already set.
60func NodeID() []byte {
61 defer nodeMu.Unlock()
62 nodeMu.Lock()
63 if nodeID == zeroID {
64 setNodeInterface("")
65 }
66 nid := nodeID
67 return nid[:]
68}
69
70// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
71// of id are used. If id is less than 6 bytes then false is returned and the
72// Node ID is not set.
73func SetNodeID(id []byte) bool {
74 if len(id) < 6 {
75 return false
76 }
77 defer nodeMu.Unlock()
78 nodeMu.Lock()
79 copy(nodeID[:], id)
80 ifname = "user"
81 return true
82}
83
84// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
85// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
86func (uuid UUID) NodeID() []byte {
87 var node [6]byte
88 copy(node[:], uuid[10:])
89 return node[:]
90}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 0000000..b2a0bc8
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
1// Copyright 2017 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// +build js
6
7package uuid
8
// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
// The signature mirrors the !js implementation in node_net.go.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 0000000..0cbbcdd
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
1// Copyright 2017 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// +build !js
6
7package uuid
8
9import "net"
10
var interfaces []net.Interface // cached list of interfaces; filled once, never refreshed

// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
//
// There is no locking here: within this package the function is reached via
// setNodeInterface, whose callers hold nodeMu.
func getHardwareInterface(name string) (string, []byte) {
	if interfaces == nil {
		var err error
		interfaces, err = net.Interfaces()
		if err != nil {
			// Treat enumeration failure like "no interfaces"; the
			// caller falls back to a random node ID.
			return "", nil
		}
	}
	for _, ifs := range interfaces {
		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
			return ifs.Name, ifs.HardwareAddr
		}
	}
	return "", nil
}
diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go
new file mode 100644
index 0000000..d7fcbf2
--- /dev/null
+++ b/vendor/github.com/google/uuid/null.go
@@ -0,0 +1,118 @@
1// Copyright 2021 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "bytes"
9 "database/sql/driver"
10 "encoding/json"
11 "fmt"
12)
13
// jsonNull is the canonical JSON/text encoding of null, emitted when
// marshaling an invalid (NULL) NullUUID.
var jsonNull = []byte("null")

// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
//  var u uuid.NullUUID
//  err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
//  ...
//  if u.Valid {
//     // use u.UUID
//  } else {
//     // NULL value
//  }
//
type NullUUID struct {
	UUID  UUID
	Valid bool // Valid is true if UUID is not NULL
}
33
34// Scan implements the SQL driver.Scanner interface.
35func (nu *NullUUID) Scan(value interface{}) error {
36 if value == nil {
37 nu.UUID, nu.Valid = Nil, false
38 return nil
39 }
40
41 err := nu.UUID.Scan(value)
42 if err != nil {
43 nu.Valid = false
44 return err
45 }
46
47 nu.Valid = true
48 return nil
49}
50
51// Value implements the driver Valuer interface.
52func (nu NullUUID) Value() (driver.Value, error) {
53 if !nu.Valid {
54 return nil, nil
55 }
56 // Delegate to UUID Value function
57 return nu.UUID.Value()
58}
59
60// MarshalBinary implements encoding.BinaryMarshaler.
61func (nu NullUUID) MarshalBinary() ([]byte, error) {
62 if nu.Valid {
63 return nu.UUID[:], nil
64 }
65
66 return []byte(nil), nil
67}
68
69// UnmarshalBinary implements encoding.BinaryUnmarshaler.
70func (nu *NullUUID) UnmarshalBinary(data []byte) error {
71 if len(data) != 16 {
72 return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
73 }
74 copy(nu.UUID[:], data)
75 nu.Valid = true
76 return nil
77}
78
79// MarshalText implements encoding.TextMarshaler.
80func (nu NullUUID) MarshalText() ([]byte, error) {
81 if nu.Valid {
82 return nu.UUID.MarshalText()
83 }
84
85 return jsonNull, nil
86}
87
88// UnmarshalText implements encoding.TextUnmarshaler.
89func (nu *NullUUID) UnmarshalText(data []byte) error {
90 id, err := ParseBytes(data)
91 if err != nil {
92 nu.Valid = false
93 return err
94 }
95 nu.UUID = id
96 nu.Valid = true
97 return nil
98}
99
100// MarshalJSON implements json.Marshaler.
101func (nu NullUUID) MarshalJSON() ([]byte, error) {
102 if nu.Valid {
103 return json.Marshal(nu.UUID)
104 }
105
106 return jsonNull, nil
107}
108
// UnmarshalJSON implements json.Unmarshaler.
// A JSON null decodes to the zero NullUUID (Valid == false) without error;
// any other value is delegated to the UUID's JSON decoding, and Valid
// records whether that succeeded.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
	if bytes.Equal(data, jsonNull) {
		*nu = NullUUID{}
		return nil // valid null UUID
	}
	err := json.Unmarshal(data, &nu.UUID)
	nu.Valid = err == nil
	return err
}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 0000000..2e02ec0
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "database/sql/driver"
9 "fmt"
10)
11
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
//
// A nil, empty-string, or zero-length []byte source is treated as NULL and
// leaves *uuid unchanged.
func (uuid *UUID) Scan(src interface{}) error {
	switch src := src.(type) {
	case nil:
		return nil

	case string:
		// if an empty UUID comes from a table, we return a null UUID
		if src == "" {
			return nil
		}

		// see Parse for required string format
		u, err := Parse(src)
		if err != nil {
			return fmt.Errorf("Scan: %v", err)
		}

		*uuid = u

	case []byte:
		// if an empty UUID comes from a table, we return a null UUID
		if len(src) == 0 {
			return nil
		}

		// assumes a simple slice of bytes if 16 bytes
		// otherwise attempts to parse (re-dispatch through the string case)
		if len(src) != 16 {
			return uuid.Scan(string(src))
		}
		copy((*uuid)[:], src)

	default:
		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
	}

	return nil
}
53
54// Value implements sql.Valuer so that UUIDs can be written to databases
55// transparently. Currently, UUIDs map to strings. Please consult
56// database-specific driver documentation for matching types.
57func (uuid UUID) Value() (driver.Value, error) {
58 return uuid.String(), nil
59}
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 0000000..c351129
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,134 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "encoding/binary"
9 "sync"
10 "time"
11)
12
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582 (the Gregorian calendar reform date, per RFC 4122).
type Time int64

const (
	lillian    = 2299160          // Julian day of 15 Oct 1582
	unix       = 2440587          // Julian day of 1 Jan 1970
	epoch      = unix - lillian   // Days between epochs
	g1582      = epoch * 86400    // seconds between epochs
	g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
)

var (
	timeMu   sync.Mutex // guards lasttime and clockSeq
	lasttime uint64 // last time we returned
	clockSeq uint16 // clock sequence for this run; 0 means "not yet set"

	timeNow = time.Now // for testing
)
32
33// UnixTime converts t the number of seconds and nanoseconds using the Unix
34// epoch of 1 Jan 1970.
35func (t Time) UnixTime() (sec, nsec int64) {
36 sec = int64(t - g1582ns100)
37 nsec = (sec % 10000000) * 100
38 sec /= 10000000
39 return sec, nsec
40}
41
42// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
43// clock sequence as well as adjusting the clock sequence as needed. An error
44// is returned if the current time cannot be determined.
45func GetTime() (Time, uint16, error) {
46 defer timeMu.Unlock()
47 timeMu.Lock()
48 return getTime()
49}
50
// getTime is the unlocked core of GetTime; callers must hold timeMu.
func getTime() (Time, uint16, error) {
	t := timeNow()

	// If we don't have a clock sequence already, set one.
	if clockSeq == 0 {
		setClockSequence(-1)
	}
	// Convert Unix nanoseconds to 100ns ticks since 15 Oct 1582.
	now := uint64(t.UnixNano()/100) + g1582ns100

	// If time has gone backwards with this clock sequence then we
	// increment the clock sequence
	if now <= lasttime {
		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 // keep variant bits
	}
	lasttime = now
	return Time(now), clockSeq, nil
}
68
69// ClockSequence returns the current clock sequence, generating one if not
70// already set. The clock sequence is only used for Version 1 UUIDs.
71//
72// The uuid package does not use global static storage for the clock sequence or
73// the last time a UUID was generated. Unless SetClockSequence is used, a new
74// random clock sequence is generated the first time a clock sequence is
75// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
76func ClockSequence() int {
77 defer timeMu.Unlock()
78 timeMu.Lock()
79 return clockSequence()
80}
81
// clockSequence is the unlocked core of ClockSequence; callers must hold
// timeMu. Only the low 14 bits are meaningful (the top bits hold the variant).
func clockSequence() int {
	if clockSeq == 0 {
		setClockSequence(-1)
	}
	return int(clockSeq & 0x3fff)
}
88
89// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
90// -1 causes a new sequence to be generated.
91func SetClockSequence(seq int) {
92 defer timeMu.Unlock()
93 timeMu.Lock()
94 setClockSequence(seq)
95}
96
// setClockSequence is the unlocked core of SetClockSequence; callers must
// hold timeMu.
func setClockSequence(seq int) {
	if seq == -1 {
		var b [2]byte
		randomBits(b[:]) // clock sequence
		seq = int(b[0])<<8 | int(b[1])
	}
	oldSeq := clockSeq
	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
	if oldSeq != clockSeq {
		// A new sequence resets the backwards-clock detection in getTime.
		lasttime = 0
	}
}
109
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
	var t Time
	switch uuid.Version() {
	case 6:
		// V6 stores the full timestamp big-endian in the first 64 bits.
		time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
		t = Time(time)
	case 7:
		// V7 stores Unix milliseconds in the top 48 bits; scale ms to
		// 100ns ticks (x10000) and rebase from 1970 to 1582.
		time := binary.BigEndian.Uint64(uuid[:8])
		t = Time((time>>16)*10000 + g1582ns100)
	default: // forward compatible
		// V1/V2 layout: time_low (32) | time_mid (16) | time_hi (12).
		time := int64(binary.BigEndian.Uint32(uuid[0:4]))
		time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
		time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
		t = Time(time)
	}
	return t
}
129
130// ClockSequence returns the clock sequence encoded in uuid.
131// The clock sequence is only well defined for version 1 and 2 UUIDs.
132func (uuid UUID) ClockSequence() int {
133 return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
134}
diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go
new file mode 100644
index 0000000..5ea6c73
--- /dev/null
+++ b/vendor/github.com/google/uuid/util.go
@@ -0,0 +1,43 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "io"
9)
10
11// randomBits completely fills slice b with random data.
12func randomBits(b []byte) {
13 if _, err := io.ReadFull(rander, b); err != nil {
14 panic(err.Error()) // rand should never fail
15 }
16}
17
// xvalues maps each byte to its value as a hexadecimal digit, or 255 for
// bytes that are not hex digits. Both upper- and lowercase a-f are accepted.
var xvalues = [256]byte{
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}
37
38// xtob converts hex characters x1 and x2 into a byte.
39func xtob(x1, x2 byte) (byte, bool) {
40 b1 := xvalues[x1]
41 b2 := xvalues[x2]
42 return (b1 << 4) | b2, b1 != 255 && b2 != 255
43}
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
new file mode 100644
index 0000000..5232b48
--- /dev/null
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -0,0 +1,365 @@
1// Copyright 2018 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "bytes"
9 "crypto/rand"
10 "encoding/hex"
11 "errors"
12 "fmt"
13 "io"
14 "strings"
15 "sync"
16)
17
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte

// A Version represents a UUID's version (upper nibble of byte 6).
type Version byte

// A Variant represents a UUID's variant (top bits of byte 8).
type Variant byte

// Constants returned by Variant.
const (
	Invalid   = Variant(iota) // Invalid UUID
	RFC4122                   // The variant specified in RFC4122
	Reserved                  // Reserved, NCS backward compatibility.
	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
	Future                    // Reserved for future definition.
)

// randPoolSize is the byte size of the optional randomness pool used by
// NewRandom when EnableRandPool has been called: 16 pooled UUIDs' worth.
const randPoolSize = 16 * 16

var (
	rander      = rand.Reader // random function; replaceable via SetRand
	poolEnabled = false       // toggled by EnableRandPool / DisableRandPool
	poolMu      sync.Mutex
	poolPos     = randPoolSize      // protected with poolMu
	pool        [randPoolSize]byte // protected with poolMu
)
46
47type invalidLengthError struct{ len int }
48
49func (err invalidLengthError) Error() string {
50 return fmt.Sprintf("invalid UUID length: %d", err.len)
51}
52
53// IsInvalidLengthError is matcher function for custom error invalidLengthError
54func IsInvalidLengthError(err error) bool {
55 _, ok := err.(invalidLengthError)
56 return ok
57}
58
// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
// the standard UUID forms defined in RFC 4122
// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
// Parse accepts non-standard strings such as the raw hex encoding
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
// examined in the latter case. Parse should not be used to validate strings as
// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
	var uuid UUID
	switch len(s) {
	// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 36:

	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 36 + 9:
		if !strings.EqualFold(s[:9], "urn:uuid:") {
			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
		}
		s = s[9:]

	// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
	// Only the leading character is stripped; the brace characters
	// themselves are not validated (see doc comment above).
	case 36 + 2:
		s = s[1:]

	// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
	case 32:
		var ok bool
		for i := range uuid {
			uuid[i], ok = xtob(s[i*2], s[i*2+1])
			if !ok {
				return uuid, errors.New("invalid UUID format")
			}
		}
		return uuid, nil
	default:
		return uuid, invalidLengthError{len(s)}
	}
	// s is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
		return uuid, errors.New("invalid UUID format")
	}
	// x is the string offset of each output byte's hex pair, skipping hyphens.
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11,
		14, 16,
		19, 21,
		24, 26, 28, 30, 32, 34,
	} {
		v, ok := xtob(s[x], s[x+1])
		if !ok {
			return uuid, errors.New("invalid UUID format")
		}
		uuid[i] = v
	}
	return uuid, nil
}
118
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
// The logic deliberately mirrors Parse to avoid a string conversion.
func ParseBytes(b []byte) (UUID, error) {
	var uuid UUID
	switch len(b) {
	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
		if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
		}
		b = b[9:]
	case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
		b = b[1:]
	case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
		var ok bool
		for i := 0; i < 32; i += 2 {
			uuid[i/2], ok = xtob(b[i], b[i+1])
			if !ok {
				return uuid, errors.New("invalid UUID format")
			}
		}
		return uuid, nil
	default:
		return uuid, invalidLengthError{len(b)}
	}
	// b is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
		return uuid, errors.New("invalid UUID format")
	}
	// x is the offset of each output byte's hex pair, skipping hyphens.
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11,
		14, 16,
		19, 21,
		24, 26, 28, 30, 32, 34,
	} {
		v, ok := xtob(b[x], b[x+1])
		if !ok {
			return uuid, errors.New("invalid UUID format")
		}
		uuid[i] = v
	}
	return uuid, nil
}
163
164// MustParse is like Parse but panics if the string cannot be parsed.
165// It simplifies safe initialization of global variables holding compiled UUIDs.
166func MustParse(s string) UUID {
167 uuid, err := Parse(s)
168 if err != nil {
169 panic(`uuid: Parse(` + s + `): ` + err.Error())
170 }
171 return uuid
172}
173
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
	// Length validation (== 16) and copying are delegated to UnmarshalBinary.
	err = uuid.UnmarshalBinary(b)
	return uuid, err
}
180
181// Must returns uuid if err is nil and panics otherwise.
182func Must(uuid UUID, err error) UUID {
183 if err != nil {
184 panic(err)
185 }
186 return uuid
187}
188
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
//   xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
//   urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
//   xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//   {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
// It returns an error if the format is invalid, otherwise nil.
//
// Unlike Parse, Validate rejects a bracketed form without actual braces.
func Validate(s string) error {
	// First normalize each accepted encoding down to either a bare 36-byte
	// hyphenated form or (for the 32-byte case) validate it in place.
	switch len(s) {
	// Standard UUID format
	case 36:

	// UUID with "urn:uuid:" prefix
	case 36 + 9:
		if !strings.EqualFold(s[:9], "urn:uuid:") {
			return fmt.Errorf("invalid urn prefix: %q", s[:9])
		}
		s = s[9:]

	// UUID enclosed in braces
	case 36 + 2:
		if s[0] != '{' || s[len(s)-1] != '}' {
			return fmt.Errorf("invalid bracketed UUID format")
		}
		s = s[1 : len(s)-1]

	// UUID without hyphens
	case 32:
		for i := 0; i < len(s); i += 2 {
			_, ok := xtob(s[i], s[i+1])
			if !ok {
				return errors.New("invalid UUID format")
			}
		}

	default:
		return invalidLengthError{len(s)}
	}

	// Check for standard UUID format (skipped for the 32-byte case, which
	// was already fully validated above)
	if len(s) == 36 {
		if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
			return errors.New("invalid UUID format")
		}
		for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
			if _, ok := xtob(s[x], s[x+1]); !ok {
				return errors.New("invalid UUID format")
			}
		}
	}

	return nil
}
241
242// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
243// , or "" if uuid is invalid.
244func (uuid UUID) String() string {
245 var buf [36]byte
246 encodeHex(buf[:], uuid)
247 return string(buf[:])
248}
249
250// URN returns the RFC 2141 URN form of uuid,
251// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
252func (uuid UUID) URN() string {
253 var buf [36 + 9]byte
254 copy(buf[:], "urn:uuid:")
255 encodeHex(buf[9:], uuid)
256 return string(buf[:])
257}
258
// encodeHex writes the canonical hyphenated hex form of uuid
// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) into dst, which must be at least
// 36 bytes long.
func encodeHex(dst []byte, uuid UUID) {
	hex.Encode(dst, uuid[:4]) // bytes 0-3 -> dst[0:8]
	dst[8] = '-'
	hex.Encode(dst[9:13], uuid[4:6]) // bytes 4-5
	dst[13] = '-'
	hex.Encode(dst[14:18], uuid[6:8]) // bytes 6-7
	dst[18] = '-'
	hex.Encode(dst[19:23], uuid[8:10]) // bytes 8-9
	dst[23] = '-'
	hex.Encode(dst[24:], uuid[10:]) // bytes 10-15 -> dst[24:36]
}
270
271// Variant returns the variant encoded in uuid.
272func (uuid UUID) Variant() Variant {
273 switch {
274 case (uuid[8] & 0xc0) == 0x80:
275 return RFC4122
276 case (uuid[8] & 0xe0) == 0xc0:
277 return Microsoft
278 case (uuid[8] & 0xe0) == 0xe0:
279 return Future
280 default:
281 return Reserved
282 }
283}
284
// Version returns the version of uuid, encoded in the upper 4 bits of byte 6.
func (uuid UUID) Version() Version {
	return Version(uuid[6] >> 4)
}
289
290func (v Version) String() string {
291 if v > 15 {
292 return fmt.Sprintf("BAD_VERSION_%d", v)
293 }
294 return fmt.Sprintf("VERSION_%d", v)
295}
296
297func (v Variant) String() string {
298 switch v {
299 case RFC4122:
300 return "RFC4122"
301 case Reserved:
302 return "Reserved"
303 case Microsoft:
304 return "Microsoft"
305 case Future:
306 return "Future"
307 case Invalid:
308 return "Invalid"
309 }
310 return fmt.Sprintf("BadVariant%d", int(v))
311}
312
313// SetRand sets the random number generator to r, which implements io.Reader.
314// If r.Read returns an error when the package requests random data then
315// a panic will be issued.
316//
317// Calling SetRand with nil sets the random number generator to the default
318// generator.
319func SetRand(r io.Reader) {
320 if r == nil {
321 rander = rand.Reader
322 return
323 }
324 rander = r
325}
326
// EnableRandPool enables internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
//
// Takes effect on the next call to NewRandom (via newRandomFromPool).
func EnableRandPool() {
	poolEnabled = true
}
341
// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
	// poolEnabled is written before taking poolMu; this is only safe under
	// the documented no-concurrent-use restriction above.
	poolEnabled = false
	defer poolMu.Unlock()
	poolMu.Lock()
	// Mark the pool exhausted so a later EnableRandPool refills it rather
	// than serving stale bytes.
	poolPos = randPoolSize
}
354
355// UUIDs is a slice of UUID types.
356type UUIDs []UUID
357
358// Strings returns a string slice containing the string form of each UUID in uuids.
359func (uuids UUIDs) Strings() []string {
360 var uuidStrs = make([]string, len(uuids))
361 for i, uuid := range uuids {
362 uuidStrs[i] = uuid.String()
363 }
364 return uuidStrs
365}
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 0000000..4631096
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "encoding/binary"
9)
10
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns the zero UUID and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
	var uuid UUID
	now, seq, err := GetTime()
	if err != nil {
		return uuid, err
	}

	// Split the 60-bit timestamp into the V1 wire fields.
	timeLow := uint32(now & 0xffffffff)
	timeMid := uint16((now >> 32) & 0xffff)
	timeHi := uint16((now >> 48) & 0x0fff)
	timeHi |= 0x1000 // Version 1

	binary.BigEndian.PutUint32(uuid[0:], timeLow)
	binary.BigEndian.PutUint16(uuid[4:], timeMid)
	binary.BigEndian.PutUint16(uuid[6:], timeHi)
	binary.BigEndian.PutUint16(uuid[8:], seq) // clock seq; variant bits already set by GetTime

	// Lazily initialize the node ID, then copy it into the last 6 bytes.
	nodeMu.Lock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	copy(uuid[10:], nodeID[:])
	nodeMu.Unlock()

	return uuid, nil
}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 0000000..7697802
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,76 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import "io"
8
9// New creates a new random UUID or panics. New is equivalent to
10// the expression
11//
12// uuid.Must(uuid.NewRandom())
13func New() UUID {
14 return Must(NewRandom())
15}
16
17// NewString creates a new random UUID and returns it as a string or panics.
18// NewString is equivalent to the expression
19//
20// uuid.New().String()
21func NewString() string {
22 return Must(NewRandom()).String()
23}
24
25// NewRandom returns a Random (Version 4) UUID.
26//
27// The strength of the UUIDs is based on the strength of the crypto/rand
28// package.
29//
30// Uses the randomness pool if it was enabled with EnableRandPool.
31//
32// A note about uniqueness derived from the UUID Wikipedia entry:
33//
34// Randomly generated UUIDs have 122 random bits. One's annual risk of being
35// hit by a meteorite is estimated to be one chance in 17 billion, that
36// means the probability is about 0.00000000006 (6 × 10−11),
37// equivalent to the odds of creating a few tens of trillions of UUIDs in a
38// year and having one duplicate.
39func NewRandom() (UUID, error) {
40 if !poolEnabled {
41 return NewRandomFromReader(rander)
42 }
43 return newRandomFromPool()
44}
45
46// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
47func NewRandomFromReader(r io.Reader) (UUID, error) {
48 var uuid UUID
49 _, err := io.ReadFull(r, uuid[:])
50 if err != nil {
51 return Nil, err
52 }
53 uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
54 uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
55 return uuid, nil
56}
57
// newRandomFromPool returns a Version 4 UUID built from 16 bytes of the
// shared randomness pool, refilling the pool from rander when exhausted.
func newRandomFromPool() (UUID, error) {
	var uuid UUID
	poolMu.Lock()
	if poolPos == randPoolSize {
		// Pool exhausted: refill all randPoolSize bytes at once.
		_, err := io.ReadFull(rander, pool[:])
		if err != nil {
			poolMu.Unlock()
			return Nil, err
		}
		poolPos = 0
	}
	copy(uuid[:], pool[poolPos:(poolPos+16)])
	poolPos += 16
	poolMu.Unlock()

	// Version/variant bits are stamped outside the lock; they touch only
	// the local copy.
	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 0000000..339a959
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
1// Copyright 2023 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import "encoding/binary"
8
9// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
10// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
11// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
12//
13// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
14//
15// NewV6 returns a Version 6 UUID based on the current NodeID and clock
16// sequence, and the current time. If the NodeID has not been set by SetNodeID
17// or SetNodeInterface then it will be set automatically. If the NodeID cannot
18// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by
19// SetClockSequence then it will be set automatically. If GetTime fails to
20// return the current NewV6 returns Nil and an error.
21func NewV6() (UUID, error) {
22 var uuid UUID
23 now, seq, err := GetTime()
24 if err != nil {
25 return uuid, err
26 }
27
28 /*
29 0 1 2 3
30 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
31 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
32 | time_high |
33 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
34 | time_mid | time_low_and_version |
35 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
36 |clk_seq_hi_res | clk_seq_low | node (0-1) |
37 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
38 | node (2-5) |
39 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
40 */
41
42 binary.BigEndian.PutUint64(uuid[0:], uint64(now))
43 binary.BigEndian.PutUint16(uuid[8:], seq)
44
45 uuid[6] = 0x60 | (uuid[6] & 0x0F)
46 uuid[8] = 0x80 | (uuid[8] & 0x3F)
47
48 nodeMu.Lock()
49 if nodeID == zeroID {
50 setNodeInterface("")
51 }
52 copy(uuid[10:], nodeID[:])
53 nodeMu.Unlock()
54
55 return uuid, nil
56}
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 0000000..ba9dd5e
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,75 @@
1// Copyright 2023 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uuid
6
7import (
8 "io"
9)
10
// UUID version 7 features a time-ordered value field derived from the widely
// implemented and well known Unix Epoch timestamp source,
// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
// As well as improved entropy characteristics over versions 1 or 6.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
//
// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
//
// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
// Uses the randomness pool if it was enabled with EnableRandPool.
// On error, NewV7 returns Nil and an error.
func NewV7() (UUID, error) {
	// Start from a fully random v4 UUID, then overwrite the timestamp and
	// version bits; the variant bits set by NewRandom are kept as-is.
	uuid, err := NewRandom()
	if err != nil {
		return uuid, err
	}
	makeV7(uuid[:])
	return uuid, nil
}
31
// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
// It uses NewRandomFromReader to fill the random bits.
// On error, NewV7FromReader returns Nil and an error.
func NewV7FromReader(r io.Reader) (UUID, error) {
	uuid, err := NewRandomFromReader(r)
	if err != nil {
		return uuid, err
	}

	makeV7(uuid[:])
	return uuid, nil
}
44
// makeV7 fills in the 48-bit big-endian Unix-millisecond timestamp
// (uuid[0] - uuid[5]) and sets the version nibble to b0111 (uuid[6]).
// uuid[8] already carries the correct variant bits (10) from the random
// source; see NewV7 and NewV7FromReader.
func makeV7(uuid []byte) {
	/*
	    0                   1                   2                   3
	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |                           unix_ts_ms                          |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |          unix_ts_ms           |  ver  |       rand_a          |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |var|                        rand_b                             |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |                            rand_b                             |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	*/
	_ = uuid[15] // bounds check hint: one check up front instead of one per store

	t := timeNow().UnixMilli()

	uuid[0] = byte(t >> 40)
	uuid[1] = byte(t >> 32)
	uuid[2] = byte(t >> 24)
	uuid[3] = byte(t >> 16)
	uuid[4] = byte(t >> 8)
	uuid[5] = byte(t)

	uuid[6] = 0x70 | (uuid[6] & 0x0F) // Version 7; low nibble keeps random bits (rand_a)
	// uuid[8] already has the correct variant bits set by the random source.
}
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
new file mode 100644
index 0000000..955dc0b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.codecov.yml
@@ -0,0 +1,3 @@
1ignore:
2 - "output_tests/.*"
3
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
new file mode 100644
index 0000000..1555653
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.gitignore
@@ -0,0 +1,4 @@
1/vendor
2/bug_test.go
3/coverage.txt
4/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
new file mode 100644
index 0000000..449e67c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.travis.yml
@@ -0,0 +1,14 @@
1language: go
2
3go:
4 - 1.8.x
5 - 1.x
6
7before_install:
8 - go get -t -v ./...
9
10script:
11 - ./test.sh
12
13after_success:
14 - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
new file mode 100644
index 0000000..c8a9fbb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.lock
@@ -0,0 +1,21 @@
1# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
2
3
4[[projects]]
5 name = "github.com/modern-go/concurrent"
6 packages = ["."]
7 revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
8 version = "1.0.0"
9
10[[projects]]
11 name = "github.com/modern-go/reflect2"
12 packages = ["."]
13 revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
14 version = "1.0.1"
15
16[solve-meta]
17 analyzer-name = "dep"
18 analyzer-version = 1
19 inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
20 solver-name = "gps-cdcl"
21 solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
new file mode 100644
index 0000000..313a0f8
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.toml
@@ -0,0 +1,26 @@
1# Gopkg.toml example
2#
3# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
4# for detailed Gopkg.toml documentation.
5#
6# required = ["github.com/user/thing/cmd/thing"]
7# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
8#
9# [[constraint]]
10# name = "github.com/user/project"
11# version = "1.0.0"
12#
13# [[constraint]]
14# name = "github.com/user/project2"
15# branch = "dev"
16# source = "github.com/myfork/project2"
17#
18# [[override]]
19# name = "github.com/x/y"
20# version = "2.4.0"
21
22ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
23
24[[constraint]]
25 name = "github.com/modern-go/reflect2"
26 version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE
new file mode 100644
index 0000000..2cf4f5a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/LICENSE
@@ -0,0 +1,21 @@
1MIT License
2
3Copyright (c) 2016 json-iterator
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in all
13copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21SOFTWARE.
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 0000000..c589add
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,85 @@
1[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
2[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go)
3[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
4[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
5[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
6[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
7[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
8
9A high-performance 100% compatible drop-in replacement of "encoding/json"
10
11# Benchmark
12
13![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
14
15Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
16
17Raw Result (easyjson requires static code generation)
18
19| | ns/op | allocation bytes | allocation times |
20| --------------- | ----------- | ---------------- | ---------------- |
21| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
22| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
23| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
24| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
25| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
26| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
27
28Always benchmark with your own workload.
29The result depends heavily on the data input.
30
31# Usage
32
33100% compatibility with standard lib
34
35Replace
36
37```go
38import "encoding/json"
39json.Marshal(&data)
40```
41
42with
43
44```go
45import jsoniter "github.com/json-iterator/go"
46
47var json = jsoniter.ConfigCompatibleWithStandardLibrary
48json.Marshal(&data)
49```
50
51Replace
52
53```go
54import "encoding/json"
55json.Unmarshal(input, &data)
56```
57
58with
59
60```go
61import jsoniter "github.com/json-iterator/go"
62
63var json = jsoniter.ConfigCompatibleWithStandardLibrary
64json.Unmarshal(input, &data)
65```
66
67[More documentation](http://jsoniter.com/migrate-from-go-std.html)
68
69# How to get
70
71```
72go get github.com/json-iterator/go
73```
74
75# Contribution Welcomed !
76
77Contributors
78
79- [thockin](https://github.com/thockin)
80- [mattn](https://github.com/mattn)
81- [cch123](https://github.com/cch123)
82- [Oleg Shaldybin](https://github.com/olegshaldybin)
83- [Jason Toffaletti](https://github.com/toffaletti)
84
85Report issue or pull request, or email [email protected], or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
new file mode 100644
index 0000000..92d2cc4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/adapter.go
@@ -0,0 +1,150 @@
1package jsoniter
2
3import (
4 "bytes"
5 "io"
6)
7
8// RawMessage to make replace json with jsoniter
9type RawMessage []byte
10
11// Unmarshal adapts to json/encoding Unmarshal API
12//
13// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
14// Refer to https://godoc.org/encoding/json#Unmarshal for more information
15func Unmarshal(data []byte, v interface{}) error {
16 return ConfigDefault.Unmarshal(data, v)
17}
18
19// UnmarshalFromString is a convenient method to read from string instead of []byte
20func UnmarshalFromString(str string, v interface{}) error {
21 return ConfigDefault.UnmarshalFromString(str, v)
22}
23
24// Get quick method to get value from deeply nested JSON structure
25func Get(data []byte, path ...interface{}) Any {
26 return ConfigDefault.Get(data, path...)
27}
28
29// Marshal adapts to json/encoding Marshal API
30//
31// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API
32// Refer to https://godoc.org/encoding/json#Marshal for more information
33func Marshal(v interface{}) ([]byte, error) {
34 return ConfigDefault.Marshal(v)
35}
36
37// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
38func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
39 return ConfigDefault.MarshalIndent(v, prefix, indent)
40}
41
42// MarshalToString convenient method to write as string instead of []byte
43func MarshalToString(v interface{}) (string, error) {
44 return ConfigDefault.MarshalToString(v)
45}
46
47// NewDecoder adapts to json/stream NewDecoder API.
48//
49// NewDecoder returns a new decoder that reads from r.
50//
51// Instead of a json/encoding Decoder, an Decoder is returned
52// Refer to https://godoc.org/encoding/json#NewDecoder for more information
53func NewDecoder(reader io.Reader) *Decoder {
54 return ConfigDefault.NewDecoder(reader)
55}
56
57// Decoder reads and decodes JSON values from an input stream.
58// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress)
59type Decoder struct {
60 iter *Iterator
61}
62
63// Decode decode JSON into interface{}
64func (adapter *Decoder) Decode(obj interface{}) error {
65 if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
66 if !adapter.iter.loadMore() {
67 return io.EOF
68 }
69 }
70 adapter.iter.ReadVal(obj)
71 err := adapter.iter.Error
72 if err == io.EOF {
73 return nil
74 }
75 return adapter.iter.Error
76}
77
78// More is there more?
79func (adapter *Decoder) More() bool {
80 iter := adapter.iter
81 if iter.Error != nil {
82 return false
83 }
84 c := iter.nextToken()
85 if c == 0 {
86 return false
87 }
88 iter.unreadByte()
89 return c != ']' && c != '}'
90}
91
92// Buffered remaining buffer
93func (adapter *Decoder) Buffered() io.Reader {
94 remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
95 return bytes.NewReader(remaining)
96}
97
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (adapter *Decoder) UseNumber() {
	// Copy the pre-frozen config, flip the flag, and re-freeze (reusing the
	// codec cache) so only this decoder's behavior changes.
	cfg := adapter.iter.cfg.configBeforeFrozen
	cfg.UseNumber = true
	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
}
105
// DisallowUnknownFields causes the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func (adapter *Decoder) DisallowUnknownFields() {
	// Copy the pre-frozen config, flip the flag, and re-freeze (reusing the
	// codec cache) so only this decoder's behavior changes.
	cfg := adapter.iter.cfg.configBeforeFrozen
	cfg.DisallowUnknownFields = true
	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
}
114
115// NewEncoder same as json.NewEncoder
116func NewEncoder(writer io.Writer) *Encoder {
117 return ConfigDefault.NewEncoder(writer)
118}
119
120// Encoder same as json.Encoder
121type Encoder struct {
122 stream *Stream
123}
124
125// Encode encode interface{} as JSON to io.Writer
126func (adapter *Encoder) Encode(val interface{}) error {
127 adapter.stream.WriteVal(val)
128 adapter.stream.WriteRaw("\n")
129 adapter.stream.Flush()
130 return adapter.stream.Error
131}
132
// SetIndent sets the indentation step. Prefix is not supported.
func (adapter *Encoder) SetIndent(prefix, indent string) {
	// Only the width of the indent string is honored; the prefix argument is
	// accepted for encoding/json API compatibility and ignored.
	config := adapter.stream.cfg.configBeforeFrozen
	config.IndentionStep = len(indent)
	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
}
139
// SetEscapeHTML escape html by default, set to false to disable
func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
	// Copy the pre-frozen config, set the flag, and re-freeze (reusing the
	// codec cache) so only this encoder's behavior changes.
	config := adapter.stream.cfg.configBeforeFrozen
	config.EscapeHTML = escapeHTML
	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
}
146
147// Valid reports whether data is a valid JSON encoding.
148func Valid(data []byte) bool {
149 return ConfigDefault.Valid(data)
150}
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
new file mode 100644
index 0000000..f6b8aea
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any.go
@@ -0,0 +1,325 @@
1package jsoniter
2
3import (
4 "errors"
5 "fmt"
6 "github.com/modern-go/reflect2"
7 "io"
8 "reflect"
9 "strconv"
10 "unsafe"
11)
12
13// Any generic object representation.
14// The lazy json implementation holds []byte and parse lazily.
15type Any interface {
16 LastError() error
17 ValueType() ValueType
18 MustBeValid() Any
19 ToBool() bool
20 ToInt() int
21 ToInt32() int32
22 ToInt64() int64
23 ToUint() uint
24 ToUint32() uint32
25 ToUint64() uint64
26 ToFloat32() float32
27 ToFloat64() float64
28 ToString() string
29 ToVal(val interface{})
30 Get(path ...interface{}) Any
31 Size() int
32 Keys() []string
33 GetInterface() interface{}
34 WriteTo(stream *Stream)
35}
36
37type baseAny struct{}
38
39func (any *baseAny) Get(path ...interface{}) Any {
40 return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
41}
42
43func (any *baseAny) Size() int {
44 return 0
45}
46
47func (any *baseAny) Keys() []string {
48 return []string{}
49}
50
51func (any *baseAny) ToVal(obj interface{}) {
52 panic("not implemented")
53}
54
55// WrapInt32 turn int32 into Any interface
56func WrapInt32(val int32) Any {
57 return &int32Any{baseAny{}, val}
58}
59
60// WrapInt64 turn int64 into Any interface
61func WrapInt64(val int64) Any {
62 return &int64Any{baseAny{}, val}
63}
64
65// WrapUint32 turn uint32 into Any interface
66func WrapUint32(val uint32) Any {
67 return &uint32Any{baseAny{}, val}
68}
69
70// WrapUint64 turn uint64 into Any interface
71func WrapUint64(val uint64) Any {
72 return &uint64Any{baseAny{}, val}
73}
74
75// WrapFloat64 turn float64 into Any interface
76func WrapFloat64(val float64) Any {
77 return &floatAny{baseAny{}, val}
78}
79
80// WrapString turn string into Any interface
81func WrapString(val string) Any {
82 return &stringAny{baseAny{}, val}
83}
84
85// Wrap turn a go object into Any interface
86func Wrap(val interface{}) Any {
87 if val == nil {
88 return &nilAny{}
89 }
90 asAny, isAny := val.(Any)
91 if isAny {
92 return asAny
93 }
94 typ := reflect2.TypeOf(val)
95 switch typ.Kind() {
96 case reflect.Slice:
97 return wrapArray(val)
98 case reflect.Struct:
99 return wrapStruct(val)
100 case reflect.Map:
101 return wrapMap(val)
102 case reflect.String:
103 return WrapString(val.(string))
104 case reflect.Int:
105 if strconv.IntSize == 32 {
106 return WrapInt32(int32(val.(int)))
107 }
108 return WrapInt64(int64(val.(int)))
109 case reflect.Int8:
110 return WrapInt32(int32(val.(int8)))
111 case reflect.Int16:
112 return WrapInt32(int32(val.(int16)))
113 case reflect.Int32:
114 return WrapInt32(val.(int32))
115 case reflect.Int64:
116 return WrapInt64(val.(int64))
117 case reflect.Uint:
118 if strconv.IntSize == 32 {
119 return WrapUint32(uint32(val.(uint)))
120 }
121 return WrapUint64(uint64(val.(uint)))
122 case reflect.Uintptr:
123 if ptrSize == 32 {
124 return WrapUint32(uint32(val.(uintptr)))
125 }
126 return WrapUint64(uint64(val.(uintptr)))
127 case reflect.Uint8:
128 return WrapUint32(uint32(val.(uint8)))
129 case reflect.Uint16:
130 return WrapUint32(uint32(val.(uint16)))
131 case reflect.Uint32:
132 return WrapUint32(uint32(val.(uint32)))
133 case reflect.Uint64:
134 return WrapUint64(val.(uint64))
135 case reflect.Float32:
136 return WrapFloat64(float64(val.(float32)))
137 case reflect.Float64:
138 return WrapFloat64(val.(float64))
139 case reflect.Bool:
140 if val.(bool) == true {
141 return &trueAny{}
142 }
143 return &falseAny{}
144 }
145 return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
146}
147
148// ReadAny read next JSON element as an Any object. It is a better json.RawMessage.
149func (iter *Iterator) ReadAny() Any {
150 return iter.readAny()
151}
152
// readAny reads the next JSON element and wraps it in an Any. Strings, nulls
// and booleans are materialized eagerly; objects, arrays and numbers are
// captured as raw bytes and parsed lazily on demand.
func (iter *Iterator) readAny() Any {
	c := iter.nextToken()
	switch c {
	case '"':
		iter.unreadByte() // ReadString expects to see the opening quote
		return &stringAny{baseAny{}, iter.ReadString()}
	case 'n':
		iter.skipThreeBytes('u', 'l', 'l') // null
		return &nilAny{}
	case 't':
		iter.skipThreeBytes('r', 'u', 'e') // true
		return &trueAny{}
	case 'f':
		iter.skipFourBytes('a', 'l', 's', 'e') // false
		return &falseAny{}
	case '{':
		return iter.readObjectAny()
	case '[':
		return iter.readArrayAny()
	case '-':
		return iter.readNumberAny(false)
	case 0:
		// nextToken returns 0 when the input is exhausted.
		return &invalidAny{baseAny{}, errors.New("input is empty")}
	default:
		// Any other leading byte is treated as the start of a number.
		return iter.readNumberAny(true)
	}
}
180
181func (iter *Iterator) readNumberAny(positive bool) Any {
182 iter.startCapture(iter.head - 1)
183 iter.skipNumber()
184 lazyBuf := iter.stopCapture()
185 return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
186}
187
188func (iter *Iterator) readObjectAny() Any {
189 iter.startCapture(iter.head - 1)
190 iter.skipObject()
191 lazyBuf := iter.stopCapture()
192 return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
193}
194
195func (iter *Iterator) readArrayAny() Any {
196 iter.startCapture(iter.head - 1)
197 iter.skipArray()
198 lazyBuf := iter.stopCapture()
199 return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
200}
201
202func locateObjectField(iter *Iterator, target string) []byte {
203 var found []byte
204 iter.ReadObjectCB(func(iter *Iterator, field string) bool {
205 if field == target {
206 found = iter.SkipAndReturnBytes()
207 return false
208 }
209 iter.Skip()
210 return true
211 })
212 return found
213}
214
215func locateArrayElement(iter *Iterator, target int) []byte {
216 var found []byte
217 n := 0
218 iter.ReadArrayCB(func(iter *Iterator) bool {
219 if n == target {
220 found = iter.SkipAndReturnBytes()
221 return false
222 }
223 iter.Skip()
224 n++
225 return true
226 })
227 return found
228}
229
// locatePath walks a Get-style path (string keys for objects, int indexes
// for arrays, '*' for wildcard) and returns the Any found at the end, or an
// invalidAny carrying the unmatched remainder of the path.
func locatePath(iter *Iterator, path []interface{}) Any {
	for i, pathKeyObj := range path {
		switch pathKey := pathKeyObj.(type) {
		case string:
			valueBytes := locateObjectField(iter, pathKey)
			if valueBytes == nil {
				return newInvalidAny(path[i:])
			}
			iter.ResetBytes(valueBytes)
		case int:
			valueBytes := locateArrayElement(iter, pathKey)
			if valueBytes == nil {
				return newInvalidAny(path[i:])
			}
			iter.ResetBytes(valueBytes)
		case int32:
			// An untyped '*' literal arrives as int32 (rune): wildcard step.
			if '*' == pathKey {
				return iter.readAny().Get(path[i:]...)
			}
			return newInvalidAny(path[i:])
		default:
			return newInvalidAny(path[i:])
		}
	}
	if iter.Error != nil && iter.Error != io.EOF {
		return &invalidAny{baseAny{}, iter.Error}
	}
	// Path fully consumed: parse and return the element we landed on.
	return iter.readAny()
}
259
260var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
261
262func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
263 if typ == anyType {
264 return &directAnyCodec{}
265 }
266 if typ.Implements(anyType) {
267 return &anyCodec{
268 valType: typ,
269 }
270 }
271 return nil
272}
273
274func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
275 if typ == anyType {
276 return &directAnyCodec{}
277 }
278 if typ.Implements(anyType) {
279 return &anyCodec{
280 valType: typ,
281 }
282 }
283 return nil
284}
285
286type anyCodec struct {
287 valType reflect2.Type
288}
289
290func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
291 panic("not implemented")
292}
293
294func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
295 obj := codec.valType.UnsafeIndirect(ptr)
296 any := obj.(Any)
297 any.WriteTo(stream)
298}
299
300func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
301 obj := codec.valType.UnsafeIndirect(ptr)
302 any := obj.(Any)
303 return any.Size() == 0
304}
305
306type directAnyCodec struct {
307}
308
309func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
310 *(*Any)(ptr) = iter.readAny()
311}
312
313func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
314 any := *(*Any)(ptr)
315 if any == nil {
316 stream.WriteNil()
317 return
318 }
319 any.WriteTo(stream)
320}
321
322func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
323 any := *(*Any)(ptr)
324 return any.Size() == 0
325}
diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go
new file mode 100644
index 0000000..0449e9a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_array.go
@@ -0,0 +1,278 @@
1package jsoniter
2
3import (
4 "reflect"
5 "unsafe"
6)
7
8type arrayLazyAny struct {
9 baseAny
10 cfg *frozenConfig
11 buf []byte
12 err error
13}
14
15func (any *arrayLazyAny) ValueType() ValueType {
16 return ArrayValue
17}
18
19func (any *arrayLazyAny) MustBeValid() Any {
20 return any
21}
22
23func (any *arrayLazyAny) LastError() error {
24 return any.err
25}
26
27func (any *arrayLazyAny) ToBool() bool {
28 iter := any.cfg.BorrowIterator(any.buf)
29 defer any.cfg.ReturnIterator(iter)
30 return iter.ReadArray()
31}
32
33func (any *arrayLazyAny) ToInt() int {
34 if any.ToBool() {
35 return 1
36 }
37 return 0
38}
39
40func (any *arrayLazyAny) ToInt32() int32 {
41 if any.ToBool() {
42 return 1
43 }
44 return 0
45}
46
47func (any *arrayLazyAny) ToInt64() int64 {
48 if any.ToBool() {
49 return 1
50 }
51 return 0
52}
53
54func (any *arrayLazyAny) ToUint() uint {
55 if any.ToBool() {
56 return 1
57 }
58 return 0
59}
60
61func (any *arrayLazyAny) ToUint32() uint32 {
62 if any.ToBool() {
63 return 1
64 }
65 return 0
66}
67
68func (any *arrayLazyAny) ToUint64() uint64 {
69 if any.ToBool() {
70 return 1
71 }
72 return 0
73}
74
75func (any *arrayLazyAny) ToFloat32() float32 {
76 if any.ToBool() {
77 return 1
78 }
79 return 0
80}
81
82func (any *arrayLazyAny) ToFloat64() float64 {
83 if any.ToBool() {
84 return 1
85 }
86 return 0
87}
88
// ToString returns the raw JSON text of the array.
// NOTE(review): this reinterprets any.buf as a string via unsafe without
// copying — safe only while the underlying buffer is never mutated; confirm
// callers treat the result as read-only.
func (any *arrayLazyAny) ToString() string {
	return *(*string)(unsafe.Pointer(&any.buf))
}
92
93func (any *arrayLazyAny) ToVal(val interface{}) {
94 iter := any.cfg.BorrowIterator(any.buf)
95 defer any.cfg.ReturnIterator(iter)
96 iter.ReadVal(val)
97}
98
// Get resolves a path into this lazily-parsed array. An int selects one
// element; '*' maps the remaining path over every element, keeping only
// valid results. Any other path head yields an invalidAny.
func (any *arrayLazyAny) Get(path ...interface{}) Any {
	if len(path) == 0 {
		return any
	}
	switch firstPath := path[0].(type) {
	case int:
		// Borrow a pooled iterator over the raw bytes for the scan.
		iter := any.cfg.BorrowIterator(any.buf)
		defer any.cfg.ReturnIterator(iter)
		valueBytes := locateArrayElement(iter, firstPath)
		if valueBytes == nil {
			return newInvalidAny(path)
		}
		iter.ResetBytes(valueBytes)
		return locatePath(iter, path[1:])
	case int32:
		// An untyped '*' literal arrives as int32 (rune): wildcard over all
		// elements, dropping entries where the sub-path does not match.
		if '*' == firstPath {
			iter := any.cfg.BorrowIterator(any.buf)
			defer any.cfg.ReturnIterator(iter)
			arr := make([]Any, 0)
			iter.ReadArrayCB(func(iter *Iterator) bool {
				found := iter.readAny().Get(path[1:]...)
				if found.ValueType() != InvalidValue {
					arr = append(arr, found)
				}
				return true
			})
			return wrapArray(arr)
		}
		return newInvalidAny(path)
	default:
		return newInvalidAny(path)
	}
}
132
133func (any *arrayLazyAny) Size() int {
134 size := 0
135 iter := any.cfg.BorrowIterator(any.buf)
136 defer any.cfg.ReturnIterator(iter)
137 iter.ReadArrayCB(func(iter *Iterator) bool {
138 size++
139 iter.Skip()
140 return true
141 })
142 return size
143}
144
145func (any *arrayLazyAny) WriteTo(stream *Stream) {
146 stream.Write(any.buf)
147}
148
149func (any *arrayLazyAny) GetInterface() interface{} {
150 iter := any.cfg.BorrowIterator(any.buf)
151 defer any.cfg.ReturnIterator(iter)
152 return iter.Read()
153}
154
155type arrayAny struct {
156 baseAny
157 val reflect.Value
158}
159
160func wrapArray(val interface{}) *arrayAny {
161 return &arrayAny{baseAny{}, reflect.ValueOf(val)}
162}
163
164func (any *arrayAny) ValueType() ValueType {
165 return ArrayValue
166}
167
168func (any *arrayAny) MustBeValid() Any {
169 return any
170}
171
172func (any *arrayAny) LastError() error {
173 return nil
174}
175
176func (any *arrayAny) ToBool() bool {
177 return any.val.Len() != 0
178}
179
180func (any *arrayAny) ToInt() int {
181 if any.val.Len() == 0 {
182 return 0
183 }
184 return 1
185}
186
187func (any *arrayAny) ToInt32() int32 {
188 if any.val.Len() == 0 {
189 return 0
190 }
191 return 1
192}
193
194func (any *arrayAny) ToInt64() int64 {
195 if any.val.Len() == 0 {
196 return 0
197 }
198 return 1
199}
200
201func (any *arrayAny) ToUint() uint {
202 if any.val.Len() == 0 {
203 return 0
204 }
205 return 1
206}
207
208func (any *arrayAny) ToUint32() uint32 {
209 if any.val.Len() == 0 {
210 return 0
211 }
212 return 1
213}
214
215func (any *arrayAny) ToUint64() uint64 {
216 if any.val.Len() == 0 {
217 return 0
218 }
219 return 1
220}
221
222func (any *arrayAny) ToFloat32() float32 {
223 if any.val.Len() == 0 {
224 return 0
225 }
226 return 1
227}
228
229func (any *arrayAny) ToFloat64() float64 {
230 if any.val.Len() == 0 {
231 return 0
232 }
233 return 1
234}
235
236func (any *arrayAny) ToString() string {
237 str, _ := MarshalToString(any.val.Interface())
238 return str
239}
240
241func (any *arrayAny) Get(path ...interface{}) Any {
242 if len(path) == 0 {
243 return any
244 }
245 switch firstPath := path[0].(type) {
246 case int:
247 if firstPath < 0 || firstPath >= any.val.Len() {
248 return newInvalidAny(path)
249 }
250 return Wrap(any.val.Index(firstPath).Interface())
251 case int32:
252 if '*' == firstPath {
253 mappedAll := make([]Any, 0)
254 for i := 0; i < any.val.Len(); i++ {
255 mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
256 if mapped.ValueType() != InvalidValue {
257 mappedAll = append(mappedAll, mapped)
258 }
259 }
260 return wrapArray(mappedAll)
261 }
262 return newInvalidAny(path)
263 default:
264 return newInvalidAny(path)
265 }
266}
267
268func (any *arrayAny) Size() int {
269 return any.val.Len()
270}
271
272func (any *arrayAny) WriteTo(stream *Stream) {
273 stream.WriteVal(any.val)
274}
275
276func (any *arrayAny) GetInterface() interface{} {
277 return any.val.Interface()
278}
diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go
new file mode 100644
index 0000000..9452324
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_bool.go
@@ -0,0 +1,137 @@
1package jsoniter
2
3type trueAny struct {
4 baseAny
5}
6
7func (any *trueAny) LastError() error {
8 return nil
9}
10
11func (any *trueAny) ToBool() bool {
12 return true
13}
14
15func (any *trueAny) ToInt() int {
16 return 1
17}
18
19func (any *trueAny) ToInt32() int32 {
20 return 1
21}
22
23func (any *trueAny) ToInt64() int64 {
24 return 1
25}
26
27func (any *trueAny) ToUint() uint {
28 return 1
29}
30
31func (any *trueAny) ToUint32() uint32 {
32 return 1
33}
34
35func (any *trueAny) ToUint64() uint64 {
36 return 1
37}
38
39func (any *trueAny) ToFloat32() float32 {
40 return 1
41}
42
43func (any *trueAny) ToFloat64() float64 {
44 return 1
45}
46
47func (any *trueAny) ToString() string {
48 return "true"
49}
50
51func (any *trueAny) WriteTo(stream *Stream) {
52 stream.WriteTrue()
53}
54
55func (any *trueAny) Parse() *Iterator {
56 return nil
57}
58
59func (any *trueAny) GetInterface() interface{} {
60 return true
61}
62
63func (any *trueAny) ValueType() ValueType {
64 return BoolValue
65}
66
67func (any *trueAny) MustBeValid() Any {
68 return any
69}
70
71type falseAny struct {
72 baseAny
73}
74
75func (any *falseAny) LastError() error {
76 return nil
77}
78
79func (any *falseAny) ToBool() bool {
80 return false
81}
82
83func (any *falseAny) ToInt() int {
84 return 0
85}
86
87func (any *falseAny) ToInt32() int32 {
88 return 0
89}
90
91func (any *falseAny) ToInt64() int64 {
92 return 0
93}
94
95func (any *falseAny) ToUint() uint {
96 return 0
97}
98
99func (any *falseAny) ToUint32() uint32 {
100 return 0
101}
102
103func (any *falseAny) ToUint64() uint64 {
104 return 0
105}
106
107func (any *falseAny) ToFloat32() float32 {
108 return 0
109}
110
111func (any *falseAny) ToFloat64() float64 {
112 return 0
113}
114
115func (any *falseAny) ToString() string {
116 return "false"
117}
118
119func (any *falseAny) WriteTo(stream *Stream) {
120 stream.WriteFalse()
121}
122
123func (any *falseAny) Parse() *Iterator {
124 return nil
125}
126
127func (any *falseAny) GetInterface() interface{} {
128 return false
129}
130
131func (any *falseAny) ValueType() ValueType {
132 return BoolValue
133}
134
135func (any *falseAny) MustBeValid() Any {
136 return any
137}
diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go
new file mode 100644
index 0000000..35fdb09
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_float.go
@@ -0,0 +1,83 @@
1package jsoniter
2
3import (
4 "strconv"
5)
6
7type floatAny struct {
8 baseAny
9 val float64
10}
11
12func (any *floatAny) Parse() *Iterator {
13 return nil
14}
15
16func (any *floatAny) ValueType() ValueType {
17 return NumberValue
18}
19
20func (any *floatAny) MustBeValid() Any {
21 return any
22}
23
24func (any *floatAny) LastError() error {
25 return nil
26}
27
28func (any *floatAny) ToBool() bool {
29 return any.ToFloat64() != 0
30}
31
32func (any *floatAny) ToInt() int {
33 return int(any.val)
34}
35
36func (any *floatAny) ToInt32() int32 {
37 return int32(any.val)
38}
39
40func (any *floatAny) ToInt64() int64 {
41 return int64(any.val)
42}
43
44func (any *floatAny) ToUint() uint {
45 if any.val > 0 {
46 return uint(any.val)
47 }
48 return 0
49}
50
51func (any *floatAny) ToUint32() uint32 {
52 if any.val > 0 {
53 return uint32(any.val)
54 }
55 return 0
56}
57
58func (any *floatAny) ToUint64() uint64 {
59 if any.val > 0 {
60 return uint64(any.val)
61 }
62 return 0
63}
64
65func (any *floatAny) ToFloat32() float32 {
66 return float32(any.val)
67}
68
69func (any *floatAny) ToFloat64() float64 {
70 return any.val
71}
72
73func (any *floatAny) ToString() string {
74 return strconv.FormatFloat(any.val, 'E', -1, 64)
75}
76
77func (any *floatAny) WriteTo(stream *Stream) {
78 stream.WriteFloat64(any.val)
79}
80
81func (any *floatAny) GetInterface() interface{} {
82 return any.val
83}
diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go
new file mode 100644
index 0000000..1b56f39
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int32.go
@@ -0,0 +1,74 @@
1package jsoniter
2
3import (
4 "strconv"
5)
6
7type int32Any struct {
8 baseAny
9 val int32
10}
11
12func (any *int32Any) LastError() error {
13 return nil
14}
15
16func (any *int32Any) ValueType() ValueType {
17 return NumberValue
18}
19
20func (any *int32Any) MustBeValid() Any {
21 return any
22}
23
24func (any *int32Any) ToBool() bool {
25 return any.val != 0
26}
27
28func (any *int32Any) ToInt() int {
29 return int(any.val)
30}
31
32func (any *int32Any) ToInt32() int32 {
33 return any.val
34}
35
36func (any *int32Any) ToInt64() int64 {
37 return int64(any.val)
38}
39
40func (any *int32Any) ToUint() uint {
41 return uint(any.val)
42}
43
44func (any *int32Any) ToUint32() uint32 {
45 return uint32(any.val)
46}
47
48func (any *int32Any) ToUint64() uint64 {
49 return uint64(any.val)
50}
51
52func (any *int32Any) ToFloat32() float32 {
53 return float32(any.val)
54}
55
56func (any *int32Any) ToFloat64() float64 {
57 return float64(any.val)
58}
59
60func (any *int32Any) ToString() string {
61 return strconv.FormatInt(int64(any.val), 10)
62}
63
64func (any *int32Any) WriteTo(stream *Stream) {
65 stream.WriteInt32(any.val)
66}
67
68func (any *int32Any) Parse() *Iterator {
69 return nil
70}
71
72func (any *int32Any) GetInterface() interface{} {
73 return any.val
74}
diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go
new file mode 100644
index 0000000..c440d72
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int64.go
@@ -0,0 +1,74 @@
1package jsoniter
2
3import (
4 "strconv"
5)
6
7type int64Any struct {
8 baseAny
9 val int64
10}
11
12func (any *int64Any) LastError() error {
13 return nil
14}
15
16func (any *int64Any) ValueType() ValueType {
17 return NumberValue
18}
19
20func (any *int64Any) MustBeValid() Any {
21 return any
22}
23
24func (any *int64Any) ToBool() bool {
25 return any.val != 0
26}
27
28func (any *int64Any) ToInt() int {
29 return int(any.val)
30}
31
32func (any *int64Any) ToInt32() int32 {
33 return int32(any.val)
34}
35
36func (any *int64Any) ToInt64() int64 {
37 return any.val
38}
39
40func (any *int64Any) ToUint() uint {
41 return uint(any.val)
42}
43
44func (any *int64Any) ToUint32() uint32 {
45 return uint32(any.val)
46}
47
48func (any *int64Any) ToUint64() uint64 {
49 return uint64(any.val)
50}
51
52func (any *int64Any) ToFloat32() float32 {
53 return float32(any.val)
54}
55
56func (any *int64Any) ToFloat64() float64 {
57 return float64(any.val)
58}
59
60func (any *int64Any) ToString() string {
61 return strconv.FormatInt(any.val, 10)
62}
63
64func (any *int64Any) WriteTo(stream *Stream) {
65 stream.WriteInt64(any.val)
66}
67
68func (any *int64Any) Parse() *Iterator {
69 return nil
70}
71
72func (any *int64Any) GetInterface() interface{} {
73 return any.val
74}
diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go
new file mode 100644
index 0000000..1d859ea
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_invalid.go
@@ -0,0 +1,82 @@
1package jsoniter
2
3import "fmt"
4
5type invalidAny struct {
6 baseAny
7 err error
8}
9
10func newInvalidAny(path []interface{}) *invalidAny {
11 return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
12}
13
14func (any *invalidAny) LastError() error {
15 return any.err
16}
17
18func (any *invalidAny) ValueType() ValueType {
19 return InvalidValue
20}
21
22func (any *invalidAny) MustBeValid() Any {
23 panic(any.err)
24}
25
26func (any *invalidAny) ToBool() bool {
27 return false
28}
29
30func (any *invalidAny) ToInt() int {
31 return 0
32}
33
34func (any *invalidAny) ToInt32() int32 {
35 return 0
36}
37
38func (any *invalidAny) ToInt64() int64 {
39 return 0
40}
41
42func (any *invalidAny) ToUint() uint {
43 return 0
44}
45
46func (any *invalidAny) ToUint32() uint32 {
47 return 0
48}
49
50func (any *invalidAny) ToUint64() uint64 {
51 return 0
52}
53
54func (any *invalidAny) ToFloat32() float32 {
55 return 0
56}
57
58func (any *invalidAny) ToFloat64() float64 {
59 return 0
60}
61
62func (any *invalidAny) ToString() string {
63 return ""
64}
65
66func (any *invalidAny) WriteTo(stream *Stream) {
67}
68
69func (any *invalidAny) Get(path ...interface{}) Any {
70 if any.err == nil {
71 return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
72 }
73 return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
74}
75
76func (any *invalidAny) Parse() *Iterator {
77 return nil
78}
79
80func (any *invalidAny) GetInterface() interface{} {
81 return nil
82}
diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go
new file mode 100644
index 0000000..d04cb54
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_nil.go
@@ -0,0 +1,69 @@
1package jsoniter
2
3type nilAny struct {
4 baseAny
5}
6
7func (any *nilAny) LastError() error {
8 return nil
9}
10
11func (any *nilAny) ValueType() ValueType {
12 return NilValue
13}
14
15func (any *nilAny) MustBeValid() Any {
16 return any
17}
18
19func (any *nilAny) ToBool() bool {
20 return false
21}
22
23func (any *nilAny) ToInt() int {
24 return 0
25}
26
27func (any *nilAny) ToInt32() int32 {
28 return 0
29}
30
31func (any *nilAny) ToInt64() int64 {
32 return 0
33}
34
35func (any *nilAny) ToUint() uint {
36 return 0
37}
38
39func (any *nilAny) ToUint32() uint32 {
40 return 0
41}
42
43func (any *nilAny) ToUint64() uint64 {
44 return 0
45}
46
47func (any *nilAny) ToFloat32() float32 {
48 return 0
49}
50
51func (any *nilAny) ToFloat64() float64 {
52 return 0
53}
54
55func (any *nilAny) ToString() string {
56 return ""
57}
58
59func (any *nilAny) WriteTo(stream *Stream) {
60 stream.WriteNil()
61}
62
63func (any *nilAny) Parse() *Iterator {
64 return nil
65}
66
67func (any *nilAny) GetInterface() interface{} {
68 return nil
69}
diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go
new file mode 100644
index 0000000..9d1e901
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_number.go
@@ -0,0 +1,123 @@
1package jsoniter
2
3import (
4 "io"
5 "unsafe"
6)
7
8type numberLazyAny struct {
9 baseAny
10 cfg *frozenConfig
11 buf []byte
12 err error
13}
14
15func (any *numberLazyAny) ValueType() ValueType {
16 return NumberValue
17}
18
19func (any *numberLazyAny) MustBeValid() Any {
20 return any
21}
22
23func (any *numberLazyAny) LastError() error {
24 return any.err
25}
26
27func (any *numberLazyAny) ToBool() bool {
28 return any.ToFloat64() != 0
29}
30
31func (any *numberLazyAny) ToInt() int {
32 iter := any.cfg.BorrowIterator(any.buf)
33 defer any.cfg.ReturnIterator(iter)
34 val := iter.ReadInt()
35 if iter.Error != nil && iter.Error != io.EOF {
36 any.err = iter.Error
37 }
38 return val
39}
40
41func (any *numberLazyAny) ToInt32() int32 {
42 iter := any.cfg.BorrowIterator(any.buf)
43 defer any.cfg.ReturnIterator(iter)
44 val := iter.ReadInt32()
45 if iter.Error != nil && iter.Error != io.EOF {
46 any.err = iter.Error
47 }
48 return val
49}
50
51func (any *numberLazyAny) ToInt64() int64 {
52 iter := any.cfg.BorrowIterator(any.buf)
53 defer any.cfg.ReturnIterator(iter)
54 val := iter.ReadInt64()
55 if iter.Error != nil && iter.Error != io.EOF {
56 any.err = iter.Error
57 }
58 return val
59}
60
61func (any *numberLazyAny) ToUint() uint {
62 iter := any.cfg.BorrowIterator(any.buf)
63 defer any.cfg.ReturnIterator(iter)
64 val := iter.ReadUint()
65 if iter.Error != nil && iter.Error != io.EOF {
66 any.err = iter.Error
67 }
68 return val
69}
70
71func (any *numberLazyAny) ToUint32() uint32 {
72 iter := any.cfg.BorrowIterator(any.buf)
73 defer any.cfg.ReturnIterator(iter)
74 val := iter.ReadUint32()
75 if iter.Error != nil && iter.Error != io.EOF {
76 any.err = iter.Error
77 }
78 return val
79}
80
81func (any *numberLazyAny) ToUint64() uint64 {
82 iter := any.cfg.BorrowIterator(any.buf)
83 defer any.cfg.ReturnIterator(iter)
84 val := iter.ReadUint64()
85 if iter.Error != nil && iter.Error != io.EOF {
86 any.err = iter.Error
87 }
88 return val
89}
90
91func (any *numberLazyAny) ToFloat32() float32 {
92 iter := any.cfg.BorrowIterator(any.buf)
93 defer any.cfg.ReturnIterator(iter)
94 val := iter.ReadFloat32()
95 if iter.Error != nil && iter.Error != io.EOF {
96 any.err = iter.Error
97 }
98 return val
99}
100
101func (any *numberLazyAny) ToFloat64() float64 {
102 iter := any.cfg.BorrowIterator(any.buf)
103 defer any.cfg.ReturnIterator(iter)
104 val := iter.ReadFloat64()
105 if iter.Error != nil && iter.Error != io.EOF {
106 any.err = iter.Error
107 }
108 return val
109}
110
111func (any *numberLazyAny) ToString() string {
112 return *(*string)(unsafe.Pointer(&any.buf))
113}
114
115func (any *numberLazyAny) WriteTo(stream *Stream) {
116 stream.Write(any.buf)
117}
118
119func (any *numberLazyAny) GetInterface() interface{} {
120 iter := any.cfg.BorrowIterator(any.buf)
121 defer any.cfg.ReturnIterator(iter)
122 return iter.Read()
123}
diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go
new file mode 100644
index 0000000..c44ef5c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_object.go
@@ -0,0 +1,374 @@
1package jsoniter
2
3import (
4 "reflect"
5 "unsafe"
6)
7
8type objectLazyAny struct {
9 baseAny
10 cfg *frozenConfig
11 buf []byte
12 err error
13}
14
15func (any *objectLazyAny) ValueType() ValueType {
16 return ObjectValue
17}
18
19func (any *objectLazyAny) MustBeValid() Any {
20 return any
21}
22
23func (any *objectLazyAny) LastError() error {
24 return any.err
25}
26
27func (any *objectLazyAny) ToBool() bool {
28 return true
29}
30
31func (any *objectLazyAny) ToInt() int {
32 return 0
33}
34
35func (any *objectLazyAny) ToInt32() int32 {
36 return 0
37}
38
39func (any *objectLazyAny) ToInt64() int64 {
40 return 0
41}
42
43func (any *objectLazyAny) ToUint() uint {
44 return 0
45}
46
47func (any *objectLazyAny) ToUint32() uint32 {
48 return 0
49}
50
51func (any *objectLazyAny) ToUint64() uint64 {
52 return 0
53}
54
55func (any *objectLazyAny) ToFloat32() float32 {
56 return 0
57}
58
59func (any *objectLazyAny) ToFloat64() float64 {
60 return 0
61}
62
63func (any *objectLazyAny) ToString() string {
64 return *(*string)(unsafe.Pointer(&any.buf))
65}
66
67func (any *objectLazyAny) ToVal(obj interface{}) {
68 iter := any.cfg.BorrowIterator(any.buf)
69 defer any.cfg.ReturnIterator(iter)
70 iter.ReadVal(obj)
71}
72
73func (any *objectLazyAny) Get(path ...interface{}) Any {
74 if len(path) == 0 {
75 return any
76 }
77 switch firstPath := path[0].(type) {
78 case string:
79 iter := any.cfg.BorrowIterator(any.buf)
80 defer any.cfg.ReturnIterator(iter)
81 valueBytes := locateObjectField(iter, firstPath)
82 if valueBytes == nil {
83 return newInvalidAny(path)
84 }
85 iter.ResetBytes(valueBytes)
86 return locatePath(iter, path[1:])
87 case int32:
88 if '*' == firstPath {
89 mappedAll := map[string]Any{}
90 iter := any.cfg.BorrowIterator(any.buf)
91 defer any.cfg.ReturnIterator(iter)
92 iter.ReadMapCB(func(iter *Iterator, field string) bool {
93 mapped := locatePath(iter, path[1:])
94 if mapped.ValueType() != InvalidValue {
95 mappedAll[field] = mapped
96 }
97 return true
98 })
99 return wrapMap(mappedAll)
100 }
101 return newInvalidAny(path)
102 default:
103 return newInvalidAny(path)
104 }
105}
106
107func (any *objectLazyAny) Keys() []string {
108 keys := []string{}
109 iter := any.cfg.BorrowIterator(any.buf)
110 defer any.cfg.ReturnIterator(iter)
111 iter.ReadMapCB(func(iter *Iterator, field string) bool {
112 iter.Skip()
113 keys = append(keys, field)
114 return true
115 })
116 return keys
117}
118
119func (any *objectLazyAny) Size() int {
120 size := 0
121 iter := any.cfg.BorrowIterator(any.buf)
122 defer any.cfg.ReturnIterator(iter)
123 iter.ReadObjectCB(func(iter *Iterator, field string) bool {
124 iter.Skip()
125 size++
126 return true
127 })
128 return size
129}
130
131func (any *objectLazyAny) WriteTo(stream *Stream) {
132 stream.Write(any.buf)
133}
134
135func (any *objectLazyAny) GetInterface() interface{} {
136 iter := any.cfg.BorrowIterator(any.buf)
137 defer any.cfg.ReturnIterator(iter)
138 return iter.Read()
139}
140
141type objectAny struct {
142 baseAny
143 err error
144 val reflect.Value
145}
146
147func wrapStruct(val interface{}) *objectAny {
148 return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
149}
150
151func (any *objectAny) ValueType() ValueType {
152 return ObjectValue
153}
154
155func (any *objectAny) MustBeValid() Any {
156 return any
157}
158
159func (any *objectAny) Parse() *Iterator {
160 return nil
161}
162
163func (any *objectAny) LastError() error {
164 return any.err
165}
166
167func (any *objectAny) ToBool() bool {
168 return any.val.NumField() != 0
169}
170
171func (any *objectAny) ToInt() int {
172 return 0
173}
174
175func (any *objectAny) ToInt32() int32 {
176 return 0
177}
178
179func (any *objectAny) ToInt64() int64 {
180 return 0
181}
182
183func (any *objectAny) ToUint() uint {
184 return 0
185}
186
187func (any *objectAny) ToUint32() uint32 {
188 return 0
189}
190
191func (any *objectAny) ToUint64() uint64 {
192 return 0
193}
194
195func (any *objectAny) ToFloat32() float32 {
196 return 0
197}
198
199func (any *objectAny) ToFloat64() float64 {
200 return 0
201}
202
203func (any *objectAny) ToString() string {
204 str, err := MarshalToString(any.val.Interface())
205 any.err = err
206 return str
207}
208
209func (any *objectAny) Get(path ...interface{}) Any {
210 if len(path) == 0 {
211 return any
212 }
213 switch firstPath := path[0].(type) {
214 case string:
215 field := any.val.FieldByName(firstPath)
216 if !field.IsValid() {
217 return newInvalidAny(path)
218 }
219 return Wrap(field.Interface())
220 case int32:
221 if '*' == firstPath {
222 mappedAll := map[string]Any{}
223 for i := 0; i < any.val.NumField(); i++ {
224 field := any.val.Field(i)
225 if field.CanInterface() {
226 mapped := Wrap(field.Interface()).Get(path[1:]...)
227 if mapped.ValueType() != InvalidValue {
228 mappedAll[any.val.Type().Field(i).Name] = mapped
229 }
230 }
231 }
232 return wrapMap(mappedAll)
233 }
234 return newInvalidAny(path)
235 default:
236 return newInvalidAny(path)
237 }
238}
239
240func (any *objectAny) Keys() []string {
241 keys := make([]string, 0, any.val.NumField())
242 for i := 0; i < any.val.NumField(); i++ {
243 keys = append(keys, any.val.Type().Field(i).Name)
244 }
245 return keys
246}
247
248func (any *objectAny) Size() int {
249 return any.val.NumField()
250}
251
252func (any *objectAny) WriteTo(stream *Stream) {
253 stream.WriteVal(any.val)
254}
255
256func (any *objectAny) GetInterface() interface{} {
257 return any.val.Interface()
258}
259
260type mapAny struct {
261 baseAny
262 err error
263 val reflect.Value
264}
265
266func wrapMap(val interface{}) *mapAny {
267 return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
268}
269
270func (any *mapAny) ValueType() ValueType {
271 return ObjectValue
272}
273
274func (any *mapAny) MustBeValid() Any {
275 return any
276}
277
278func (any *mapAny) Parse() *Iterator {
279 return nil
280}
281
282func (any *mapAny) LastError() error {
283 return any.err
284}
285
286func (any *mapAny) ToBool() bool {
287 return true
288}
289
290func (any *mapAny) ToInt() int {
291 return 0
292}
293
294func (any *mapAny) ToInt32() int32 {
295 return 0
296}
297
298func (any *mapAny) ToInt64() int64 {
299 return 0
300}
301
302func (any *mapAny) ToUint() uint {
303 return 0
304}
305
306func (any *mapAny) ToUint32() uint32 {
307 return 0
308}
309
310func (any *mapAny) ToUint64() uint64 {
311 return 0
312}
313
314func (any *mapAny) ToFloat32() float32 {
315 return 0
316}
317
318func (any *mapAny) ToFloat64() float64 {
319 return 0
320}
321
322func (any *mapAny) ToString() string {
323 str, err := MarshalToString(any.val.Interface())
324 any.err = err
325 return str
326}
327
328func (any *mapAny) Get(path ...interface{}) Any {
329 if len(path) == 0 {
330 return any
331 }
332 switch firstPath := path[0].(type) {
333 case int32:
334 if '*' == firstPath {
335 mappedAll := map[string]Any{}
336 for _, key := range any.val.MapKeys() {
337 keyAsStr := key.String()
338 element := Wrap(any.val.MapIndex(key).Interface())
339 mapped := element.Get(path[1:]...)
340 if mapped.ValueType() != InvalidValue {
341 mappedAll[keyAsStr] = mapped
342 }
343 }
344 return wrapMap(mappedAll)
345 }
346 return newInvalidAny(path)
347 default:
348 value := any.val.MapIndex(reflect.ValueOf(firstPath))
349 if !value.IsValid() {
350 return newInvalidAny(path)
351 }
352 return Wrap(value.Interface())
353 }
354}
355
356func (any *mapAny) Keys() []string {
357 keys := make([]string, 0, any.val.Len())
358 for _, key := range any.val.MapKeys() {
359 keys = append(keys, key.String())
360 }
361 return keys
362}
363
364func (any *mapAny) Size() int {
365 return any.val.Len()
366}
367
368func (any *mapAny) WriteTo(stream *Stream) {
369 stream.WriteVal(any.val)
370}
371
372func (any *mapAny) GetInterface() interface{} {
373 return any.val.Interface()
374}
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
new file mode 100644
index 0000000..1f12f66
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_str.go
@@ -0,0 +1,166 @@
1package jsoniter
2
3import (
4 "fmt"
5 "strconv"
6)
7
8type stringAny struct {
9 baseAny
10 val string
11}
12
13func (any *stringAny) Get(path ...interface{}) Any {
14 if len(path) == 0 {
15 return any
16 }
17 return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
18}
19
20func (any *stringAny) Parse() *Iterator {
21 return nil
22}
23
24func (any *stringAny) ValueType() ValueType {
25 return StringValue
26}
27
28func (any *stringAny) MustBeValid() Any {
29 return any
30}
31
32func (any *stringAny) LastError() error {
33 return nil
34}
35
36func (any *stringAny) ToBool() bool {
37 str := any.ToString()
38 if str == "0" {
39 return false
40 }
41 for _, c := range str {
42 switch c {
43 case ' ', '\n', '\r', '\t':
44 default:
45 return true
46 }
47 }
48 return false
49}
50
51func (any *stringAny) ToInt() int {
52 return int(any.ToInt64())
53
54}
55
56func (any *stringAny) ToInt32() int32 {
57 return int32(any.ToInt64())
58}
59
60func (any *stringAny) ToInt64() int64 {
61 if any.val == "" {
62 return 0
63 }
64
65 flag := 1
66 startPos := 0
67 if any.val[0] == '+' || any.val[0] == '-' {
68 startPos = 1
69 }
70
71 if any.val[0] == '-' {
72 flag = -1
73 }
74
75 endPos := startPos
76 for i := startPos; i < len(any.val); i++ {
77 if any.val[i] >= '0' && any.val[i] <= '9' {
78 endPos = i + 1
79 } else {
80 break
81 }
82 }
83 parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
84 return int64(flag) * parsed
85}
86
87func (any *stringAny) ToUint() uint {
88 return uint(any.ToUint64())
89}
90
91func (any *stringAny) ToUint32() uint32 {
92 return uint32(any.ToUint64())
93}
94
95func (any *stringAny) ToUint64() uint64 {
96 if any.val == "" {
97 return 0
98 }
99
100 startPos := 0
101
102 if any.val[0] == '-' {
103 return 0
104 }
105 if any.val[0] == '+' {
106 startPos = 1
107 }
108
109 endPos := startPos
110 for i := startPos; i < len(any.val); i++ {
111 if any.val[i] >= '0' && any.val[i] <= '9' {
112 endPos = i + 1
113 } else {
114 break
115 }
116 }
117 parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
118 return parsed
119}
120
121func (any *stringAny) ToFloat32() float32 {
122 return float32(any.ToFloat64())
123}
124
125func (any *stringAny) ToFloat64() float64 {
126 if len(any.val) == 0 {
127 return 0
128 }
129
130 // first char invalid
131 if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
132 return 0
133 }
134
135 // extract valid num expression from string
136 // eg 123true => 123, -12.12xxa => -12.12
137 endPos := 1
138 for i := 1; i < len(any.val); i++ {
139 if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
140 endPos = i + 1
141 continue
142 }
143
144 // end position is the first char which is not digit
145 if any.val[i] >= '0' && any.val[i] <= '9' {
146 endPos = i + 1
147 } else {
148 endPos = i
149 break
150 }
151 }
152 parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
153 return parsed
154}
155
156func (any *stringAny) ToString() string {
157 return any.val
158}
159
160func (any *stringAny) WriteTo(stream *Stream) {
161 stream.WriteString(any.val)
162}
163
164func (any *stringAny) GetInterface() interface{} {
165 return any.val
166}
diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go
new file mode 100644
index 0000000..656bbd3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint32.go
@@ -0,0 +1,74 @@
1package jsoniter
2
3import (
4 "strconv"
5)
6
7type uint32Any struct {
8 baseAny
9 val uint32
10}
11
12func (any *uint32Any) LastError() error {
13 return nil
14}
15
16func (any *uint32Any) ValueType() ValueType {
17 return NumberValue
18}
19
20func (any *uint32Any) MustBeValid() Any {
21 return any
22}
23
24func (any *uint32Any) ToBool() bool {
25 return any.val != 0
26}
27
28func (any *uint32Any) ToInt() int {
29 return int(any.val)
30}
31
32func (any *uint32Any) ToInt32() int32 {
33 return int32(any.val)
34}
35
36func (any *uint32Any) ToInt64() int64 {
37 return int64(any.val)
38}
39
40func (any *uint32Any) ToUint() uint {
41 return uint(any.val)
42}
43
44func (any *uint32Any) ToUint32() uint32 {
45 return any.val
46}
47
48func (any *uint32Any) ToUint64() uint64 {
49 return uint64(any.val)
50}
51
52func (any *uint32Any) ToFloat32() float32 {
53 return float32(any.val)
54}
55
56func (any *uint32Any) ToFloat64() float64 {
57 return float64(any.val)
58}
59
60func (any *uint32Any) ToString() string {
61 return strconv.FormatInt(int64(any.val), 10)
62}
63
64func (any *uint32Any) WriteTo(stream *Stream) {
65 stream.WriteUint32(any.val)
66}
67
68func (any *uint32Any) Parse() *Iterator {
69 return nil
70}
71
72func (any *uint32Any) GetInterface() interface{} {
73 return any.val
74}
diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go
new file mode 100644
index 0000000..7df2fce
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint64.go
@@ -0,0 +1,74 @@
1package jsoniter
2
3import (
4 "strconv"
5)
6
7type uint64Any struct {
8 baseAny
9 val uint64
10}
11
12func (any *uint64Any) LastError() error {
13 return nil
14}
15
16func (any *uint64Any) ValueType() ValueType {
17 return NumberValue
18}
19
20func (any *uint64Any) MustBeValid() Any {
21 return any
22}
23
24func (any *uint64Any) ToBool() bool {
25 return any.val != 0
26}
27
28func (any *uint64Any) ToInt() int {
29 return int(any.val)
30}
31
32func (any *uint64Any) ToInt32() int32 {
33 return int32(any.val)
34}
35
36func (any *uint64Any) ToInt64() int64 {
37 return int64(any.val)
38}
39
40func (any *uint64Any) ToUint() uint {
41 return uint(any.val)
42}
43
44func (any *uint64Any) ToUint32() uint32 {
45 return uint32(any.val)
46}
47
48func (any *uint64Any) ToUint64() uint64 {
49 return any.val
50}
51
52func (any *uint64Any) ToFloat32() float32 {
53 return float32(any.val)
54}
55
56func (any *uint64Any) ToFloat64() float64 {
57 return float64(any.val)
58}
59
60func (any *uint64Any) ToString() string {
61 return strconv.FormatUint(any.val, 10)
62}
63
64func (any *uint64Any) WriteTo(stream *Stream) {
65 stream.WriteUint64(any.val)
66}
67
68func (any *uint64Any) Parse() *Iterator {
69 return nil
70}
71
72func (any *uint64Any) GetInterface() interface{} {
73 return any.val
74}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
new file mode 100644
index 0000000..b45ef68
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
1#!/bin/bash
2set -e
3set -x
4
5if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
6 mkdir -p /tmp/build-golang/src/github.com/json-iterator
7 ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
8fi
9export GOPATH=/tmp/build-golang
10go get -u github.com/golang/dep/cmd/dep
11cd /tmp/build-golang/src/github.com/json-iterator/go
12exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
new file mode 100644
index 0000000..2adcdc3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/config.go
@@ -0,0 +1,375 @@
1package jsoniter
2
3import (
4 "encoding/json"
5 "io"
6 "reflect"
7 "sync"
8 "unsafe"
9
10 "github.com/modern-go/concurrent"
11 "github.com/modern-go/reflect2"
12)
13
// Config customize how the API should behave.
// The API is created from Config by Froze.
type Config struct {
	// IndentionStep is the number of spaces added per nesting level when
	// marshaling (see MarshalIndent); 0 produces compact output.
	IndentionStep int
	// MarshalFloatWith6Digits marshals floats with only 6 digits of
	// precision — lossy but faster (see marshalFloatWith6Digits).
	MarshalFloatWith6Digits bool
	// EscapeHTML installs an HTML-escaping string encoder (see escapeHTML),
	// matching encoding/json's default.
	EscapeHTML bool
	// SortMapKeys requests sorted map keys during marshaling; consumed by
	// the codec layer (not visible in this file).
	SortMapKeys bool
	// UseNumber decodes JSON numbers into json.Number instead of float64
	// (see useNumber).
	UseNumber bool
	// DisallowUnknownFields presumably rejects unknown object keys while
	// decoding; consumed by the codec layer — confirm there.
	DisallowUnknownFields bool
	// TagKey is the struct tag key used for field mapping; empty means
	// "json" (see getTagKey).
	TagKey string
	// OnlyTaggedField presumably limits codecs to explicitly tagged
	// fields; consumed by the codec layer — confirm there.
	OnlyTaggedField bool
	// ValidateJsonRawMessage validates json.RawMessage contents on
	// marshal, emitting null for invalid payloads (see validateJsonRawMessage).
	ValidateJsonRawMessage bool
	// ObjectFieldMustBeSimpleString skips unescaping of object field
	// names; consumed by the codec layer — confirm there.
	ObjectFieldMustBeSimpleString bool
	// CaseSensitive presumably controls case-sensitive field-name
	// matching; consumed by the codec layer — confirm there.
	CaseSensitive bool
}
29
// API the public interface of this package.
// Primary Marshal and Unmarshal.
type API interface {
	IteratorPool
	StreamPool
	// MarshalToString is Marshal with a string result.
	MarshalToString(v interface{}) (string, error)
	Marshal(v interface{}) ([]byte, error)
	// MarshalIndent supports only an empty prefix and space-only indent
	// (see the frozenConfig implementation, which panics otherwise).
	MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
	UnmarshalFromString(str string, v interface{}) error
	Unmarshal(data []byte, v interface{}) error
	// Get lazily extracts the element addressed by path as an Any.
	Get(data []byte, path ...interface{}) Any
	NewEncoder(writer io.Writer) *Encoder
	NewDecoder(reader io.Reader) *Decoder
	// Valid reports whether data starts with a well-formed JSON value.
	Valid(data []byte) bool
	RegisterExtension(extension Extension)
	DecoderOf(typ reflect2.Type) ValDecoder
	EncoderOf(typ reflect2.Type) ValEncoder
}
48
// ConfigDefault the default API: HTML escaping on, everything else off.
var ConfigDefault = Config{
	EscapeHTML: true,
}.Froze()

// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
var ConfigCompatibleWithStandardLibrary = Config{
	EscapeHTML:             true,
	SortMapKeys:            true,
	ValidateJsonRawMessage: true,
}.Froze()

// ConfigFastest marshals float with only 6 digits precision
var ConfigFastest = Config{
	EscapeHTML:                    false,
	MarshalFloatWith6Digits:       true, // will lose precision
	ObjectFieldMustBeSimpleString: true, // do not unescape object field
}.Froze()
67
// frozenConfig is the immutable runtime form of a Config, produced by
// Froze. It owns the per-config codec caches, extensions, and object pools.
type frozenConfig struct {
	configBeforeFrozen            Config // the Config this instance was built from
	sortMapKeys                   bool
	indentionStep                 int
	objectFieldMustBeSimpleString bool
	onlyTaggedField               bool
	disallowUnknownFields         bool
	decoderCache                  *concurrent.Map // cache key (uintptr) -> ValDecoder
	encoderCache                  *concurrent.Map // cache key (uintptr) -> ValEncoder
	encoderExtension              Extension
	decoderExtension              Extension
	extraExtensions               []Extension // added later via RegisterExtension
	streamPool                    *sync.Pool  // recycles *Stream (512-byte buffers)
	iteratorPool                  *sync.Pool  // recycles *Iterator
	caseSensitive                 bool
}
84
// initCache allocates the per-config decoder and encoder caches.
func (cfg *frozenConfig) initCache() {
	cfg.decoderCache = concurrent.NewMap()
	cfg.encoderCache = concurrent.NewMap()
}
89
90func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
91 cfg.decoderCache.Store(cacheKey, decoder)
92}
93
94func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
95 cfg.encoderCache.Store(cacheKey, encoder)
96}
97
98func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
99 decoder, found := cfg.decoderCache.Load(cacheKey)
100 if found {
101 return decoder.(ValDecoder)
102 }
103 return nil
104}
105
106func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
107 encoder, found := cfg.encoderCache.Load(cacheKey)
108 if found {
109 return encoder.(ValEncoder)
110 }
111 return nil
112}
113
114var cfgCache = concurrent.NewMap()
115
116func getFrozenConfigFromCache(cfg Config) *frozenConfig {
117 obj, found := cfgCache.Load(cfg)
118 if found {
119 return obj.(*frozenConfig)
120 }
121 return nil
122}
123
124func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
125 cfgCache.Store(cfg, frozenConfig)
126}
127
128// Froze forge API from config
129func (cfg Config) Froze() API {
130 api := &frozenConfig{
131 sortMapKeys: cfg.SortMapKeys,
132 indentionStep: cfg.IndentionStep,
133 objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
134 onlyTaggedField: cfg.OnlyTaggedField,
135 disallowUnknownFields: cfg.DisallowUnknownFields,
136 caseSensitive: cfg.CaseSensitive,
137 }
138 api.streamPool = &sync.Pool{
139 New: func() interface{} {
140 return NewStream(api, nil, 512)
141 },
142 }
143 api.iteratorPool = &sync.Pool{
144 New: func() interface{} {
145 return NewIterator(api)
146 },
147 }
148 api.initCache()
149 encoderExtension := EncoderExtension{}
150 decoderExtension := DecoderExtension{}
151 if cfg.MarshalFloatWith6Digits {
152 api.marshalFloatWith6Digits(encoderExtension)
153 }
154 if cfg.EscapeHTML {
155 api.escapeHTML(encoderExtension)
156 }
157 if cfg.UseNumber {
158 api.useNumber(decoderExtension)
159 }
160 if cfg.ValidateJsonRawMessage {
161 api.validateJsonRawMessage(encoderExtension)
162 }
163 api.encoderExtension = encoderExtension
164 api.decoderExtension = decoderExtension
165 api.configBeforeFrozen = cfg
166 return api
167}
168
169func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
170 api := getFrozenConfigFromCache(cfg)
171 if api != nil {
172 return api
173 }
174 api = cfg.Froze().(*frozenConfig)
175 for _, extension := range extraExtensions {
176 api.RegisterExtension(extension)
177 }
178 addFrozenConfigToCache(cfg, api)
179 return api
180}
181
// validateJsonRawMessage installs an encoder for json.RawMessage (and the
// package's RawMessage alias) that parses the payload first and writes
// null when it is not valid JSON, instead of emitting broken bytes.
func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
	encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
		rawMessage := *(*json.RawMessage)(ptr)
		iter := cfg.BorrowIterator([]byte(rawMessage))
		defer cfg.ReturnIterator(iter)
		// Read consumes one value; any error other than EOF means the
		// payload is malformed.
		iter.Read()
		if iter.Error != nil && iter.Error != io.EOF {
			stream.WriteRaw("null")
		} else {
			stream.WriteRaw(string(rawMessage))
		}
	}, func(ptr unsafe.Pointer) bool {
		// A zero-length raw message counts as empty (for omitempty).
		return len(*((*json.RawMessage)(ptr))) == 0
	}}
	extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
	extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
}
199
// useNumber installs an interface{} decoder that preserves JSON numbers
// as json.Number instead of converting them to float64.
func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
	extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
		exitingValue := *((*interface{})(ptr))
		// If the destination already holds a pointer, decode into it in
		// place rather than replacing it.
		if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr {
			iter.ReadVal(exitingValue)
			return
		}
		if iter.WhatIsNext() == NumberValue {
			*((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
		} else {
			// Non-number values keep the generic Read() mapping.
			*((*interface{})(ptr)) = iter.Read()
		}
	}}
}
214func (cfg *frozenConfig) getTagKey() string {
215 tagKey := cfg.configBeforeFrozen.TagKey
216 if tagKey == "" {
217 return "json"
218 }
219 return tagKey
220}
221
222func (cfg *frozenConfig) RegisterExtension(extension Extension) {
223 cfg.extraExtensions = append(cfg.extraExtensions, extension)
224 copied := cfg.configBeforeFrozen
225 cfg.configBeforeFrozen = copied
226}
227
// lossyFloat32Encoder writes float32 values via WriteFloat32Lossy,
// trading precision (6 digits) for speed.
type lossyFloat32Encoder struct {
}

func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteFloat32Lossy(*((*float32)(ptr)))
}

// IsEmpty treats 0 as empty (for omitempty handling).
func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*float32)(ptr)) == 0
}

// lossyFloat64Encoder is the float64 counterpart of lossyFloat32Encoder.
type lossyFloat64Encoder struct {
}

func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteFloat64Lossy(*((*float64)(ptr)))
}

func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*float64)(ptr)) == 0
}

// marshalFloatWith6Digits installs the lossy float encoders above,
// keeping roughly 10**(-6) precision for better performance.
func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
	// for better performance
	extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
	extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
}
257
// htmlEscapedStringEncoder writes strings with HTML-sensitive characters
// escaped, matching encoding/json's default behavior.
type htmlEscapedStringEncoder struct {
}

func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	str := *((*string)(ptr))
	stream.WriteStringWithHTMLEscaped(str)
}

// IsEmpty treats "" as empty (for omitempty handling).
func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*string)(ptr)) == ""
}

// escapeHTML installs the HTML-escaping string encoder for all strings.
func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
	encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
}
273
// cleanDecoders cleans decoders registered or cached.
// NOTE(review): this resets package-level decoder registries shared by
// every config, then rebuilds this frozenConfig in place from its
// original Config — looks like a test/reset helper; confirm it is never
// called concurrently with normal use.
func (cfg *frozenConfig) cleanDecoders() {
	typeDecoders = map[string]ValDecoder{}
	fieldDecoders = map[string]ValDecoder{}
	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
}

// cleanEncoders cleans encoders registered or cached; see cleanDecoders.
func (cfg *frozenConfig) cleanEncoders() {
	typeEncoders = map[string]ValEncoder{}
	fieldEncoders = map[string]ValEncoder{}
	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
}
285
286func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
287 stream := cfg.BorrowStream(nil)
288 defer cfg.ReturnStream(stream)
289 stream.WriteVal(v)
290 if stream.Error != nil {
291 return "", stream.Error
292 }
293 return string(stream.Buffer()), nil
294}
295
296func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
297 stream := cfg.BorrowStream(nil)
298 defer cfg.ReturnStream(stream)
299 stream.WriteVal(v)
300 if stream.Error != nil {
301 return nil, stream.Error
302 }
303 result := stream.Buffer()
304 copied := make([]byte, len(result))
305 copy(copied, result)
306 return copied, nil
307}
308
// MarshalIndent encodes v with indentation. Only a subset of the standard
// library contract is supported: prefix must be "" and indent may contain
// only spaces — anything else panics. The indented encoding is delegated
// to a (cached) config whose IndentionStep is len(indent).
func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
	if prefix != "" {
		panic("prefix is not supported")
	}
	for _, r := range indent {
		if r != ' ' {
			panic("indent can only be space")
		}
	}
	newCfg := cfg.configBeforeFrozen
	newCfg.IndentionStep = len(indent)
	return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
}
322
323func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
324 data := []byte(str)
325 iter := cfg.BorrowIterator(data)
326 defer cfg.ReturnIterator(iter)
327 iter.ReadVal(v)
328 c := iter.nextToken()
329 if c == 0 {
330 if iter.Error == io.EOF {
331 return nil
332 }
333 return iter.Error
334 }
335 iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
336 return iter.Error
337}
338
// Get locates an element inside data by path and returns it as a lazy
// Any. Path element semantics are defined by locatePath (elsewhere in
// the package) — presumably string keys index objects and ints index
// arrays; confirm against locatePath.
func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
	iter := cfg.BorrowIterator(data)
	defer cfg.ReturnIterator(iter)
	return locatePath(iter, path)
}
344
345func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
346 iter := cfg.BorrowIterator(data)
347 defer cfg.ReturnIterator(iter)
348 iter.ReadVal(v)
349 c := iter.nextToken()
350 if c == 0 {
351 if iter.Error == io.EOF {
352 return nil
353 }
354 return iter.Error
355 }
356 iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
357 return iter.Error
358}
359
// NewEncoder creates a stream-backed encoder writing to writer, with a
// 512-byte buffer.
func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
	stream := NewStream(cfg, writer, 512)
	return &Encoder{stream}
}

// NewDecoder creates an iterator-backed decoder reading from reader,
// with a 512-byte buffer.
func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
	iter := Parse(cfg, reader, 512)
	return &Decoder{iter}
}
369
// Valid reports whether data starts with a syntactically valid JSON
// value (exactly one value is skipped).
// NOTE(review): bytes after that first value are not inspected, so input
// with trailing garbage may still report true — confirm this matches the
// intended contract.
func (cfg *frozenConfig) Valid(data []byte) bool {
	iter := cfg.BorrowIterator(data)
	defer cfg.ReturnIterator(iter)
	iter.Skip()
	return iter.Error == nil
}
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
new file mode 100644
index 0000000..3095662
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
@@ -0,0 +1,7 @@
1| json type \ dest type | bool | int | uint | float |string|
2| --- | --- | --- | --- |--|--|
3| number | positive => true <br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
4| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3<br/> 12.4xxa => 12.4 <br/> +1.1e2 =>110 |same as origin|
5| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/>false => 0|true => "true" <br/> false => "false"|
| object | true | 0 | 0 |0|original json|
7| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0<br/>[1,2] => 1|original json| \ No newline at end of file
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
new file mode 100644
index 0000000..29b31cf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter.go
@@ -0,0 +1,349 @@
1package jsoniter
2
3import (
4 "encoding/json"
5 "fmt"
6 "io"
7)
8
// ValueType the type for JSON element.
// The zero value is InvalidValue; see the valueTypes lookup table below,
// which maps a value's first byte to one of these.
type ValueType int

const (
	// InvalidValue invalid JSON element
	InvalidValue ValueType = iota
	// StringValue JSON element "string"
	StringValue
	// NumberValue JSON element 100 or 0.10
	NumberValue
	// NilValue JSON element null
	NilValue
	// BoolValue JSON element true or false
	BoolValue
	// ArrayValue JSON element []
	ArrayValue
	// ObjectValue JSON element {}
	ObjectValue
)
28
29var hexDigits []byte
30var valueTypes []ValueType
31
32func init() {
33 hexDigits = make([]byte, 256)
34 for i := 0; i < len(hexDigits); i++ {
35 hexDigits[i] = 255
36 }
37 for i := '0'; i <= '9'; i++ {
38 hexDigits[i] = byte(i - '0')
39 }
40 for i := 'a'; i <= 'f'; i++ {
41 hexDigits[i] = byte((i - 'a') + 10)
42 }
43 for i := 'A'; i <= 'F'; i++ {
44 hexDigits[i] = byte((i - 'A') + 10)
45 }
46 valueTypes = make([]ValueType, 256)
47 for i := 0; i < len(valueTypes); i++ {
48 valueTypes[i] = InvalidValue
49 }
50 valueTypes['"'] = StringValue
51 valueTypes['-'] = NumberValue
52 valueTypes['0'] = NumberValue
53 valueTypes['1'] = NumberValue
54 valueTypes['2'] = NumberValue
55 valueTypes['3'] = NumberValue
56 valueTypes['4'] = NumberValue
57 valueTypes['5'] = NumberValue
58 valueTypes['6'] = NumberValue
59 valueTypes['7'] = NumberValue
60 valueTypes['8'] = NumberValue
61 valueTypes['9'] = NumberValue
62 valueTypes['t'] = BoolValue
63 valueTypes['f'] = BoolValue
64 valueTypes['n'] = NilValue
65 valueTypes['['] = ArrayValue
66 valueTypes['{'] = ObjectValue
67}
68
// Iterator is a io.Reader like object, with JSON specific read functions.
// Error is not returned as return value, but stored as Error member on this iterator instance.
type Iterator struct {
	cfg              *frozenConfig
	reader           io.Reader   // refill source; nil when parsing in-memory bytes
	buf              []byte      // current window of input
	head             int         // read cursor within buf
	tail             int         // number of valid bytes in buf
	depth            int         // current nesting depth, bounded by maxDepth
	captureStartedAt int         // start offset of an active capture in buf — semantics managed with loadMore; confirm in capture code
	captured         []byte      // bytes accumulated across refills while capturing
	Error            error       // first error encountered (io.EOF at normal end of input)
	Attachment       interface{} // open for customized decoder
}
83
84// NewIterator creates an empty Iterator instance
85func NewIterator(cfg API) *Iterator {
86 return &Iterator{
87 cfg: cfg.(*frozenConfig),
88 reader: nil,
89 buf: nil,
90 head: 0,
91 tail: 0,
92 depth: 0,
93 }
94}
95
96// Parse creates an Iterator instance from io.Reader
97func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
98 return &Iterator{
99 cfg: cfg.(*frozenConfig),
100 reader: reader,
101 buf: make([]byte, bufSize),
102 head: 0,
103 tail: 0,
104 depth: 0,
105 }
106}
107
108// ParseBytes creates an Iterator instance from byte array
109func ParseBytes(cfg API, input []byte) *Iterator {
110 return &Iterator{
111 cfg: cfg.(*frozenConfig),
112 reader: nil,
113 buf: input,
114 head: 0,
115 tail: len(input),
116 depth: 0,
117 }
118}
119
120// ParseString creates an Iterator instance from string
121func ParseString(cfg API, input string) *Iterator {
122 return ParseBytes(cfg, []byte(input))
123}
124
// Pool returns a pool can provide more iterator with same configuration
// (the iterator's frozenConfig itself serves as the IteratorPool).
func (iter *Iterator) Pool() IteratorPool {
	return iter.cfg
}
129
// Reset reuse iterator instance by specifying another reader.
// The existing buf is kept and reused as the read buffer.
// NOTE(review): Error is not cleared here — callers (e.g. the iterator
// pool) appear responsible for handing over a clean instance; confirm
// before reusing manually after a failure.
func (iter *Iterator) Reset(reader io.Reader) *Iterator {
	iter.reader = reader
	iter.head = 0
	iter.tail = 0
	iter.depth = 0
	return iter
}

// ResetBytes reuse iterator instance by specifying another byte array as input.
// Same caveat as Reset: Error is not cleared.
func (iter *Iterator) ResetBytes(input []byte) *Iterator {
	iter.reader = nil
	iter.buf = input
	iter.head = 0
	iter.tail = len(input)
	iter.depth = 0
	return iter
}
148
// WhatIsNext gets ValueType of relatively next json element.
// It is a peek: the token consumed by nextToken is pushed back with
// unreadByte, so the input position is unchanged (whitespace aside).
func (iter *Iterator) WhatIsNext() ValueType {
	valueType := valueTypes[iter.nextToken()]
	iter.unreadByte()
	return valueType
}
155
// skipWhitespacesWithoutLoadMore advances head past JSON whitespace in
// the buffered window only. It returns true when the buffer was
// exhausted (caller must loadMore and retry), false once head points at
// a non-whitespace byte.
func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
	for i := iter.head; i < iter.tail; i++ {
		c := iter.buf[i]
		switch c {
		case ' ', '\n', '\t', '\r':
			continue
		}
		iter.head = i
		return false
	}
	return true
}
168
169func (iter *Iterator) isObjectEnd() bool {
170 c := iter.nextToken()
171 if c == ',' {
172 return false
173 }
174 if c == '}' {
175 return true
176 }
177 iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
178 return true
179}
180
// nextToken returns the next non-whitespace byte and advances past it.
// JSON whitespace (space, \n, \t, \r) is skipped across buffer refills;
// 0 is returned once input is exhausted (with Error set by loadMore).
func (iter *Iterator) nextToken() byte {
	// a variation of skip whitespaces, returning the next non-whitespace token
	for {
		for i := iter.head; i < iter.tail; i++ {
			c := iter.buf[i]
			switch c {
			case ' ', '\n', '\t', '\r':
				continue
			}
			iter.head = i + 1
			return c
		}
		if !iter.loadMore() {
			return 0
		}
	}
}
198
199// ReportError record a error in iterator instance with current position.
200func (iter *Iterator) ReportError(operation string, msg string) {
201 if iter.Error != nil {
202 if iter.Error != io.EOF {
203 return
204 }
205 }
206 peekStart := iter.head - 10
207 if peekStart < 0 {
208 peekStart = 0
209 }
210 peekEnd := iter.head + 10
211 if peekEnd > iter.tail {
212 peekEnd = iter.tail
213 }
214 parsing := string(iter.buf[peekStart:peekEnd])
215 contextStart := iter.head - 50
216 if contextStart < 0 {
217 contextStart = 0
218 }
219 contextEnd := iter.head + 50
220 if contextEnd > iter.tail {
221 contextEnd = iter.tail
222 }
223 context := string(iter.buf[contextStart:contextEnd])
224 iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
225 operation, msg, iter.head-peekStart, parsing, context)
226}
227
// CurrentBuffer gets current buffer as string for debugging purpose.
// It shows up to 10 bytes before the cursor plus the whole buffered window.
func (iter *Iterator) CurrentBuffer() string {
	peekStart := iter.head - 10
	if peekStart < 0 {
		peekStart = 0
	}
	return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
		string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
}
237
238func (iter *Iterator) readByte() (ret byte) {
239 if iter.head == iter.tail {
240 if iter.loadMore() {
241 ret = iter.buf[iter.head]
242 iter.head++
243 return ret
244 }
245 return 0
246 }
247 ret = iter.buf[iter.head]
248 iter.head++
249 return ret
250}
251
// loadMore refills buf from the underlying reader, returning false at
// end of input. For in-memory iterators (reader == nil) it sets io.EOF
// once. While a capture is active, the bytes about to be overwritten are
// first appended to captured. The read loop retries zero-byte reads
// until the reader yields data or an error.
func (iter *Iterator) loadMore() bool {
	if iter.reader == nil {
		if iter.Error == nil {
			iter.head = iter.tail
			iter.Error = io.EOF
		}
		return false
	}
	if iter.captured != nil {
		// Preserve not-yet-captured bytes before the buffer is reused.
		iter.captured = append(iter.captured,
			iter.buf[iter.captureStartedAt:iter.tail]...)
		iter.captureStartedAt = 0
	}
	for {
		n, err := iter.reader.Read(iter.buf)
		if n == 0 {
			if err != nil {
				if iter.Error == nil {
					iter.Error = err
				}
				return false
			}
			// n == 0 with nil error: retry, per io.Reader semantics.
		} else {
			iter.head = 0
			iter.tail = n
			return true
		}
	}
}
281
// unreadByte steps the cursor back one byte so the last read byte can be
// re-read. It is a no-op after an error, and is only valid immediately
// after a successful read: head is decremented without bounds checking.
func (iter *Iterator) unreadByte() {
	if iter.Error != nil {
		return
	}
	iter.head--
	return
}
289
// Read read the next JSON element as generic interface{}.
// Mapping: string -> string, number -> float64 (or json.Number when
// UseNumber is configured), null -> nil, bool -> bool,
// array -> []interface{}, object -> map[string]interface{}.
func (iter *Iterator) Read() interface{} {
	valueType := iter.WhatIsNext()
	switch valueType {
	case StringValue:
		return iter.ReadString()
	case NumberValue:
		if iter.cfg.configBeforeFrozen.UseNumber {
			return json.Number(iter.readNumberAsString())
		}
		return iter.ReadFloat64()
	case NilValue:
		iter.skipFourBytes('n', 'u', 'l', 'l')
		return nil
	case BoolValue:
		return iter.ReadBool()
	case ArrayValue:
		// Non-nil empty slice so empty arrays round-trip as [].
		arr := []interface{}{}
		iter.ReadArrayCB(func(iter *Iterator) bool {
			var elem interface{}
			iter.ReadVal(&elem)
			arr = append(arr, elem)
			return true
		})
		return arr
	case ObjectValue:
		obj := map[string]interface{}{}
		iter.ReadMapCB(func(Iter *Iterator, field string) bool {
			var elem interface{}
			iter.ReadVal(&elem)
			obj[field] = elem
			return true
		})
		return obj
	default:
		iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
		return nil
	}
}
329
330// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9
331const maxDepth = 10000
332
333func (iter *Iterator) incrementDepth() (success bool) {
334 iter.depth++
335 if iter.depth <= maxDepth {
336 return true
337 }
338 iter.ReportError("incrementDepth", "exceeded max depth")
339 return false
340}
341
342func (iter *Iterator) decrementDepth() (success bool) {
343 iter.depth--
344 if iter.depth >= 0 {
345 return true
346 }
347 iter.ReportError("decrementDepth", "unexpected negative nesting")
348 return false
349}
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
new file mode 100644
index 0000000..204fe0e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_array.go
@@ -0,0 +1,64 @@
1package jsoniter
2
// ReadArray read array element, tells if the array has more element to read.
// Intended to be called repeatedly: '[' (followed by a non-']' value) and
// ',' mean another element follows; ']' and null mean the array is done.
func (iter *Iterator) ReadArray() (ret bool) {
	c := iter.nextToken()
	switch c {
	case 'n':
		iter.skipThreeBytes('u', 'l', 'l')
		return false // null
	case '[':
		c = iter.nextToken()
		if c != ']' {
			// Non-empty array: push back the first value's byte.
			iter.unreadByte()
			return true
		}
		return false
	case ']':
		return false
	case ',':
		return true
	default:
		iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
		return
	}
}
26
// ReadArrayCB read array with callback.
// The callback is invoked once per element and may return false to abort
// early. The result is true for a fully consumed array (or a null
// literal), false on abort or syntax error. Nesting depth is tracked to
// enforce maxDepth.
func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
	c := iter.nextToken()
	if c == '[' {
		if !iter.incrementDepth() {
			return false
		}
		c = iter.nextToken()
		if c != ']' {
			// First element: push back its leading byte for the callback.
			iter.unreadByte()
			if !callback(iter) {
				iter.decrementDepth()
				return false
			}
			c = iter.nextToken()
			for c == ',' {
				if !callback(iter) {
					iter.decrementDepth()
					return false
				}
				c = iter.nextToken()
			}
			if c != ']' {
				iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
				iter.decrementDepth()
				return false
			}
			return iter.decrementDepth()
		}
		// Empty array.
		return iter.decrementDepth()
	}
	if c == 'n' {
		iter.skipThreeBytes('u', 'l', 'l')
		return true // null
	}
	iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
	return false
}
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
new file mode 100644
index 0000000..8a3d8b6
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_float.go
@@ -0,0 +1,342 @@
1package jsoniter
2
3import (
4 "encoding/json"
5 "io"
6 "math/big"
7 "strconv"
8 "strings"
9 "unsafe"
10)
11
// floatDigits classifies every byte for the float fast path: '0'-'9' map
// to their digit value, number-terminating bytes map to endOfNumber,
// '.' maps to dotInNumber, and everything else is invalidCharForNumber.
var floatDigits []int8

const invalidCharForNumber = int8(-1)
const endOfNumber = int8(-2)
const dotInNumber = int8(-3)

func init() {
	floatDigits = make([]int8, 256)
	for i := range floatDigits {
		floatDigits[i] = invalidCharForNumber
	}
	for c := int8('0'); c <= int8('9'); c++ {
		floatDigits[c] = c - int8('0')
	}
	for _, c := range []byte{',', ']', '}', ' ', '\t', '\n'} {
		floatDigits[c] = endOfNumber
	}
	floatDigits['.'] = dotInNumber
}
34
// ReadBigFloat read big.Float.
// Precision is at least 64 bits and grows with the textual length of the
// number, so no digits of the input are lost. Returns nil on a prior
// read error or an unparsable number.
func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
	str := iter.readNumberAsString()
	if iter.Error != nil && iter.Error != io.EOF {
		return nil
	}
	prec := 64
	if len(str) > prec {
		prec = len(str)
	}
	val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
	if err != nil {
		iter.Error = err
		return nil
	}
	return val
}
52
53// ReadBigInt read big.Int
54func (iter *Iterator) ReadBigInt() (ret *big.Int) {
55 str := iter.readNumberAsString()
56 if iter.Error != nil && iter.Error != io.EOF {
57 return nil
58 }
59 ret = big.NewInt(0)
60 var success bool
61 ret, success = ret.SetString(str, 10)
62 if !success {
63 iter.ReportError("ReadBigInt", "invalid big int")
64 return nil
65 }
66 return ret
67}
68
// ReadFloat32 read float32.
// An optional leading '-' is consumed here; the magnitude is parsed by
// readPositiveFloat32 and negated.
func (iter *Iterator) ReadFloat32() (ret float32) {
	c := iter.nextToken()
	if c == '-' {
		return -iter.readPositiveFloat32()
	}
	iter.unreadByte()
	return iter.readPositiveFloat32()
}
78
// readPositiveFloat32 parses an unsigned float from the buffered window.
// Fast path: accumulate digits into a uint64 and divide by a power of
// ten. Anything it cannot handle safely — buffer boundary, exponent,
// accumulator overflow, leading zero, too many decimals — falls back to
// readFloat32SlowPath, which re-reads via strconv.
// NOTE(review): unlike readPositiveFloat64 there is no maxFloat64 guard
// in the decimal loop here; the uint64SafeToMultiple10 check still
// bounds the accumulator — confirm the precision is acceptable.
func (iter *Iterator) readPositiveFloat32() (ret float32) {
	i := iter.head
	// first char
	if i == iter.tail {
		return iter.readFloat32SlowPath()
	}
	c := iter.buf[i]
	i++
	ind := floatDigits[c]
	switch ind {
	case invalidCharForNumber:
		return iter.readFloat32SlowPath()
	case endOfNumber:
		iter.ReportError("readFloat32", "empty number")
		return
	case dotInNumber:
		iter.ReportError("readFloat32", "leading dot is invalid")
		return
	case 0:
		// A leading zero is only valid when the number is exactly 0 or 0.x.
		if i == iter.tail {
			return iter.readFloat32SlowPath()
		}
		c = iter.buf[i]
		switch c {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			iter.ReportError("readFloat32", "leading zero is invalid")
			return
		}
	}
	value := uint64(ind)
	// chars before dot
non_decimal_loop:
	for ; i < iter.tail; i++ {
		c = iter.buf[i]
		ind := floatDigits[c]
		switch ind {
		case invalidCharForNumber:
			return iter.readFloat32SlowPath()
		case endOfNumber:
			iter.head = i
			return float32(value)
		case dotInNumber:
			break non_decimal_loop
		}
		if value > uint64SafeToMultiple10 {
			// Next multiply could overflow; let strconv do it.
			return iter.readFloat32SlowPath()
		}
		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
	}
	// chars after dot
	if c == '.' {
		i++
		decimalPlaces := 0
		if i == iter.tail {
			return iter.readFloat32SlowPath()
		}
		for ; i < iter.tail; i++ {
			c = iter.buf[i]
			ind := floatDigits[c]
			switch ind {
			case endOfNumber:
				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
					iter.head = i
					return float32(float64(value) / float64(pow10[decimalPlaces]))
				}
				// too many decimal places
				return iter.readFloat32SlowPath()
			case invalidCharForNumber, dotInNumber:
				return iter.readFloat32SlowPath()
			}
			decimalPlaces++
			if value > uint64SafeToMultiple10 {
				return iter.readFloat32SlowPath()
			}
			value = (value << 3) + (value << 1) + uint64(ind)
		}
	}
	return iter.readFloat32SlowPath()
}
158
// readNumberAsString accumulates the raw characters of a JSON number
// (digits, signs, dot, exponent markers) into a string, stopping at the
// first byte that cannot appear in a number. An empty result is reported
// as an error. The final unsafe conversion avoids a copy; this relies on
// str escaping to the heap with the returned string — standard jsoniter
// idiom, but do not mutate str afterwards.
func (iter *Iterator) readNumberAsString() (ret string) {
	strBuf := [16]byte{}
	str := strBuf[0:0]
load_loop:
	for {
		for i := iter.head; i < iter.tail; i++ {
			c := iter.buf[i]
			switch c {
			case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
				str = append(str, c)
				continue
			default:
				iter.head = i
				break load_loop
			}
		}
		if !iter.loadMore() {
			break
		}
	}
	if iter.Error != nil && iter.Error != io.EOF {
		return
	}
	if len(str) == 0 {
		iter.ReportError("readNumberAsString", "invalid number")
	}
	return *(*string)(unsafe.Pointer(&str))
}
187
188func (iter *Iterator) readFloat32SlowPath() (ret float32) {
189 str := iter.readNumberAsString()
190 if iter.Error != nil && iter.Error != io.EOF {
191 return
192 }
193 errMsg := validateFloat(str)
194 if errMsg != "" {
195 iter.ReportError("readFloat32SlowPath", errMsg)
196 return
197 }
198 val, err := strconv.ParseFloat(str, 32)
199 if err != nil {
200 iter.Error = err
201 return
202 }
203 return float32(val)
204}
205
// ReadFloat64 read float64.
// Mirrors ReadFloat32: an optional leading '-' is consumed here and the
// magnitude is parsed by readPositiveFloat64.
func (iter *Iterator) ReadFloat64() (ret float64) {
	c := iter.nextToken()
	if c == '-' {
		return -iter.readPositiveFloat64()
	}
	iter.unreadByte()
	return iter.readPositiveFloat64()
}
215
// readPositiveFloat64 parses an unsigned float from the buffered window.
// Same fast path as readPositiveFloat32, plus an extra guard: the digit
// accumulator bails to the slow path once it exceeds maxFloat64 (2^53-1),
// past which a float64 can no longer represent integers exactly.
func (iter *Iterator) readPositiveFloat64() (ret float64) {
	i := iter.head
	// first char
	if i == iter.tail {
		return iter.readFloat64SlowPath()
	}
	c := iter.buf[i]
	i++
	ind := floatDigits[c]
	switch ind {
	case invalidCharForNumber:
		return iter.readFloat64SlowPath()
	case endOfNumber:
		iter.ReportError("readFloat64", "empty number")
		return
	case dotInNumber:
		iter.ReportError("readFloat64", "leading dot is invalid")
		return
	case 0:
		// A leading zero is only valid when the number is exactly 0 or 0.x.
		if i == iter.tail {
			return iter.readFloat64SlowPath()
		}
		c = iter.buf[i]
		switch c {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			iter.ReportError("readFloat64", "leading zero is invalid")
			return
		}
	}
	value := uint64(ind)
	// chars before dot
non_decimal_loop:
	for ; i < iter.tail; i++ {
		c = iter.buf[i]
		ind := floatDigits[c]
		switch ind {
		case invalidCharForNumber:
			return iter.readFloat64SlowPath()
		case endOfNumber:
			iter.head = i
			return float64(value)
		case dotInNumber:
			break non_decimal_loop
		}
		if value > uint64SafeToMultiple10 {
			// Next multiply could overflow; let strconv do it.
			return iter.readFloat64SlowPath()
		}
		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
	}
	// chars after dot
	if c == '.' {
		i++
		decimalPlaces := 0
		if i == iter.tail {
			return iter.readFloat64SlowPath()
		}
		for ; i < iter.tail; i++ {
			c = iter.buf[i]
			ind := floatDigits[c]
			switch ind {
			case endOfNumber:
				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
					iter.head = i
					return float64(value) / float64(pow10[decimalPlaces])
				}
				// too many decimal places
				return iter.readFloat64SlowPath()
			case invalidCharForNumber, dotInNumber:
				return iter.readFloat64SlowPath()
			}
			decimalPlaces++
			if value > uint64SafeToMultiple10 {
				return iter.readFloat64SlowPath()
			}
			value = (value << 3) + (value << 1) + uint64(ind)
			if value > maxFloat64 {
				// Past 2^53-1 the division result would lose precision.
				return iter.readFloat64SlowPath()
			}
		}
	}
	return iter.readFloat64SlowPath()
}
298
299func (iter *Iterator) readFloat64SlowPath() (ret float64) {
300 str := iter.readNumberAsString()
301 if iter.Error != nil && iter.Error != io.EOF {
302 return
303 }
304 errMsg := validateFloat(str)
305 if errMsg != "" {
306 iter.ReportError("readFloat64SlowPath", errMsg)
307 return
308 }
309 val, err := strconv.ParseFloat(str, 64)
310 if err != nil {
311 iter.Error = err
312 return
313 }
314 return val
315}
316
// validateFloat rejects number spellings that strconv.ParseFloat would
// accept but JSON forbids, such as `1.` and `1.e1`. The sign is consumed
// by the caller, so a leading '-' here means a doubled sign. It returns
// an error message, or "" when str is acceptable.
func validateFloat(str string) string {
	// strconv.ParseFloat is not validating `1.` or `1.e1`
	if str == "" {
		return "empty number"
	}
	if str[0] == '-' {
		return "-- is not valid"
	}
	if dot := strings.IndexByte(str, '.'); dot != -1 {
		if dot == len(str)-1 {
			return "dot can not be last character"
		}
		if next := str[dot+1]; next < '0' || next > '9' {
			return "missing digit after dot"
		}
	}
	return ""
}
338
// ReadNumber read json.Number.
// The textual form is preserved exactly as it appeared in the input.
func (iter *Iterator) ReadNumber() (ret json.Number) {
	return json.Number(iter.readNumberAsString())
}
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
new file mode 100644
index 0000000..d786a89
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_int.go
@@ -0,0 +1,346 @@
1package jsoniter
2
3import (
4 "math"
5 "strconv"
6)
7
// intDigits maps each byte to its decimal digit value, or
// invalidCharForNumber for non-digit bytes. Populated by init below.
var intDigits []int8

// Largest values that can still be multiplied by 10 (and have a digit added)
// without overflowing their respective unsigned types.
const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1

// maxFloat64 is the largest integer a float64 represents exactly (2^53 - 1).
const maxFloat64 = 1<<53 - 1
13
// init builds the byte-to-digit lookup table used by the integer readers:
// '0'..'9' map to 0..9, every other byte to invalidCharForNumber.
func init() {
	intDigits = make([]int8, 256)
	for i := 0; i < len(intDigits); i++ {
		intDigits[i] = invalidCharForNumber
	}
	for i := int8('0'); i <= int8('9'); i++ {
		intDigits[i] = i - int8('0')
	}
}
23
24// ReadUint read uint
25func (iter *Iterator) ReadUint() uint {
26 if strconv.IntSize == 32 {
27 return uint(iter.ReadUint32())
28 }
29 return uint(iter.ReadUint64())
30}
31
32// ReadInt read int
33func (iter *Iterator) ReadInt() int {
34 if strconv.IntSize == 32 {
35 return int(iter.ReadInt32())
36 }
37 return int(iter.ReadInt64())
38}
39
// ReadInt8 read int8
// The magnitude is parsed as uint32 and range-checked; the negative side
// allows one extra (MaxInt8+1) so that MinInt8 round-trips.
func (iter *Iterator) ReadInt8() (ret int8) {
	c := iter.nextToken()
	if c == '-' {
		val := iter.readUint32(iter.readByte())
		if val > math.MaxInt8+1 {
			iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
			return
		}
		return -int8(val)
	}
	val := iter.readUint32(c)
	if val > math.MaxInt8 {
		iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
		return
	}
	return int8(val)
}
58
// ReadUint8 read uint8
// The value is parsed as uint32 and range-checked against MaxUint8.
func (iter *Iterator) ReadUint8() (ret uint8) {
	val := iter.readUint32(iter.nextToken())
	if val > math.MaxUint8 {
		iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
		return
	}
	return uint8(val)
}
68
// ReadInt16 read int16
// The magnitude is parsed as uint32 and range-checked; the negative side
// allows one extra (MaxInt16+1) so that MinInt16 round-trips.
func (iter *Iterator) ReadInt16() (ret int16) {
	c := iter.nextToken()
	if c == '-' {
		val := iter.readUint32(iter.readByte())
		if val > math.MaxInt16+1 {
			iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
			return
		}
		return -int16(val)
	}
	val := iter.readUint32(c)
	if val > math.MaxInt16 {
		iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
		return
	}
	return int16(val)
}
87
// ReadUint16 read uint16
// The value is parsed as uint32 and range-checked against MaxUint16.
func (iter *Iterator) ReadUint16() (ret uint16) {
	val := iter.readUint32(iter.nextToken())
	if val > math.MaxUint16 {
		iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
		return
	}
	return uint16(val)
}
97
// ReadInt32 read int32
// The magnitude is parsed as uint32 and range-checked; the negative side
// allows one extra (MaxInt32+1) so that MinInt32 round-trips.
func (iter *Iterator) ReadInt32() (ret int32) {
	c := iter.nextToken()
	if c == '-' {
		val := iter.readUint32(iter.readByte())
		if val > math.MaxInt32+1 {
			iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
			return
		}
		return -int32(val)
	}
	val := iter.readUint32(c)
	if val > math.MaxInt32 {
		iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
		return
	}
	return int32(val)
}
116
// ReadUint32 read uint32
// It consumes the next token and delegates to the internal readUint32.
func (iter *Iterator) ReadUint32() (ret uint32) {
	return iter.readUint32(iter.nextToken())
}
121
122func (iter *Iterator) readUint32(c byte) (ret uint32) {
123 ind := intDigits[c]
124 if ind == 0 {
125 iter.assertInteger()
126 return 0 // single zero
127 }
128 if ind == invalidCharForNumber {
129 iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)}))
130 return
131 }
132 value := uint32(ind)
133 if iter.tail-iter.head > 10 {
134 i := iter.head
135 ind2 := intDigits[iter.buf[i]]
136 if ind2 == invalidCharForNumber {
137 iter.head = i
138 iter.assertInteger()
139 return value
140 }
141 i++
142 ind3 := intDigits[iter.buf[i]]
143 if ind3 == invalidCharForNumber {
144 iter.head = i
145 iter.assertInteger()
146 return value*10 + uint32(ind2)
147 }
148 //iter.head = i + 1
149 //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
150 i++
151 ind4 := intDigits[iter.buf[i]]
152 if ind4 == invalidCharForNumber {
153 iter.head = i
154 iter.assertInteger()
155 return value*100 + uint32(ind2)*10 + uint32(ind3)
156 }
157 i++
158 ind5 := intDigits[iter.buf[i]]
159 if ind5 == invalidCharForNumber {
160 iter.head = i
161 iter.assertInteger()
162 return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
163 }
164 i++
165 ind6 := intDigits[iter.buf[i]]
166 if ind6 == invalidCharForNumber {
167 iter.head = i
168 iter.assertInteger()
169 return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
170 }
171 i++
172 ind7 := intDigits[iter.buf[i]]
173 if ind7 == invalidCharForNumber {
174 iter.head = i
175 iter.assertInteger()
176 return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
177 }
178 i++
179 ind8 := intDigits[iter.buf[i]]
180 if ind8 == invalidCharForNumber {
181 iter.head = i
182 iter.assertInteger()
183 return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
184 }
185 i++
186 ind9 := intDigits[iter.buf[i]]
187 value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
188 iter.head = i
189 if ind9 == invalidCharForNumber {
190 iter.assertInteger()
191 return value
192 }
193 }
194 for {
195 for i := iter.head; i < iter.tail; i++ {
196 ind = intDigits[iter.buf[i]]
197 if ind == invalidCharForNumber {
198 iter.head = i
199 iter.assertInteger()
200 return value
201 }
202 if value > uint32SafeToMultiply10 {
203 value2 := (value << 3) + (value << 1) + uint32(ind)
204 if value2 < value {
205 iter.ReportError("readUint32", "overflow")
206 return
207 }
208 value = value2
209 continue
210 }
211 value = (value << 3) + (value << 1) + uint32(ind)
212 }
213 if !iter.loadMore() {
214 iter.assertInteger()
215 return value
216 }
217 }
218}
219
// ReadInt64 read int64
// The magnitude is parsed as uint64 and range-checked; the negative side
// allows one extra (MaxInt64+1) so that MinInt64 round-trips.
func (iter *Iterator) ReadInt64() (ret int64) {
	c := iter.nextToken()
	if c == '-' {
		val := iter.readUint64(iter.readByte())
		if val > math.MaxInt64+1 {
			iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
			return
		}
		return -int64(val)
	}
	val := iter.readUint64(c)
	if val > math.MaxInt64 {
		iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
		return
	}
	return int64(val)
}
238
// ReadUint64 read uint64
// It consumes the next token and delegates to the internal readUint64.
func (iter *Iterator) ReadUint64() uint64 {
	return iter.readUint64(iter.nextToken())
}
243
244func (iter *Iterator) readUint64(c byte) (ret uint64) {
245 ind := intDigits[c]
246 if ind == 0 {
247 iter.assertInteger()
248 return 0 // single zero
249 }
250 if ind == invalidCharForNumber {
251 iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)}))
252 return
253 }
254 value := uint64(ind)
255 if iter.tail-iter.head > 10 {
256 i := iter.head
257 ind2 := intDigits[iter.buf[i]]
258 if ind2 == invalidCharForNumber {
259 iter.head = i
260 iter.assertInteger()
261 return value
262 }
263 i++
264 ind3 := intDigits[iter.buf[i]]
265 if ind3 == invalidCharForNumber {
266 iter.head = i
267 iter.assertInteger()
268 return value*10 + uint64(ind2)
269 }
270 //iter.head = i + 1
271 //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
272 i++
273 ind4 := intDigits[iter.buf[i]]
274 if ind4 == invalidCharForNumber {
275 iter.head = i
276 iter.assertInteger()
277 return value*100 + uint64(ind2)*10 + uint64(ind3)
278 }
279 i++
280 ind5 := intDigits[iter.buf[i]]
281 if ind5 == invalidCharForNumber {
282 iter.head = i
283 iter.assertInteger()
284 return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
285 }
286 i++
287 ind6 := intDigits[iter.buf[i]]
288 if ind6 == invalidCharForNumber {
289 iter.head = i
290 iter.assertInteger()
291 return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
292 }
293 i++
294 ind7 := intDigits[iter.buf[i]]
295 if ind7 == invalidCharForNumber {
296 iter.head = i
297 iter.assertInteger()
298 return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
299 }
300 i++
301 ind8 := intDigits[iter.buf[i]]
302 if ind8 == invalidCharForNumber {
303 iter.head = i
304 iter.assertInteger()
305 return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
306 }
307 i++
308 ind9 := intDigits[iter.buf[i]]
309 value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
310 iter.head = i
311 if ind9 == invalidCharForNumber {
312 iter.assertInteger()
313 return value
314 }
315 }
316 for {
317 for i := iter.head; i < iter.tail; i++ {
318 ind = intDigits[iter.buf[i]]
319 if ind == invalidCharForNumber {
320 iter.head = i
321 iter.assertInteger()
322 return value
323 }
324 if value > uint64SafeToMultiple10 {
325 value2 := (value << 3) + (value << 1) + uint64(ind)
326 if value2 < value {
327 iter.ReportError("readUint64", "overflow")
328 return
329 }
330 value = value2
331 continue
332 }
333 value = (value << 3) + (value << 1) + uint64(ind)
334 }
335 if !iter.loadMore() {
336 iter.assertInteger()
337 return value
338 }
339 }
340}
341
342func (iter *Iterator) assertInteger() {
343 if iter.head < iter.tail && iter.buf[iter.head] == '.' {
344 iter.ReportError("assertInteger", "can not decode float as int")
345 }
346}
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
new file mode 100644
index 0000000..58ee89c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -0,0 +1,267 @@
1package jsoniter
2
3import (
4 "fmt"
5 "strings"
6)
7
// ReadObject read one field from object.
// If object ended, returns empty string.
// Otherwise, returns the field name.
// A leading ',' continues an object the caller is iterating over with
// repeated ReadObject calls; 'n' consumes a null and yields "".
func (iter *Iterator) ReadObject() (ret string) {
	c := iter.nextToken()
	switch c {
	case 'n':
		iter.skipThreeBytes('u', 'l', 'l')
		return "" // null
	case '{':
		c = iter.nextToken()
		if c == '"' {
			iter.unreadByte()
			field := iter.ReadString()
			c = iter.nextToken()
			if c != ':' {
				iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
			}
			return field
		}
		if c == '}' {
			return "" // end of object
		}
		iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
		return
	case ',':
		field := iter.ReadString()
		c = iter.nextToken()
		if c != ':' {
			iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
		}
		return field
	case '}':
		return "" // end of object
	default:
		iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
		return
	}
}
47
// readFieldHash reads an object field name and returns its FNV-1a hash
// (seeded 0x811c9dc5, prime 0x1000193), lowercasing ASCII letters when the
// config is case-insensitive. It also consumes the ':' after the name. The
// fast path assumes an ASCII name without escapes; on '\\' it falls back to
// readStringSlowPath and hashes the decoded bytes. Must match calcHash.
func (iter *Iterator) readFieldHash() int64 {
	hash := int64(0x811c9dc5)
	c := iter.nextToken()
	if c != '"' {
		iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
		return 0
	}
	for {
		for i := iter.head; i < iter.tail; i++ {
			// require ascii string and no escape
			b := iter.buf[i]
			if b == '\\' {
				// Escape found: decode the whole string the slow way.
				iter.head = i
				for _, b := range iter.readStringSlowPath() {
					if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
						b += 'a' - 'A'
					}
					hash ^= int64(b)
					hash *= 0x1000193
				}
				c = iter.nextToken()
				if c != ':' {
					iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
					return 0
				}
				return hash
			}
			if b == '"' {
				iter.head = i + 1
				c = iter.nextToken()
				if c != ':' {
					iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
					return 0
				}
				return hash
			}
			if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
				b += 'a' - 'A'
			}
			hash ^= int64(b)
			hash *= 0x1000193
		}
		if !iter.loadMore() {
			iter.ReportError("readFieldHash", `incomplete field name`)
			return 0
		}
	}
}
97
// calcHash computes the same FNV-1a style hash as readFieldHash (seed
// 0x811c9dc5, prime 0x1000193) over the bytes of str, lowercasing the string
// first when caseSensitive is false, so field names can be matched by hash.
func calcHash(str string, caseSensitive bool) int64 {
	if !caseSensitive {
		str = strings.ToLower(str)
	}
	hash := int64(0x811c9dc5)
	for i := 0; i < len(str); i++ {
		hash ^= int64(str[i])
		hash *= 0x1000193
	}
	return hash
}
109
// ReadObjectCB read object with callback, the key is ascii only and field name not copied
// Returns false as soon as the callback does or on malformed input; nesting
// depth is tracked via incrementDepth/decrementDepth to bound recursion.
// NOTE(review): the two "expect : after object field" errors below are
// reported under the op name "ReadObject", not "ReadObjectCB".
func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
	c := iter.nextToken()
	var field string
	if c == '{' {
		if !iter.incrementDepth() {
			return false
		}
		c = iter.nextToken()
		if c == '"' {
			iter.unreadByte()
			field = iter.ReadString()
			c = iter.nextToken()
			if c != ':' {
				iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
			}
			if !callback(iter, field) {
				iter.decrementDepth()
				return false
			}
			c = iter.nextToken()
			for c == ',' {
				field = iter.ReadString()
				c = iter.nextToken()
				if c != ':' {
					iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
				}
				if !callback(iter, field) {
					iter.decrementDepth()
					return false
				}
				c = iter.nextToken()
			}
			if c != '}' {
				iter.ReportError("ReadObjectCB", `object not ended with }`)
				iter.decrementDepth()
				return false
			}
			return iter.decrementDepth()
		}
		if c == '}' {
			return iter.decrementDepth() // empty object
		}
		iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
		iter.decrementDepth()
		return false
	}
	if c == 'n' {
		iter.skipThreeBytes('u', 'l', 'l')
		return true // null
	}
	iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
	return false
}
164
165// ReadMapCB read map with callback, the key can be any string
166func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
167 c := iter.nextToken()
168 if c == '{' {
169 if !iter.incrementDepth() {
170 return false
171 }
172 c = iter.nextToken()
173 if c == '"' {
174 iter.unreadByte()
175 field := iter.ReadString()
176 if iter.nextToken() != ':' {
177 iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
178 iter.decrementDepth()
179 return false
180 }
181 if !callback(iter, field) {
182 iter.decrementDepth()
183 return false
184 }
185 c = iter.nextToken()
186 for c == ',' {
187 field = iter.ReadString()
188 if iter.nextToken() != ':' {
189 iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
190 iter.decrementDepth()
191 return false
192 }
193 if !callback(iter, field) {
194 iter.decrementDepth()
195 return false
196 }
197 c = iter.nextToken()
198 }
199 if c != '}' {
200 iter.ReportError("ReadMapCB", `object not ended with }`)
201 iter.decrementDepth()
202 return false
203 }
204 return iter.decrementDepth()
205 }
206 if c == '}' {
207 return iter.decrementDepth()
208 }
209 iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
210 iter.decrementDepth()
211 return false
212 }
213 if c == 'n' {
214 iter.skipThreeBytes('u', 'l', 'l')
215 return true // null
216 }
217 iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
218 return false
219}
220
221func (iter *Iterator) readObjectStart() bool {
222 c := iter.nextToken()
223 if c == '{' {
224 c = iter.nextToken()
225 if c == '}' {
226 return false
227 }
228 iter.unreadByte()
229 return true
230 } else if c == 'n' {
231 iter.skipThreeBytes('u', 'l', 'l')
232 return false
233 }
234 iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
235 return false
236}
237
// readObjectFieldAsBytes reads a field name as bytes and consumes the ':'
// after it. The result aliases the iterator's buffer unless a refill was
// needed while skipping whitespace, in which case a private copy is made
// first (loadMore invalidates the aliased slice).
func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
	str := iter.ReadStringAsSlice()
	if iter.skipWhitespacesWithoutLoadMore() {
		// Buffer exhausted: copy str before loadMore overwrites it.
		if ret == nil {
			ret = make([]byte, len(str))
			copy(ret, str)
		}
		if !iter.loadMore() {
			return
		}
	}
	if iter.buf[iter.head] != ':' {
		iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
		return
	}
	iter.head++
	if iter.skipWhitespacesWithoutLoadMore() {
		if ret == nil {
			ret = make([]byte, len(str))
			copy(ret, str)
		}
		if !iter.loadMore() {
			return
		}
	}
	if ret == nil {
		return str // fast path: still aliasing the buffer
	}
	return ret
}
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
new file mode 100644
index 0000000..e91eefb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip.go
@@ -0,0 +1,130 @@
1package jsoniter
2
3import "fmt"
4
5// ReadNil reads a json object as nil and
6// returns whether it's a nil or not
7func (iter *Iterator) ReadNil() (ret bool) {
8 c := iter.nextToken()
9 if c == 'n' {
10 iter.skipThreeBytes('u', 'l', 'l') // null
11 return true
12 }
13 iter.unreadByte()
14 return false
15}
16
17// ReadBool reads a json object as BoolValue
18func (iter *Iterator) ReadBool() (ret bool) {
19 c := iter.nextToken()
20 if c == 't' {
21 iter.skipThreeBytes('r', 'u', 'e')
22 return true
23 }
24 if c == 'f' {
25 iter.skipFourBytes('a', 'l', 's', 'e')
26 return false
27 }
28 iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
29 return
30}
31
// SkipAndReturnBytes skip next JSON element, and return its content as []byte.
// The []byte can be kept, it is a copy of data.
func (iter *Iterator) SkipAndReturnBytes() []byte {
	iter.startCapture(iter.head)
	iter.Skip()
	return iter.stopCapture()
}
39
// SkipAndAppendBytes skips next JSON element and appends its content to
// buffer, returning the result.
func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte {
	iter.startCaptureTo(buf, iter.head)
	iter.Skip()
	return iter.stopCapture()
}
47
// startCaptureTo begins recording consumed input into buf, starting at
// buffer offset captureStartedAt. Panics if capture is already active
// (capture mode cannot be nested).
func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) {
	if iter.captured != nil {
		panic("already in capture mode")
	}
	iter.captureStartedAt = captureStartedAt
	iter.captured = buf
}
55
// startCapture begins capture into a fresh buffer (32 bytes of capacity).
func (iter *Iterator) startCapture(captureStartedAt int) {
	iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt)
}
59
// stopCapture ends capture mode and returns everything recorded: the bytes
// accumulated across refills plus the not-yet-flushed span of the current
// buffer. Panics if capture was never started.
func (iter *Iterator) stopCapture() []byte {
	if iter.captured == nil {
		panic("not in capture mode")
	}
	captured := iter.captured
	remaining := iter.buf[iter.captureStartedAt:iter.head]
	iter.captureStartedAt = -1
	iter.captured = nil
	return append(captured, remaining...)
}
70
// Skip skips a json object and positions to relatively the next json object
// It dispatches on the first token of the value.
func (iter *Iterator) Skip() {
	c := iter.nextToken()
	switch c {
	case '"':
		iter.skipString()
	case 'n':
		iter.skipThreeBytes('u', 'l', 'l') // null
	case 't':
		iter.skipThreeBytes('r', 'u', 'e') // true
	case 'f':
		iter.skipFourBytes('a', 'l', 's', 'e') // false
	case '0':
		// numbers starting with 0 are re-parsed via ReadFloat32 rather
		// than skipped blindly
		iter.unreadByte()
		iter.ReadFloat32()
	case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		iter.skipNumber()
	case '[':
		iter.skipArray()
	case '{':
		iter.skipObject()
	default:
		iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
		return
	}
}
97
98func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
99 if iter.readByte() != b1 {
100 iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
101 return
102 }
103 if iter.readByte() != b2 {
104 iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
105 return
106 }
107 if iter.readByte() != b3 {
108 iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
109 return
110 }
111 if iter.readByte() != b4 {
112 iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
113 return
114 }
115}
116
117func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
118 if iter.readByte() != b1 {
119 iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
120 return
121 }
122 if iter.readByte() != b2 {
123 iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
124 return
125 }
126 if iter.readByte() != b3 {
127 iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
128 return
129 }
130}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
new file mode 100644
index 0000000..9303de4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
@@ -0,0 +1,163 @@
1//+build jsoniter_sloppy
2
3package jsoniter
4
5// sloppy but faster implementation, do not validate the input json
6
// skipNumber advances past a number without validating it, stopping at the
// first delimiter (whitespace, ',', '}' or ']') or end of input.
func (iter *Iterator) skipNumber() {
	for {
		for i := iter.head; i < iter.tail; i++ {
			c := iter.buf[i]
			switch c {
			case ' ', '\n', '\r', '\t', ',', '}', ']':
				iter.head = i
				return
			}
		}
		if !iter.loadMore() {
			return
		}
	}
}
22
23func (iter *Iterator) skipArray() {
24 level := 1
25 if !iter.incrementDepth() {
26 return
27 }
28 for {
29 for i := iter.head; i < iter.tail; i++ {
30 switch iter.buf[i] {
31 case '"': // If inside string, skip it
32 iter.head = i + 1
33 iter.skipString()
34 i = iter.head - 1 // it will be i++ soon
35 case '[': // If open symbol, increase level
36 level++
37 if !iter.incrementDepth() {
38 return
39 }
40 case ']': // If close symbol, increase level
41 level--
42 if !iter.decrementDepth() {
43 return
44 }
45
46 // If we have returned to the original level, we're done
47 if level == 0 {
48 iter.head = i + 1
49 return
50 }
51 }
52 }
53 if !iter.loadMore() {
54 iter.ReportError("skipObject", "incomplete array")
55 return
56 }
57 }
58}
59
// skipObject skips a JSON object without validating its contents, tracking
// brace nesting (strings are skipped so braces inside them don't count)
// and the iterator's depth guard.
func (iter *Iterator) skipObject() {
	level := 1
	if !iter.incrementDepth() {
		return
	}

	for {
		for i := iter.head; i < iter.tail; i++ {
			switch iter.buf[i] {
			case '"': // If inside string, skip it
				iter.head = i + 1
				iter.skipString()
				i = iter.head - 1 // it will be i++ soon
			case '{': // If open symbol, increase level
				level++
				if !iter.incrementDepth() {
					return
				}
			case '}': // If close symbol, decrease level
				level--
				if !iter.decrementDepth() {
					return
				}

				// If we have returned to the original level, we're done
				if level == 0 {
					iter.head = i + 1
					return
				}
			}
		}
		if !iter.loadMore() {
			iter.ReportError("skipObject", "incomplete object")
			return
		}
	}
}
97
// skipString advances past a string body (opening quote already consumed),
// refilling the buffer as needed until the closing quote is found.
func (iter *Iterator) skipString() {
	for {
		end, escaped := iter.findStringEnd()
		if end == -1 {
			if !iter.loadMore() {
				iter.ReportError("skipString", "incomplete string")
				return
			}
			if escaped {
				// Previous buffer ended with '\': the first byte of the new
				// buffer is the escaped character, not a terminator.
				iter.head = 1 // skip the first char as last char read is \
			}
		} else {
			iter.head = end
			return
		}
	}
}
115
// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
// Tries to find the end of string
// Support if string contains escaped quote symbols.
// Returns (index just past the closing quote, whether an escape was seen).
// When the buffer ends before the string does it returns -1; the boolean
// then tells the caller whether the buffer ended inside an escape sequence.
func (iter *Iterator) findStringEnd() (int, bool) {
	escaped := false
	for i := iter.head; i < iter.tail; i++ {
		c := iter.buf[i]
		if c == '"' {
			if !escaped {
				return i + 1, false
			}
			// Count the backslashes immediately preceding the quote: an
			// even count means the quote itself is unescaped.
			j := i - 1
			for {
				if j < iter.head || iter.buf[j] != '\\' {
					// even number of backslashes
					// either end of buffer, or " found
					return i + 1, true
				}
				j--
				if j < iter.head || iter.buf[j] != '\\' {
					// odd number of backslashes
					// it is \" or \\\"
					break
				}
				j--
			}
		} else if c == '\\' {
			escaped = true
		}
	}
	// No closing quote in this buffer: determine whether it ends mid-escape
	// by counting trailing backslashes.
	j := iter.tail - 1
	for {
		if j < iter.head || iter.buf[j] != '\\' {
			// even number of backslashes
			// either end of buffer, or " found
			return -1, false // do not end with \
		}
		j--
		if j < iter.head || iter.buf[j] != '\\' {
			// odd number of backslashes
			// it is \" or \\\"
			break
		}
		j--

	}
	return -1, true // end with \
}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
new file mode 100644
index 0000000..6cf66d0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go
@@ -0,0 +1,99 @@
1//+build !jsoniter_sloppy
2
3package jsoniter
4
5import (
6 "fmt"
7 "io"
8)
9
// skipNumber validates-and-skips a number: the fast path (trySkipNumber)
// handles simple integers/decimals; anything it cannot decide is re-read as
// a float64, falling back to a big float when that overflows.
func (iter *Iterator) skipNumber() {
	if !iter.trySkipNumber() {
		iter.unreadByte()
		if iter.Error != nil && iter.Error != io.EOF {
			return
		}
		iter.ReadFloat64()
		if iter.Error != nil && iter.Error != io.EOF {
			iter.Error = nil
			iter.ReadBigFloat()
		}
	}
}
23
// trySkipNumber scans digits and at most one dot within the current buffer.
// It returns true when the number was definitively handled (skipped up to a
// delimiter, or an error was already reported) and false when the caller
// must fall back to a full parse (exponents, buffer boundary, other bytes).
func (iter *Iterator) trySkipNumber() bool {
	dotFound := false
	for i := iter.head; i < iter.tail; i++ {
		c := iter.buf[i]
		switch c {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		case '.':
			if dotFound {
				iter.ReportError("validateNumber", `more than one dot found in number`)
				return true // already failed
			}
			if i+1 == iter.tail {
				// Dot is the last buffered byte; cannot decide here.
				return false
			}
			c = iter.buf[i+1]
			switch c {
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			default:
				iter.ReportError("validateNumber", `missing digit after dot`)
				return true // already failed
			}
			dotFound = true
		default:
			switch c {
			case ',', ']', '}', ' ', '\t', '\n', '\r':
				if iter.head == i {
					return false // if - without following digits
				}
				iter.head = i
				return true // must be valid
			}
			return false // may be invalid
		}
	}
	return false
}
60
// skipString validates-and-skips a string: the fast path handles strings
// without escapes inside the buffer; otherwise the token is re-read fully.
func (iter *Iterator) skipString() {
	if !iter.trySkipString() {
		iter.unreadByte()
		iter.ReadString()
	}
}
67
// trySkipString scans the current buffer for the closing quote. It returns
// true when the string was handled (skipped, or a control-character error
// was already reported) and false when an escape or the buffer boundary
// forces the caller to fall back to a full read.
func (iter *Iterator) trySkipString() bool {
	for i := iter.head; i < iter.tail; i++ {
		c := iter.buf[i]
		if c == '"' {
			iter.head = i + 1
			return true // valid
		} else if c == '\\' {
			return false
		} else if c < ' ' {
			iter.ReportError("trySkipString",
				fmt.Sprintf(`invalid control character found: %d`, c))
			return true // already failed
		}
	}
	return false
}
84
// skipObject skips an object by fully parsing it via ReadObjectCB and
// recursively skipping every value, so the input is validated as it goes.
func (iter *Iterator) skipObject() {
	iter.unreadByte()
	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
		iter.Skip()
		return true
	})
}
92
// skipArray skips an array by fully parsing it via ReadArrayCB and
// recursively skipping every element, so the input is validated as it goes.
func (iter *Iterator) skipArray() {
	iter.unreadByte()
	iter.ReadArrayCB(func(iter *Iterator) bool {
		iter.Skip()
		return true
	})
}
diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go
new file mode 100644
index 0000000..adc487e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_str.go
@@ -0,0 +1,215 @@
1package jsoniter
2
3import (
4 "fmt"
5 "unicode/utf16"
6)
7
// ReadString read string from iterator
// The fast path copies directly out of the buffer when the string contains
// no escapes and no control characters; a '\\' diverts to the slow path.
// A JSON null is accepted and returned as "".
func (iter *Iterator) ReadString() (ret string) {
	c := iter.nextToken()
	if c == '"' {
		for i := iter.head; i < iter.tail; i++ {
			c := iter.buf[i]
			if c == '"' {
				ret = string(iter.buf[iter.head:i])
				iter.head = i + 1
				return ret
			} else if c == '\\' {
				break
			} else if c < ' ' {
				iter.ReportError("ReadString",
					fmt.Sprintf(`invalid control character found: %d`, c))
				return
			}
		}
		return iter.readStringSlowPath()
	} else if c == 'n' {
		iter.skipThreeBytes('u', 'l', 'l')
		return ""
	}
	iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
	return
}
34
// readStringSlowPath decodes a string byte by byte, resolving backslash
// escapes via readEscapedChar, until the closing quote. Reaching the end of
// input without a quote is reported as an error.
func (iter *Iterator) readStringSlowPath() (ret string) {
	var str []byte
	var c byte
	for iter.Error == nil {
		c = iter.readByte()
		if c == '"' {
			return string(str)
		}
		if c == '\\' {
			c = iter.readByte()
			str = iter.readEscapedChar(c, str)
		} else {
			str = append(str, c)
		}
	}
	iter.ReportError("readStringSlowPath", "unexpected end of input")
	return
}
53
// readEscapedChar appends the character for escape `\c` to str and returns
// the extended slice (nil on error). For \u it decodes four hex digits; a
// leading surrogate is combined with a following \uXXXX when the pair is
// valid, otherwise both halves are appended individually (each encoding to
// the replacement character via appendRune).
func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
	switch c {
	case 'u':
		r := iter.readU4()
		if utf16.IsSurrogate(r) {
			c = iter.readByte()
			if iter.Error != nil {
				return nil
			}
			if c != '\\' {
				// No second escape: keep the lone surrogate as-is.
				iter.unreadByte()
				str = appendRune(str, r)
				return str
			}
			c = iter.readByte()
			if iter.Error != nil {
				return nil
			}
			if c != 'u' {
				// Some other escape follows; emit the surrogate then recurse.
				str = appendRune(str, r)
				return iter.readEscapedChar(c, str)
			}
			r2 := iter.readU4()
			if iter.Error != nil {
				return nil
			}
			combined := utf16.DecodeRune(r, r2)
			if combined == '\uFFFD' {
				// Invalid pair: append both halves separately.
				str = appendRune(str, r)
				str = appendRune(str, r2)
			} else {
				str = appendRune(str, combined)
			}
		} else {
			str = appendRune(str, r)
		}
	case '"':
		str = append(str, '"')
	case '\\':
		str = append(str, '\\')
	case '/':
		str = append(str, '/')
	case 'b':
		str = append(str, '\b')
	case 'f':
		str = append(str, '\f')
	case 'n':
		str = append(str, '\n')
	case 'r':
		str = append(str, '\r')
	case 't':
		str = append(str, '\t')
	default:
		iter.ReportError("readEscapedChar",
			`invalid escape char after \`)
		return nil
	}
	return str
}
113
// ReadStringAsSlice read string from iterator without copying into string form.
// The []byte can not be kept, as it will change after next iterator call.
// The fast path aliases the buffer; when the string crosses a buffer refill,
// a growing copy is accumulated instead. Escapes are NOT decoded here.
// NOTE(review): the error text mentions n, but a null token is not handled
// by this method — confirm whether that is intended.
func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
	c := iter.nextToken()
	if c == '"' {
		for i := iter.head; i < iter.tail; i++ {
			// require ascii string and no escape
			// for: field name, base64, number
			if iter.buf[i] == '"' {
				// fast path: reuse the underlying buffer
				ret = iter.buf[iter.head:i]
				iter.head = i + 1
				return ret
			}
		}
		readLen := iter.tail - iter.head
		copied := make([]byte, readLen, readLen*2)
		copy(copied, iter.buf[iter.head:iter.tail])
		iter.head = iter.tail
		for iter.Error == nil {
			c := iter.readByte()
			if c == '"' {
				return copied
			}
			copied = append(copied, c)
		}
		return copied
	}
	iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
	return
}
145
// readU4 reads the four hex digits of a \uXXXX escape (both cases accepted)
// and returns the code unit; on a non-hex byte it reports an error and
// returns the partial value.
func (iter *Iterator) readU4() (ret rune) {
	for i := 0; i < 4; i++ {
		c := iter.readByte()
		if iter.Error != nil {
			return
		}
		if c >= '0' && c <= '9' {
			ret = ret*16 + rune(c-'0')
		} else if c >= 'a' && c <= 'f' {
			ret = ret*16 + rune(c-'a'+10)
		} else if c >= 'A' && c <= 'F' {
			ret = ret*16 + rune(c-'A'+10)
		} else {
			iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
			return
		}
	}
	return ret
}
165
// UTF-8 encoding constants (mirroring unicode/utf8's internals).
const (
	t1 = 0x00 // 0000 0000
	tx = 0x80 // 1000 0000
	t2 = 0xC0 // 1100 0000
	t3 = 0xE0 // 1110 0000
	t4 = 0xF0 // 1111 0000
	t5 = 0xF8 // 1111 1000

	maskx = 0x3F // 0011 1111
	mask2 = 0x1F // 0001 1111
	mask3 = 0x0F // 0000 1111
	mask4 = 0x07 // 0000 0111

	rune1Max = 1<<7 - 1
	rune2Max = 1<<11 - 1
	rune3Max = 1<<16 - 1

	surrogateMin = 0xD800
	surrogateMax = 0xDFFF

	maxRune   = '\U0010FFFF' // Maximum valid Unicode code point.
	runeError = '\uFFFD'     // the "error" Rune or "Unicode replacement character"
)

// appendRune appends the UTF-8 encoding of r to p and returns the extended
// slice. Invalid code points (surrogates, values above maxRune, negatives)
// are encoded as the replacement character, matching the behavior of the
// standard library's encoder.
func appendRune(p []byte, r rune) []byte {
	// Negative values are erroneous. Making it unsigned addresses the problem.
	i := uint32(r)
	switch {
	case i <= rune1Max:
		return append(p, byte(r))
	case i <= rune2Max:
		return append(p, t2|byte(r>>6), tx|byte(r)&maskx)
	default:
		if i > maxRune || (surrogateMin <= i && i <= surrogateMax) {
			r = runeError
			i = uint32(r)
		}
		if i <= rune3Max {
			return append(p, t3|byte(r>>12), tx|byte(r>>6)&maskx, tx|byte(r)&maskx)
		}
		return append(p, t4|byte(r>>18), tx|byte(r>>12)&maskx, tx|byte(r>>6)&maskx, tx|byte(r)&maskx)
	}
}
diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go
new file mode 100644
index 0000000..c2934f9
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/jsoniter.go
@@ -0,0 +1,18 @@
1// Package jsoniter implements encoding and decoding of JSON as defined in
2// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json.
3// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter
4// and variable type declarations (if any).
 5// jsoniter interfaces give 100% compatibility with code using the standard lib.
6//
7// "JSON and Go"
8// (https://golang.org/doc/articles/json_and_go.html)
9// gives a description of how Marshal/Unmarshal operate
10// between arbitrary or predefined json objects and bytes,
11// and it applies to jsoniter.Marshal/Unmarshal as well.
12//
13// Besides, jsoniter.Iterator provides a different set of interfaces
14// iterating given bytes/string/reader
15// and yielding parsed elements one by one.
16// This set of interfaces reads input as required and gives
17// better performance.
18package jsoniter
diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go
new file mode 100644
index 0000000..e2389b5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/pool.go
@@ -0,0 +1,42 @@
1package jsoniter
2
3import (
4 "io"
5)
6
// IteratorPool is a thread-safe pool of Iterators sharing one configuration.
// Borrowed iterators must be handed back via ReturnIterator for reuse.
type IteratorPool interface {
	BorrowIterator(data []byte) *Iterator
	ReturnIterator(iter *Iterator)
}

// StreamPool is a thread-safe pool of Streams sharing one configuration.
// Borrowed streams must be handed back via ReturnStream for reuse.
type StreamPool interface {
	BorrowStream(writer io.Writer) *Stream
	ReturnStream(stream *Stream)
}
18
19func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
20 stream := cfg.streamPool.Get().(*Stream)
21 stream.Reset(writer)
22 return stream
23}
24
25func (cfg *frozenConfig) ReturnStream(stream *Stream) {
26 stream.out = nil
27 stream.Error = nil
28 stream.Attachment = nil
29 cfg.streamPool.Put(stream)
30}
31
32func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
33 iter := cfg.iteratorPool.Get().(*Iterator)
34 iter.ResetBytes(data)
35 return iter
36}
37
38func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
39 iter.Error = nil
40 iter.Attachment = nil
41 cfg.iteratorPool.Put(iter)
42}
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
new file mode 100644
index 0000000..39acb32
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect.go
@@ -0,0 +1,337 @@
1package jsoniter
2
3import (
4 "fmt"
5 "reflect"
6 "unsafe"
7
8 "github.com/modern-go/reflect2"
9)
10
// ValDecoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValDecoder with json.Decoder.
// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
//
// Reflection on type to create decoders, which is then cached
// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions
// 1. create instance of new value, for example *int will need a int to be allocated
// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New
// 3. assignment to map, both key and value will be reflect.Value
// For a simple struct binding, it will be reflect.Value free and allocation free
type ValDecoder interface {
	Decode(ptr unsafe.Pointer, iter *Iterator)
}

// ValEncoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValEncoder with json.Encoder.
// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
type ValEncoder interface {
	IsEmpty(ptr unsafe.Pointer) bool
	Encode(ptr unsafe.Pointer, stream *Stream)
}

// checkIsEmpty is the subset of ValEncoder needed when only the
// omitempty decision matters.
type checkIsEmpty interface {
	IsEmpty(ptr unsafe.Pointer) bool
}

// ctx carries the state of one codec-construction pass: the frozen
// configuration, a human-readable path prefix for error messages, and
// per-pass caches that break recursion on self-referential types.
type ctx struct {
	*frozenConfig
	prefix   string
	encoders map[reflect2.Type]ValEncoder
	decoders map[reflect2.Type]ValDecoder
}
43
44func (b *ctx) caseSensitive() bool {
45 if b.frozenConfig == nil {
46 // default is case-insensitive
47 return false
48 }
49 return b.frozenConfig.caseSensitive
50}
51
52func (b *ctx) append(prefix string) *ctx {
53 return &ctx{
54 frozenConfig: b.frozenConfig,
55 prefix: b.prefix + " " + prefix,
56 encoders: b.encoders,
57 decoders: b.decoders,
58 }
59}
60
// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal
func (iter *Iterator) ReadVal(obj interface{}) {
	// Remember the nesting depth so an unbalanced decoder (one that
	// consumes more '['/'{' than it closes) is detected afterwards.
	depth := iter.depth
	cacheKey := reflect2.RTypeOf(obj)
	decoder := iter.cfg.getDecoderFromCache(cacheKey)
	if decoder == nil {
		typ := reflect2.TypeOf(obj)
		// Only pointers can be written through; mirrors encoding/json.
		if typ == nil || typ.Kind() != reflect.Ptr {
			iter.ReportError("ReadVal", "can only unmarshal into pointer")
			return
		}
		decoder = iter.cfg.DecoderOf(typ)
	}
	ptr := reflect2.PtrOf(obj)
	if ptr == nil {
		// A typed nil pointer lands here: there is no storage to fill.
		iter.ReportError("ReadVal", "can not read into nil pointer")
		return
	}
	decoder.Decode(ptr, iter)
	if iter.depth != depth {
		iter.ReportError("ReadVal", "unexpected mismatched nesting")
		return
	}
}
85
86// WriteVal copy the go interface into underlying JSON, same as json.Marshal
87func (stream *Stream) WriteVal(val interface{}) {
88 if nil == val {
89 stream.WriteNil()
90 return
91 }
92 cacheKey := reflect2.RTypeOf(val)
93 encoder := stream.cfg.getEncoderFromCache(cacheKey)
94 if encoder == nil {
95 typ := reflect2.TypeOf(val)
96 encoder = stream.cfg.EncoderOf(typ)
97 }
98 encoder.Encode(reflect2.PtrOf(val), stream)
99}
100
101func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
102 cacheKey := typ.RType()
103 decoder := cfg.getDecoderFromCache(cacheKey)
104 if decoder != nil {
105 return decoder
106 }
107 ctx := &ctx{
108 frozenConfig: cfg,
109 prefix: "",
110 decoders: map[reflect2.Type]ValDecoder{},
111 encoders: map[reflect2.Type]ValEncoder{},
112 }
113 ptrType := typ.(*reflect2.UnsafePtrType)
114 decoder = decoderOfType(ctx, ptrType.Elem())
115 cfg.addDecoderToCache(cacheKey, decoder)
116 return decoder
117}
118
// decoderOfType builds the decoder for typ: extension-created decoders
// take precedence over the generated one, and every result is passed
// through the decorators in a fixed order (global extensions, then the
// config's decoder extension, then extra extensions).
func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
	decoder := getTypeDecoderFromExtension(ctx, typ)
	if decoder != nil {
		return decoder
	}
	decoder = createDecoderOfType(ctx, typ)
	for _, extension := range extensions {
		decoder = extension.DecorateDecoder(typ, decoder)
	}
	decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
	for _, extension := range ctx.extraExtensions {
		decoder = extension.DecorateDecoder(typ, decoder)
	}
	return decoder
}

// createDecoderOfType guards against infinite recursion on
// self-referential types: a placeholder is cached before recursing, so a
// type that (directly or indirectly) references itself resolves to the
// placeholder, which is patched with the real decoder afterwards.
func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
	decoder := ctx.decoders[typ]
	if decoder != nil {
		return decoder
	}
	placeholder := &placeholderDecoder{}
	ctx.decoders[typ] = placeholder
	decoder = _createDecoderOfType(ctx, typ)
	placeholder.decoder = decoder
	return decoder
}
146
147func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
148 decoder := createDecoderOfJsonRawMessage(ctx, typ)
149 if decoder != nil {
150 return decoder
151 }
152 decoder = createDecoderOfJsonNumber(ctx, typ)
153 if decoder != nil {
154 return decoder
155 }
156 decoder = createDecoderOfMarshaler(ctx, typ)
157 if decoder != nil {
158 return decoder
159 }
160 decoder = createDecoderOfAny(ctx, typ)
161 if decoder != nil {
162 return decoder
163 }
164 decoder = createDecoderOfNative(ctx, typ)
165 if decoder != nil {
166 return decoder
167 }
168 switch typ.Kind() {
169 case reflect.Interface:
170 ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
171 if isIFace {
172 return &ifaceDecoder{valType: ifaceType}
173 }
174 return &efaceDecoder{}
175 case reflect.Struct:
176 return decoderOfStruct(ctx, typ)
177 case reflect.Array:
178 return decoderOfArray(ctx, typ)
179 case reflect.Slice:
180 return decoderOfSlice(ctx, typ)
181 case reflect.Map:
182 return decoderOfMap(ctx, typ)
183 case reflect.Ptr:
184 return decoderOfOptional(ctx, typ)
185 default:
186 return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
187 }
188}
189
// EncoderOf returns (building and caching on first use) the ValEncoder for typ.
func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
	cacheKey := typ.RType()
	encoder := cfg.getEncoderFromCache(cacheKey)
	if encoder != nil {
		return encoder
	}
	ctx := &ctx{
		frozenConfig: cfg,
		prefix:       "",
		decoders:     map[reflect2.Type]ValDecoder{},
		encoders:     map[reflect2.Type]ValEncoder{},
	}
	encoder = encoderOfType(ctx, typ)
	if typ.LikePtr() {
		// Values stored directly in the interface word ("like pointer")
		// need one level of indirection added before encoding.
		encoder = &onePtrEncoder{encoder}
	}
	cfg.addEncoderToCache(cacheKey, encoder)
	return encoder
}

// onePtrEncoder adapts an encoder expecting a pointer-to-value when the
// caller hands over the value word itself: it passes the address of ptr.
type onePtrEncoder struct {
	encoder ValEncoder
}

func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
}

func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
}
221
222func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
223 encoder := getTypeEncoderFromExtension(ctx, typ)
224 if encoder != nil {
225 return encoder
226 }
227 encoder = createEncoderOfType(ctx, typ)
228 for _, extension := range extensions {
229 encoder = extension.DecorateEncoder(typ, encoder)
230 }
231 encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
232 for _, extension := range ctx.extraExtensions {
233 encoder = extension.DecorateEncoder(typ, encoder)
234 }
235 return encoder
236}
237
238func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
239 encoder := ctx.encoders[typ]
240 if encoder != nil {
241 return encoder
242 }
243 placeholder := &placeholderEncoder{}
244 ctx.encoders[typ] = placeholder
245 encoder = _createEncoderOfType(ctx, typ)
246 placeholder.encoder = encoder
247 return encoder
248}
249func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
250 encoder := createEncoderOfJsonRawMessage(ctx, typ)
251 if encoder != nil {
252 return encoder
253 }
254 encoder = createEncoderOfJsonNumber(ctx, typ)
255 if encoder != nil {
256 return encoder
257 }
258 encoder = createEncoderOfMarshaler(ctx, typ)
259 if encoder != nil {
260 return encoder
261 }
262 encoder = createEncoderOfAny(ctx, typ)
263 if encoder != nil {
264 return encoder
265 }
266 encoder = createEncoderOfNative(ctx, typ)
267 if encoder != nil {
268 return encoder
269 }
270 kind := typ.Kind()
271 switch kind {
272 case reflect.Interface:
273 return &dynamicEncoder{typ}
274 case reflect.Struct:
275 return encoderOfStruct(ctx, typ)
276 case reflect.Array:
277 return encoderOfArray(ctx, typ)
278 case reflect.Slice:
279 return encoderOfSlice(ctx, typ)
280 case reflect.Map:
281 return encoderOfMap(ctx, typ)
282 case reflect.Ptr:
283 return encoderOfOptional(ctx, typ)
284 default:
285 return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
286 }
287}
288
289type lazyErrorDecoder struct {
290 err error
291}
292
293func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
294 if iter.WhatIsNext() != NilValue {
295 if iter.Error == nil {
296 iter.Error = decoder.err
297 }
298 } else {
299 iter.Skip()
300 }
301}
302
303type lazyErrorEncoder struct {
304 err error
305}
306
307func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
308 if ptr == nil {
309 stream.WriteNil()
310 } else if stream.Error == nil {
311 stream.Error = encoder.err
312 }
313}
314
315func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
316 return false
317}
318
// placeholderDecoder breaks decoder-construction cycles for recursive
// types: it is cached first and forwards to the real decoder, which is
// filled in once construction completes.
type placeholderDecoder struct {
	decoder ValDecoder
}

func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	decoder.decoder.Decode(ptr, iter)
}

// placeholderEncoder is the encoder-side counterpart of placeholderDecoder.
type placeholderEncoder struct {
	encoder ValEncoder
}

func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	encoder.encoder.Encode(ptr, stream)
}

func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return encoder.encoder.IsEmpty(ptr)
}
diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go
new file mode 100644
index 0000000..13a0b7b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_array.go
@@ -0,0 +1,104 @@
1package jsoniter
2
3import (
4 "fmt"
5 "github.com/modern-go/reflect2"
6 "io"
7 "unsafe"
8)
9
10func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
11 arrayType := typ.(*reflect2.UnsafeArrayType)
12 decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
13 return &arrayDecoder{arrayType, decoder}
14}
15
16func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
17 arrayType := typ.(*reflect2.UnsafeArrayType)
18 if arrayType.Len() == 0 {
19 return emptyArrayEncoder{}
20 }
21 encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
22 return &arrayEncoder{arrayType, encoder}
23}
24
// emptyArrayEncoder handles [0]T arrays: the output is always "[]".
type emptyArrayEncoder struct{}

func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteEmptyArray()
}

// IsEmpty always reports true: a zero-length array has no content.
func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return true
}
34
// arrayEncoder writes a fixed-size (non-empty) array as a JSON array.
type arrayEncoder struct {
	arrayType   *reflect2.UnsafeArrayType
	elemEncoder ValEncoder
}

func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteArrayStart()
	// Element 0 sits at offset 0, so ptr itself addresses it.
	elemPtr := unsafe.Pointer(ptr)
	encoder.elemEncoder.Encode(elemPtr, stream)
	for i := 1; i < encoder.arrayType.Len(); i++ {
		stream.WriteMore()
		elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
		encoder.elemEncoder.Encode(elemPtr, stream)
	}
	stream.WriteArrayEnd()
	if stream.Error != nil && stream.Error != io.EOF {
		// Prefix any element error with the array type for context.
		stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
	}
}

// IsEmpty always reports false: encoderOfArray only builds this encoder
// for arrays of length >= 1, which always render at least one element.
func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return false
}
58
59type arrayDecoder struct {
60 arrayType *reflect2.UnsafeArrayType
61 elemDecoder ValDecoder
62}
63
64func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
65 decoder.doDecode(ptr, iter)
66 if iter.Error != nil && iter.Error != io.EOF {
67 iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
68 }
69}
70
71func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
72 c := iter.nextToken()
73 arrayType := decoder.arrayType
74 if c == 'n' {
75 iter.skipThreeBytes('u', 'l', 'l')
76 return
77 }
78 if c != '[' {
79 iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
80 return
81 }
82 c = iter.nextToken()
83 if c == ']' {
84 return
85 }
86 iter.unreadByte()
87 elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
88 decoder.elemDecoder.Decode(elemPtr, iter)
89 length := 1
90 for c = iter.nextToken(); c == ','; c = iter.nextToken() {
91 if length >= arrayType.Len() {
92 iter.Skip()
93 continue
94 }
95 idx := length
96 length += 1
97 elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
98 decoder.elemDecoder.Decode(elemPtr, iter)
99 }
100 if c != ']' {
101 iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
102 return
103 }
104}
diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go
new file mode 100644
index 0000000..8b6bc8b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go
@@ -0,0 +1,70 @@
1package jsoniter
2
3import (
4 "github.com/modern-go/reflect2"
5 "reflect"
6 "unsafe"
7)
8
9type dynamicEncoder struct {
10 valType reflect2.Type
11}
12
13func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
14 obj := encoder.valType.UnsafeIndirect(ptr)
15 stream.WriteVal(obj)
16}
17
18func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
19 return encoder.valType.UnsafeIndirect(ptr) == nil
20}
21
// efaceDecoder fills an empty interface (interface{}). A nil or
// non-pointer existing value is replaced by whatever iter.Read()
// produces; an existing non-nil pointer is decoded into in place so
// caller-supplied storage is reused.
type efaceDecoder struct {
}

func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	pObj := (*interface{})(ptr)
	obj := *pObj
	if obj == nil {
		*pObj = iter.Read()
		return
	}
	typ := reflect2.TypeOf(obj)
	if typ.Kind() != reflect.Ptr {
		// Non-pointer values cannot be written through; replace them.
		*pObj = iter.Read()
		return
	}
	ptrType := typ.(*reflect2.UnsafePtrType)
	ptrElemType := ptrType.Elem()
	if iter.WhatIsNext() == NilValue {
		if ptrElemType.Kind() != reflect.Ptr {
			// JSON null into a single-level pointer: store nil.
			iter.skipFourBytes('n', 'u', 'l', 'l')
			*pObj = nil
			return
		}
	}
	if reflect2.IsNil(obj) {
		// Typed nil pointer: allocate fresh storage to decode into.
		obj := ptrElemType.New()
		iter.ReadVal(obj)
		*pObj = obj
		return
	}
	iter.ReadVal(obj)
}
54
// ifaceDecoder fills a non-empty interface; the value already stored in
// it must be a non-nil pointer so there is storage to decode into.
type ifaceDecoder struct {
	valType *reflect2.UnsafeIFaceType
}

func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if iter.ReadNil() {
		// JSON null resets the interface to its zero value.
		decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
		return
	}
	obj := decoder.valType.UnsafeIndirect(ptr)
	if reflect2.IsNil(obj) {
		iter.ReportError("decode non empty interface", "can not unmarshal into nil")
		return
	}
	iter.ReadVal(obj)
}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
new file mode 100644
index 0000000..74a97bf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -0,0 +1,483 @@
1package jsoniter
2
3import (
4 "fmt"
5 "github.com/modern-go/reflect2"
6 "reflect"
7 "sort"
8 "strings"
9 "unicode"
10 "unsafe"
11)
12
// Global registries populated by the Register* helpers below; consulted
// during codec construction before the generated defaults.
var typeDecoders = map[string]ValDecoder{}
var fieldDecoders = map[string]ValDecoder{}
var typeEncoders = map[string]ValEncoder{}
var fieldEncoders = map[string]ValEncoder{}
var extensions = []Extension{}

// StructDescriptor describe how should we encode/decode the struct
type StructDescriptor struct {
	Type   reflect2.Type
	Fields []*Binding
}
24
25// GetField get one field from the descriptor by its name.
26// Can not use map here to keep field orders.
27func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
28 for _, binding := range structDescriptor.Fields {
29 if binding.Field.Name() == fieldName {
30 return binding
31 }
32 }
33 return nil
34}
35
// Binding describe how should we encode/decode the struct field
type Binding struct {
	levels    []int // embedding path: field indexes from the outermost struct down
	Field     reflect2.StructField
	FromNames []string // JSON names accepted when decoding
	ToNames   []string // JSON names produced when encoding
	Encoder   ValEncoder
	Decoder   ValDecoder
}
45
// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder.
// Can also rename fields by UpdateStructDescriptor.
type Extension interface {
	// UpdateStructDescriptor may rename or re-bind struct fields in place.
	UpdateStructDescriptor(structDescriptor *StructDescriptor)
	// Create* hooks override the codec for a map key or whole type;
	// returning nil means "no opinion".
	CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
	CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
	CreateDecoder(typ reflect2.Type) ValDecoder
	CreateEncoder(typ reflect2.Type) ValEncoder
	// Decorate* hooks may wrap an already-chosen codec.
	DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
	DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
}
57
// DummyExtension embed this type get dummy implementation for all methods of Extension
type DummyExtension struct {
}

// UpdateStructDescriptor No-op
func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
}

// CreateMapKeyDecoder No-op
func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
	return nil
}

// CreateMapKeyEncoder No-op
func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
	return nil
}

// CreateDecoder No-op
func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
	return nil
}

// CreateEncoder No-op
func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
	return nil
}

// DecorateDecoder No-op
func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
	return decoder
}

// DecorateEncoder No-op
func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
	return encoder
}
95
// EncoderExtension is an Extension that supplies encoders from a plain
// type-to-encoder map; every other hook is a no-op.
type EncoderExtension map[reflect2.Type]ValEncoder

// UpdateStructDescriptor No-op
func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
}

// CreateDecoder No-op
func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
	return nil
}

// CreateEncoder get encoder from map
func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
	return extension[typ]
}

// CreateMapKeyDecoder No-op
func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
	return nil
}

// CreateMapKeyEncoder No-op
func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
	return nil
}

// DecorateDecoder No-op
func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
	return decoder
}

// DecorateEncoder No-op
func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
	return encoder
}
131
// DecoderExtension is an Extension that supplies decoders from a plain
// type-to-decoder map; every other hook is a no-op.
type DecoderExtension map[reflect2.Type]ValDecoder

// UpdateStructDescriptor No-op
func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
}

// CreateMapKeyDecoder No-op
func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
	return nil
}

// CreateMapKeyEncoder No-op
func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
	return nil
}

// CreateDecoder get decoder from map
func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
	return extension[typ]
}

// CreateEncoder No-op
func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
	return nil
}

// DecorateDecoder No-op
func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
	return decoder
}

// DecorateEncoder No-op
func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
	return encoder
}
167
// funcDecoder adapts a plain DecoderFunc to the ValDecoder interface.
type funcDecoder struct {
	fun DecoderFunc
}

func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	decoder.fun(ptr, iter)
}

// funcEncoder adapts a plain EncoderFunc (plus an optional isEmpty
// predicate) to the ValEncoder interface.
type funcEncoder struct {
	fun         EncoderFunc
	isEmptyFunc func(ptr unsafe.Pointer) bool
}

func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	encoder.fun(ptr, stream)
}

// IsEmpty reports false when no predicate was supplied, so omitempty
// never drops the field by default.
func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	if encoder.isEmptyFunc == nil {
		return false
	}
	return encoder.isEmptyFunc(ptr)
}

// DecoderFunc the function form of TypeDecoder
type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)

// EncoderFunc the function form of TypeEncoder
type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
197
// RegisterTypeDecoderFunc register TypeDecoder for a type with function
func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
	typeDecoders[typ] = &funcDecoder{fun}
}

// RegisterTypeDecoder register TypeDecoder for a type
func RegisterTypeDecoder(typ string, decoder ValDecoder) {
	typeDecoders[typ] = decoder
}

// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function
func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
	RegisterFieldDecoder(typ, field, &funcDecoder{fun})
}

// RegisterFieldDecoder register TypeDecoder for a struct field.
// The registry key is "<type>/<field>".
func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
	fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
}

// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function
func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
	typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
}

// RegisterTypeEncoder register TypeEncoder for a type
func RegisterTypeEncoder(typ string, encoder ValEncoder) {
	typeEncoders[typ] = encoder
}

// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function
func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
	RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
}

// RegisterFieldEncoder register TypeEncoder for a struct field.
// The registry key is "<type>/<field>".
func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
	fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
}

// RegisterExtension register extension
func RegisterExtension(extension Extension) {
	extensions = append(extensions, extension)
}
242
243func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
244 decoder := _getTypeDecoderFromExtension(ctx, typ)
245 if decoder != nil {
246 for _, extension := range extensions {
247 decoder = extension.DecorateDecoder(typ, decoder)
248 }
249 decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
250 for _, extension := range ctx.extraExtensions {
251 decoder = extension.DecorateDecoder(typ, decoder)
252 }
253 }
254 return decoder
255}
256func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
257 for _, extension := range extensions {
258 decoder := extension.CreateDecoder(typ)
259 if decoder != nil {
260 return decoder
261 }
262 }
263 decoder := ctx.decoderExtension.CreateDecoder(typ)
264 if decoder != nil {
265 return decoder
266 }
267 for _, extension := range ctx.extraExtensions {
268 decoder := extension.CreateDecoder(typ)
269 if decoder != nil {
270 return decoder
271 }
272 }
273 typeName := typ.String()
274 decoder = typeDecoders[typeName]
275 if decoder != nil {
276 return decoder
277 }
278 if typ.Kind() == reflect.Ptr {
279 ptrType := typ.(*reflect2.UnsafePtrType)
280 decoder := typeDecoders[ptrType.Elem().String()]
281 if decoder != nil {
282 return &OptionalDecoder{ptrType.Elem(), decoder}
283 }
284 }
285 return nil
286}
287
288func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
289 encoder := _getTypeEncoderFromExtension(ctx, typ)
290 if encoder != nil {
291 for _, extension := range extensions {
292 encoder = extension.DecorateEncoder(typ, encoder)
293 }
294 encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
295 for _, extension := range ctx.extraExtensions {
296 encoder = extension.DecorateEncoder(typ, encoder)
297 }
298 }
299 return encoder
300}
301
302func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
303 for _, extension := range extensions {
304 encoder := extension.CreateEncoder(typ)
305 if encoder != nil {
306 return encoder
307 }
308 }
309 encoder := ctx.encoderExtension.CreateEncoder(typ)
310 if encoder != nil {
311 return encoder
312 }
313 for _, extension := range ctx.extraExtensions {
314 encoder := extension.CreateEncoder(typ)
315 if encoder != nil {
316 return encoder
317 }
318 }
319 typeName := typ.String()
320 encoder = typeEncoders[typeName]
321 if encoder != nil {
322 return encoder
323 }
324 if typ.Kind() == reflect.Ptr {
325 typePtr := typ.(*reflect2.UnsafePtrType)
326 encoder := typeEncoders[typePtr.Elem().String()]
327 if encoder != nil {
328 return &OptionalEncoder{encoder}
329 }
330 }
331 return nil
332}
333
// describeStruct walks structType's fields and builds a Binding for each
// encodable/decodable field, recursing into anonymous (embedded) structs
// and pointers-to-struct so their fields are flattened into the result.
func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
	structType := typ.(*reflect2.UnsafeStructType)
	embeddedBindings := []*Binding{}
	bindings := []*Binding{}
	for i := 0; i < structType.NumField(); i++ {
		field := structType.Field(i)
		tag, hastag := field.Tag().Lookup(ctx.getTagKey())
		if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
			continue
		}
		if tag == "-" || field.Name() == "_" {
			continue
		}
		tagParts := strings.Split(tag, ",")
		if field.Anonymous() && (tag == "" || tagParts[0] == "") {
			// Untagged embedded field: flatten its fields into this
			// struct, recording the embedding level for later ordering.
			if field.Type().Kind() == reflect.Struct {
				structDescriptor := describeStruct(ctx, field.Type())
				for _, binding := range structDescriptor.Fields {
					binding.levels = append([]int{i}, binding.levels...)
					omitempty := binding.Encoder.(*structFieldEncoder).omitempty
					binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
					binding.Decoder = &structFieldDecoder{field, binding.Decoder}
					embeddedBindings = append(embeddedBindings, binding)
				}
				continue
			} else if field.Type().Kind() == reflect.Ptr {
				ptrType := field.Type().(*reflect2.UnsafePtrType)
				if ptrType.Elem().Kind() == reflect.Struct {
					// Embedded *struct: same flattening, plus a
					// dereference wrapper around encoder and decoder.
					structDescriptor := describeStruct(ctx, ptrType.Elem())
					for _, binding := range structDescriptor.Fields {
						binding.levels = append([]int{i}, binding.levels...)
						omitempty := binding.Encoder.(*structFieldEncoder).omitempty
						binding.Encoder = &dereferenceEncoder{binding.Encoder}
						binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
						binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
						binding.Decoder = &structFieldDecoder{field, binding.Decoder}
						embeddedBindings = append(embeddedBindings, binding)
					}
					continue
				}
			}
		}
		// Regular field: registered per-field codecs win over generated ones.
		fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
		fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
		decoder := fieldDecoders[fieldCacheKey]
		if decoder == nil {
			decoder = decoderOfType(ctx.append(field.Name()), field.Type())
		}
		encoder := fieldEncoders[fieldCacheKey]
		if encoder == nil {
			encoder = encoderOfType(ctx.append(field.Name()), field.Type())
		}
		binding := &Binding{
			Field:     field,
			FromNames: fieldNames,
			ToNames:   fieldNames,
			Decoder:   decoder,
			Encoder:   encoder,
		}
		binding.levels = []int{i}
		bindings = append(bindings, binding)
	}
	return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
}
// createStructDescriptor assembles the descriptor, lets every extension
// rewrite it, applies struct-tag options, and merges embedded bindings
// with direct ones in original declaration order.
func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
	structDescriptor := &StructDescriptor{
		Type:   typ,
		Fields: bindings,
	}
	for _, extension := range extensions {
		extension.UpdateStructDescriptor(structDescriptor)
	}
	ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
	ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
	for _, extension := range ctx.extraExtensions {
		extension.UpdateStructDescriptor(structDescriptor)
	}
	processTags(structDescriptor, ctx.frozenConfig)
	// merge normal & embedded bindings & sort with original order
	allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
	sort.Sort(allBindings)
	structDescriptor.Fields = allBindings
	return structDescriptor
}
418
419type sortableBindings []*Binding
420
421func (bindings sortableBindings) Len() int {
422 return len(bindings)
423}
424
425func (bindings sortableBindings) Less(i, j int) bool {
426 left := bindings[i].levels
427 right := bindings[j].levels
428 k := 0
429 for {
430 if left[k] < right[k] {
431 return true
432 } else if left[k] > right[k] {
433 return false
434 }
435 k++
436 }
437}
438
439func (bindings sortableBindings) Swap(i, j int) {
440 bindings[i], bindings[j] = bindings[j], bindings[i]
441}
442
// processTags applies the tag options after the field name: "omitempty"
// marks the field droppable when empty, "string" wraps the codecs so
// values are quoted/unquoted, and finally every binding is wrapped in the
// struct-field codecs that locate the field inside the struct.
func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
	for _, binding := range structDescriptor.Fields {
		shouldOmitEmpty := false
		tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
		for _, tagPart := range tagParts[1:] {
			if tagPart == "omitempty" {
				shouldOmitEmpty = true
			} else if tagPart == "string" {
				// String fields get quote-aware string codecs; all other
				// kinds get the quoted-number codecs.
				if binding.Field.Type().Kind() == reflect.String {
					binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
					binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
				} else {
					binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
					binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
				}
			}
		}
		binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
		binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
	}
}
464
// calcFieldNames resolves the JSON name(s) of a struct field: an empty
// slice when the field is ignored ("-" tag) or unexported, otherwise a
// single name — the tag-provided one when present, else the Go name.
func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
	// ignore?
	if wholeTag == "-" {
		return []string{}
	}
	// private?
	if unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' {
		return []string{}
	}
	// rename?
	name := tagProvidedFieldName
	if name == "" {
		name = originalFieldName
	}
	return []string{name}
}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go
new file mode 100644
index 0000000..98d45c1
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_number.go
@@ -0,0 +1,112 @@
1package jsoniter
2
3import (
4 "encoding/json"
5 "github.com/modern-go/reflect2"
6 "strconv"
7 "unsafe"
8)
9
10type Number string
11
12// String returns the literal text of the number.
13func (n Number) String() string { return string(n) }
14
15// Float64 returns the number as a float64.
16func (n Number) Float64() (float64, error) {
17 return strconv.ParseFloat(string(n), 64)
18}
19
20// Int64 returns the number as an int64.
21func (n Number) Int64() (int64, error) {
22 return strconv.ParseInt(string(n), 10, 64)
23}
24
25func CastJsonNumber(val interface{}) (string, bool) {
26 switch typedVal := val.(type) {
27 case json.Number:
28 return string(typedVal), true
29 case Number:
30 return string(typedVal), true
31 }
32 return "", false
33}
34
35var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
36var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
37
38func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
39 if typ.AssignableTo(jsonNumberType) {
40 return &jsonNumberCodec{}
41 }
42 if typ.AssignableTo(jsoniterNumberType) {
43 return &jsoniterNumberCodec{}
44 }
45 return nil
46}
47
48func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
49 if typ.AssignableTo(jsonNumberType) {
50 return &jsonNumberCodec{}
51 }
52 if typ.AssignableTo(jsoniterNumberType) {
53 return &jsoniterNumberCodec{}
54 }
55 return nil
56}
57
// jsonNumberCodec encodes and decodes encoding/json.Number values.
type jsonNumberCodec struct {
}

// Decode accepts a bare number, a quoted number, or null (empty Number).
func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	switch iter.WhatIsNext() {
	case StringValue:
		// Quoted numbers are taken verbatim without validation.
		*((*json.Number)(ptr)) = json.Number(iter.ReadString())
	case NilValue:
		// JSON null becomes the empty Number.
		iter.skipFourBytes('n', 'u', 'l', 'l')
		*((*json.Number)(ptr)) = ""
	default:
		*((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
	}
}

// Encode writes the literal number text; an empty Number would be invalid
// JSON, so it is emitted as 0.
func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
	number := *((*json.Number)(ptr))
	if len(number) == 0 {
		stream.writeByte('0')
	} else {
		stream.WriteRaw(string(number))
	}
}

// IsEmpty reports whether the Number has no literal text (omitempty support).
func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
	return len(*((*json.Number)(ptr))) == 0
}
85
// jsoniterNumberCodec encodes and decodes jsoniter.Number values;
// it mirrors jsonNumberCodec for the package-local Number type.
type jsoniterNumberCodec struct {
}

// Decode accepts a bare number, a quoted number, or null (empty Number).
func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	switch iter.WhatIsNext() {
	case StringValue:
		*((*Number)(ptr)) = Number(iter.ReadString())
	case NilValue:
		iter.skipFourBytes('n', 'u', 'l', 'l')
		*((*Number)(ptr)) = ""
	default:
		*((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
	}
}

// Encode writes the literal number text; an empty Number is emitted as 0
// to keep the output valid JSON.
func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
	number := *((*Number)(ptr))
	if len(number) == 0 {
		stream.writeByte('0')
	} else {
		stream.WriteRaw(string(number))
	}
}

// IsEmpty reports whether the Number has no literal text (omitempty support).
func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
	return len(*((*Number)(ptr))) == 0
}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
new file mode 100644
index 0000000..eba434f
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
@@ -0,0 +1,76 @@
1package jsoniter
2
3import (
4 "encoding/json"
5 "github.com/modern-go/reflect2"
6 "unsafe"
7)
8
9var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
10var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
11
12func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
13 if typ == jsonRawMessageType {
14 return &jsonRawMessageCodec{}
15 }
16 if typ == jsoniterRawMessageType {
17 return &jsoniterRawMessageCodec{}
18 }
19 return nil
20}
21
22func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
23 if typ == jsonRawMessageType {
24 return &jsonRawMessageCodec{}
25 }
26 if typ == jsoniterRawMessageType {
27 return &jsoniterRawMessageCodec{}
28 }
29 return nil
30}
31
// jsonRawMessageCodec passes json.RawMessage values through untouched:
// decoding captures the raw bytes of the next value, encoding writes the
// stored bytes verbatim.
type jsonRawMessageCodec struct {
}

// Decode stores the raw bytes of the next JSON value, or nil for null.
func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if iter.ReadNil() {
		*((*json.RawMessage)(ptr)) = nil
	} else {
		*((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes()
	}
}

// Encode writes the stored bytes as-is; a nil message encodes as null.
func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
	if *((*json.RawMessage)(ptr)) == nil {
		stream.WriteNil()
	} else {
		stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
	}
}

// IsEmpty reports whether the message holds no bytes (omitempty support).
func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
	return len(*((*json.RawMessage)(ptr))) == 0
}
54
// jsoniterRawMessageCodec mirrors jsonRawMessageCodec for the package-local
// RawMessage type.
type jsoniterRawMessageCodec struct {
}

// Decode stores the raw bytes of the next JSON value, or nil for null.
func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if iter.ReadNil() {
		*((*RawMessage)(ptr)) = nil
	} else {
		*((*RawMessage)(ptr)) = iter.SkipAndReturnBytes()
	}
}

// Encode writes the stored bytes as-is; a nil message encodes as null.
func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
	if *((*RawMessage)(ptr)) == nil {
		stream.WriteNil()
	} else {
		stream.WriteRaw(string(*((*RawMessage)(ptr))))
	}
}

// IsEmpty reports whether the message holds no bytes (omitempty support).
func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
	return len(*((*RawMessage)(ptr))) == 0
}
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
new file mode 100644
index 0000000..5829671
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -0,0 +1,346 @@
1package jsoniter
2
3import (
4 "fmt"
5 "github.com/modern-go/reflect2"
6 "io"
7 "reflect"
8 "sort"
9 "unsafe"
10)
11
// decoderOfMap builds a ValDecoder for a map type by composing a key decoder
// and an element decoder resolved under extended ctx paths.
func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
	mapType := typ.(*reflect2.UnsafeMapType)
	keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
	elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
	return &mapDecoder{
		mapType:     mapType,
		keyType:     mapType.Key(),
		elemType:    mapType.Elem(),
		keyDecoder:  keyDecoder,
		elemDecoder: elemDecoder,
	}
}
24
// encoderOfMap builds a ValEncoder for a map type. When the config asks for
// deterministic output (sortMapKeys) the sorting variant is returned;
// otherwise entries are written in Go's randomized map-iteration order.
func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
	mapType := typ.(*reflect2.UnsafeMapType)
	if ctx.sortMapKeys {
		return &sortKeysMapEncoder{
			mapType:     mapType,
			keyEncoder:  encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
			elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
		}
	}
	return &mapEncoder{
		mapType:     mapType,
		keyEncoder:  encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
		elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
	}
}
40
// decoderOfMapKey resolves the decoder used for a map key type. Resolution
// order matters: registered extensions win, then json.Unmarshaler /
// encoding.TextUnmarshaler implementations, then built-in handling for
// string and numeric kinds. Unsupported kinds yield a decoder that errors
// lazily, so the failure only surfaces if such a map is actually decoded.
func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
	// Extensions get the first chance to override key decoding.
	decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
	if decoder != nil {
		return decoder
	}
	for _, extension := range ctx.extraExtensions {
		decoder := extension.CreateMapKeyDecoder(typ)
		if decoder != nil {
			return decoder
		}
	}

	// Pointer-receiver unmarshalers are preferred (checked before the value
	// type) and wrapped so the key value itself can be addressed.
	ptrType := reflect2.PtrTo(typ)
	if ptrType.Implements(unmarshalerType) {
		return &referenceDecoder{
			&unmarshalerDecoder{
				valType: ptrType,
			},
		}
	}
	if typ.Implements(unmarshalerType) {
		return &unmarshalerDecoder{
			valType: typ,
		}
	}
	if ptrType.Implements(textUnmarshalerType) {
		return &referenceDecoder{
			&textUnmarshalerDecoder{
				valType: ptrType,
			},
		}
	}
	if typ.Implements(textUnmarshalerType) {
		return &textUnmarshalerDecoder{
			valType: typ,
		}
	}

	switch typ.Kind() {
	case reflect.String:
		return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
	case reflect.Bool,
		reflect.Uint8, reflect.Int8,
		reflect.Uint16, reflect.Int16,
		reflect.Uint32, reflect.Int32,
		reflect.Uint64, reflect.Int64,
		reflect.Uint, reflect.Int,
		reflect.Float32, reflect.Float64,
		reflect.Uintptr:
		// Numeric keys arrive quoted (object keys are always strings), so
		// the numeric decoder is wrapped to strip the surrounding quotes.
		typ = reflect2.DefaultTypeOfKind(typ.Kind())
		return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
	default:
		return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
	}
}
96
// encoderOfMapKey resolves the encoder used for a map key type. Resolution
// order: registered extensions, then encoding.TextMarshaler, then built-in
// string/numeric handling. Interface-typed keys are dispatched dynamically
// per value; other unsupported kinds yield a lazily-erroring encoder.
func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
	// Extensions get the first chance to override key encoding.
	encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
	if encoder != nil {
		return encoder
	}
	for _, extension := range ctx.extraExtensions {
		encoder := extension.CreateMapKeyEncoder(typ)
		if encoder != nil {
			return encoder
		}
	}

	// The exact TextMarshaler interface type needs the direct variant;
	// concrete implementors use the reflective one.
	if typ == textMarshalerType {
		return &directTextMarshalerEncoder{
			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
		}
	}
	if typ.Implements(textMarshalerType) {
		return &textMarshalerEncoder{
			valType:       typ,
			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
		}
	}

	switch typ.Kind() {
	case reflect.String:
		return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
	case reflect.Bool,
		reflect.Uint8, reflect.Int8,
		reflect.Uint16, reflect.Int16,
		reflect.Uint32, reflect.Int32,
		reflect.Uint64, reflect.Int64,
		reflect.Uint, reflect.Int,
		reflect.Float32, reflect.Float64,
		reflect.Uintptr:
		// Numeric keys must be written quoted, since object keys are strings.
		typ = reflect2.DefaultTypeOfKind(typ.Kind())
		return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
	default:
		if typ.Kind() == reflect.Interface {
			// Key encoder is chosen per concrete value at encode time.
			return &dynamicMapKeyEncoder{ctx, typ}
		}
		return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
	}
}
141
// mapDecoder decodes a JSON object into a Go map, allocating the map and
// each key/element as needed.
type mapDecoder struct {
	mapType     *reflect2.UnsafeMapType
	keyType     reflect2.Type
	elemType    reflect2.Type
	keyDecoder  ValDecoder
	elemDecoder ValDecoder
}

// Decode reads `null` (resets the map to nil) or a `{...}` object, inserting
// every key/value pair into the map at ptr.
func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	mapType := decoder.mapType
	c := iter.nextToken()
	if c == 'n' {
		// JSON null: reset the destination to a fresh zero-value (nil) map.
		iter.skipThreeBytes('u', 'l', 'l')
		*(*unsafe.Pointer)(ptr) = nil
		mapType.UnsafeSet(ptr, mapType.UnsafeNew())
		return
	}
	if mapType.UnsafeIsNil(ptr) {
		// Lazily allocate the destination map on first use.
		mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
	}
	if c != '{' {
		iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
		return
	}
	c = iter.nextToken()
	if c == '}' {
		// Empty object: nothing to insert.
		return
	}
	// First key/value pair is handled before the comma-driven loop below.
	iter.unreadByte()
	key := decoder.keyType.UnsafeNew()
	decoder.keyDecoder.Decode(key, iter)
	c = iter.nextToken()
	if c != ':' {
		iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
		return
	}
	elem := decoder.elemType.UnsafeNew()
	decoder.elemDecoder.Decode(elem, iter)
	decoder.mapType.UnsafeSetIndex(ptr, key, elem)
	// Remaining pairs: one iteration per ',' separator.
	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
		key := decoder.keyType.UnsafeNew()
		decoder.keyDecoder.Decode(key, iter)
		c = iter.nextToken()
		if c != ':' {
			iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
			return
		}
		elem := decoder.elemType.UnsafeNew()
		decoder.elemDecoder.Decode(elem, iter)
		decoder.mapType.UnsafeSetIndex(ptr, key, elem)
	}
	if c != '}' {
		iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
	}
}
197
// numericMapKeyDecoder strips the surrounding quotes from a numeric object
// key (JSON object keys are always strings) and delegates to the wrapped
// numeric decoder for the digits in between.
type numericMapKeyDecoder struct {
	decoder ValDecoder
}

// Decode expects `"<number>"` and reports an error if either quote is missing.
func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	c := iter.nextToken()
	if c != '"' {
		iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
		return
	}
	decoder.decoder.Decode(ptr, iter)
	c = iter.nextToken()
	if c != '"' {
		iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
		return
	}
}
215
// numericMapKeyEncoder wraps a numeric encoder's output in double quotes so
// the value is a legal JSON object key.
type numericMapKeyEncoder struct {
	encoder ValEncoder
}

func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.writeByte('"')
	encoder.encoder.Encode(ptr, stream)
	stream.writeByte('"')
}

// IsEmpty always reports false: a map key is never omitted.
func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return false
}
229
// dynamicMapKeyEncoder handles interface-typed map keys by resolving the
// encoder for the concrete value each time one is encoded.
type dynamicMapKeyEncoder struct {
	ctx     *ctx
	valType reflect2.Type
}

// Encode looks up the key encoder for the runtime type of the boxed value
// and delegates to it.
func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	obj := encoder.valType.UnsafeIndirect(ptr)
	encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
}

func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	obj := encoder.valType.UnsafeIndirect(ptr)
	return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
}
244
// mapEncoder writes a map as a JSON object in Go's (randomized) map
// iteration order; see sortKeysMapEncoder for deterministic output.
type mapEncoder struct {
	mapType     *reflect2.UnsafeMapType
	keyEncoder  ValEncoder
	elemEncoder ValEncoder
}

// Encode writes null for a nil map, otherwise `{"k":v,...}`.
func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	if *(*unsafe.Pointer)(ptr) == nil {
		stream.WriteNil()
		return
	}
	stream.WriteObjectStart()
	iter := encoder.mapType.UnsafeIterate(ptr)
	for i := 0; iter.HasNext(); i++ {
		if i != 0 {
			stream.WriteMore()
		}
		key, elem := iter.UnsafeNext()
		encoder.keyEncoder.Encode(key, stream)
		// Indented (pretty-printed) output puts a space after the colon.
		if stream.indention > 0 {
			stream.writeTwoBytes(byte(':'), byte(' '))
		} else {
			stream.writeByte(':')
		}
		encoder.elemEncoder.Encode(elem, stream)
	}
	stream.WriteObjectEnd()
}

// IsEmpty reports whether the map has no entries (omitempty support).
func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	iter := encoder.mapType.UnsafeIterate(ptr)
	return !iter.HasNext()
}
278
// sortKeysMapEncoder writes a map as a JSON object with entries sorted by
// key string, giving deterministic output (SortMapKeys config option).
type sortKeysMapEncoder struct {
	mapType     *reflect2.UnsafeMapType
	keyEncoder  ValEncoder
	elemEncoder ValEncoder
}

// Encode pre-renders each "key:value" pair into a borrowed sub-stream,
// records the decoded key string for ordering, sorts the pairs, and then
// copies them into the output stream in order.
func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	if *(*unsafe.Pointer)(ptr) == nil {
		stream.WriteNil()
		return
	}
	stream.WriteObjectStart()
	mapIter := encoder.mapType.UnsafeIterate(ptr)
	subStream := stream.cfg.BorrowStream(nil)
	subStream.Attachment = stream.Attachment
	subIter := stream.cfg.BorrowIterator(nil)
	keyValues := encodedKeyValues{}
	for mapIter.HasNext() {
		key, elem := mapIter.UnsafeNext()
		// Remember where this pair starts in the sub-stream buffer.
		subStreamIndex := subStream.Buffered()
		encoder.keyEncoder.Encode(key, subStream)
		if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
			stream.Error = subStream.Error
		}
		// Re-parse the encoded key to get the plain string used for sorting.
		encodedKey := subStream.Buffer()[subStreamIndex:]
		subIter.ResetBytes(encodedKey)
		decodedKey := subIter.ReadString()
		if stream.indention > 0 {
			subStream.writeTwoBytes(byte(':'), byte(' '))
		} else {
			subStream.writeByte(':')
		}
		encoder.elemEncoder.Encode(elem, subStream)
		keyValues = append(keyValues, encodedKV{
			key:      decodedKey,
			keyValue: subStream.Buffer()[subStreamIndex:],
		})
	}
	sort.Sort(keyValues)
	for i, keyValue := range keyValues {
		if i != 0 {
			stream.WriteMore()
		}
		stream.Write(keyValue.keyValue)
	}
	if subStream.Error != nil && stream.Error == nil {
		stream.Error = subStream.Error
	}
	stream.WriteObjectEnd()
	stream.cfg.ReturnStream(subStream)
	stream.cfg.ReturnIterator(subIter)
}

// IsEmpty reports whether the map has no entries (omitempty support).
func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	iter := encoder.mapType.UnsafeIterate(ptr)
	return !iter.HasNext()
}
336
337type encodedKeyValues []encodedKV
338
339type encodedKV struct {
340 key string
341 keyValue []byte
342}
343
344func (sv encodedKeyValues) Len() int { return len(sv) }
345func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
346func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
new file mode 100644
index 0000000..3e21f37
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -0,0 +1,225 @@
1package jsoniter
2
3import (
4 "encoding"
5 "encoding/json"
6 "unsafe"
7
8 "github.com/modern-go/reflect2"
9)
10
11var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
12var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
13var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
14var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
15
// createDecoderOfMarshaler returns a decoder when *T implements
// json.Unmarshaler or encoding.TextUnmarshaler; otherwise nil. The decoder is
// wrapped in a referenceDecoder so the value can be addressed through its
// pointer receiver.
func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
	ptrType := reflect2.PtrTo(typ)
	if ptrType.Implements(unmarshalerType) {
		return &referenceDecoder{
			&unmarshalerDecoder{ptrType},
		}
	}
	if ptrType.Implements(textUnmarshalerType) {
		return &referenceDecoder{
			&textUnmarshalerDecoder{ptrType},
		}
	}
	return nil
}
30
// createEncoderOfMarshaler returns an encoder when typ (or, for non-root
// types, *typ) implements json.Marshaler or encoding.TextMarshaler;
// otherwise nil. The check order is significant: the exact interface types
// first, then value receivers, then pointer receivers.
func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
	// The interface type itself holds the marshaler directly.
	if typ == marshalerType {
		checkIsEmpty := createCheckIsEmpty(ctx, typ)
		var encoder ValEncoder = &directMarshalerEncoder{
			checkIsEmpty: checkIsEmpty,
		}
		return encoder
	}
	if typ.Implements(marshalerType) {
		checkIsEmpty := createCheckIsEmpty(ctx, typ)
		var encoder ValEncoder = &marshalerEncoder{
			valType:      typ,
			checkIsEmpty: checkIsEmpty,
		}
		return encoder
	}
	ptrType := reflect2.PtrTo(typ)
	// Pointer-receiver marshalers apply only when typ is not the root type
	// (an empty ctx.prefix marks the root), matching encoding/json's
	// addressability rules.
	if ctx.prefix != "" && ptrType.Implements(marshalerType) {
		checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
		var encoder ValEncoder = &marshalerEncoder{
			valType:      ptrType,
			checkIsEmpty: checkIsEmpty,
		}
		return &referenceEncoder{encoder}
	}
	if typ == textMarshalerType {
		checkIsEmpty := createCheckIsEmpty(ctx, typ)
		var encoder ValEncoder = &directTextMarshalerEncoder{
			checkIsEmpty:  checkIsEmpty,
			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
		}
		return encoder
	}
	if typ.Implements(textMarshalerType) {
		checkIsEmpty := createCheckIsEmpty(ctx, typ)
		var encoder ValEncoder = &textMarshalerEncoder{
			valType:       typ,
			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
			checkIsEmpty:  checkIsEmpty,
		}
		return encoder
	}
	// if prefix is empty, the type is the root type
	if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
		checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
		var encoder ValEncoder = &textMarshalerEncoder{
			valType:       ptrType,
			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
			checkIsEmpty:  checkIsEmpty,
		}
		return &referenceEncoder{encoder}
	}
	return nil
}
85
// marshalerEncoder encodes a value by calling its json.Marshaler
// implementation and writing the returned bytes directly.
type marshalerEncoder struct {
	checkIsEmpty checkIsEmpty
	valType      reflect2.Type
}

// Encode writes null for a nil nullable value, otherwise the output of
// MarshalJSON; a marshal error is stored on the stream.
func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	obj := encoder.valType.UnsafeIndirect(ptr)
	if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
		stream.WriteNil()
		return
	}
	marshaler := obj.(json.Marshaler)
	bytes, err := marshaler.MarshalJSON()
	if err != nil {
		stream.Error = err
	} else {
		// html escape was already done by jsoniter
		// but the extra '\n' should be trimmed
		l := len(bytes)
		if l > 0 && bytes[l-1] == '\n' {
			bytes = bytes[:l-1]
		}
		stream.Write(bytes)
	}
}

// IsEmpty delegates to the underlying type's emptiness check.
func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return encoder.checkIsEmpty.IsEmpty(ptr)
}
115
// directMarshalerEncoder handles values whose static type is exactly the
// json.Marshaler interface: ptr points straight at the interface value.
type directMarshalerEncoder struct {
	checkIsEmpty checkIsEmpty
}

// Encode writes null for a nil interface, otherwise the MarshalJSON output;
// a marshal error is stored on the stream.
func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	marshaler := *(*json.Marshaler)(ptr)
	if marshaler == nil {
		stream.WriteNil()
		return
	}
	bytes, err := marshaler.MarshalJSON()
	if err != nil {
		stream.Error = err
	} else {
		stream.Write(bytes)
	}
}

// IsEmpty delegates to the underlying type's emptiness check.
func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return encoder.checkIsEmpty.IsEmpty(ptr)
}
137
// textMarshalerEncoder encodes a value via its encoding.TextMarshaler
// implementation; the produced text is written as a JSON string.
type textMarshalerEncoder struct {
	valType       reflect2.Type
	stringEncoder ValEncoder
	checkIsEmpty  checkIsEmpty
}

// Encode writes null for a nil nullable value, otherwise the MarshalText
// output encoded as a JSON string; a marshal error is stored on the stream.
func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	obj := encoder.valType.UnsafeIndirect(ptr)
	if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
		stream.WriteNil()
		return
	}
	marshaler := (obj).(encoding.TextMarshaler)
	bytes, err := marshaler.MarshalText()
	if err != nil {
		stream.Error = err
	} else {
		str := string(bytes)
		encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
	}
}

// IsEmpty delegates to the underlying type's emptiness check.
func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return encoder.checkIsEmpty.IsEmpty(ptr)
}
163
// directTextMarshalerEncoder handles values whose static type is exactly the
// encoding.TextMarshaler interface: ptr points straight at the interface.
type directTextMarshalerEncoder struct {
	stringEncoder ValEncoder
	checkIsEmpty  checkIsEmpty
}

// Encode writes null for a nil interface, otherwise the MarshalText output
// encoded as a JSON string; a marshal error is stored on the stream.
func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	marshaler := *(*encoding.TextMarshaler)(ptr)
	if marshaler == nil {
		stream.WriteNil()
		return
	}
	bytes, err := marshaler.MarshalText()
	if err != nil {
		stream.Error = err
	} else {
		str := string(bytes)
		encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
	}
}

// IsEmpty delegates to the underlying type's emptiness check.
func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return encoder.checkIsEmpty.IsEmpty(ptr)
}
187
// unmarshalerDecoder decodes a value by handing the raw bytes of the next
// JSON value to its json.Unmarshaler implementation.
type unmarshalerDecoder struct {
	valType reflect2.Type
}

func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	valType := decoder.valType
	obj := valType.UnsafeIndirect(ptr)
	unmarshaler := obj.(json.Unmarshaler)
	iter.nextToken()
	iter.unreadByte() // skip spaces
	// Capture the entire next value verbatim for UnmarshalJSON.
	bytes := iter.SkipAndReturnBytes()
	err := unmarshaler.UnmarshalJSON(bytes)
	if err != nil {
		iter.ReportError("unmarshalerDecoder", err.Error())
	}
}
204
// textUnmarshalerDecoder decodes a JSON string by passing its contents to
// the value's encoding.TextUnmarshaler implementation.
type textUnmarshalerDecoder struct {
	valType reflect2.Type
}

func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	valType := decoder.valType
	obj := valType.UnsafeIndirect(ptr)
	if reflect2.IsNil(obj) {
		// A nil pointer receiver: allocate the element so there is a value
		// to unmarshal into.
		ptrType := valType.(*reflect2.UnsafePtrType)
		elemType := ptrType.Elem()
		elem := elemType.UnsafeNew()
		ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
		obj = valType.UnsafeIndirect(ptr)
	}
	unmarshaler := (obj).(encoding.TextUnmarshaler)
	str := iter.ReadString()
	err := unmarshaler.UnmarshalText([]byte(str))
	if err != nil {
		iter.ReportError("textUnmarshalerDecoder", err.Error())
	}
}
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
new file mode 100644
index 0000000..f88722d
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_native.go
@@ -0,0 +1,453 @@
1package jsoniter
2
3import (
4 "encoding/base64"
5 "reflect"
6 "strconv"
7 "unsafe"
8
9 "github.com/modern-go/reflect2"
10)
11
// ptrSize is the pointer width in bits (32 or 64) on the current platform.
const ptrSize = 32 << uintptr(^uintptr(0)>>63)
13
// createEncoderOfNative returns the built-in encoder for Go's primitive
// kinds (and []byte, which is base64-encoded). Named types whose name
// differs from the primitive (e.g. `type MyInt int`) are routed back through
// encoderOfType on the canonical primitive type so extensions still apply.
// Returns nil for kinds not handled here.
func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
	// []byte (and named byte-slice types) encode as a base64 string.
	if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
		sliceDecoder := decoderOfSlice(ctx, typ)
		return &base64Codec{sliceDecoder: sliceDecoder}
	}
	typeName := typ.String()
	kind := typ.Kind()
	switch kind {
	case reflect.String:
		if typeName != "string" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
		}
		return &stringCodec{}
	case reflect.Int:
		if typeName != "int" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
		}
		// int is 32 or 64 bits depending on platform.
		if strconv.IntSize == 32 {
			return &int32Codec{}
		}
		return &int64Codec{}
	case reflect.Int8:
		if typeName != "int8" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
		}
		return &int8Codec{}
	case reflect.Int16:
		if typeName != "int16" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
		}
		return &int16Codec{}
	case reflect.Int32:
		if typeName != "int32" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
		}
		return &int32Codec{}
	case reflect.Int64:
		if typeName != "int64" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
		}
		return &int64Codec{}
	case reflect.Uint:
		if typeName != "uint" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
		}
		if strconv.IntSize == 32 {
			return &uint32Codec{}
		}
		return &uint64Codec{}
	case reflect.Uint8:
		if typeName != "uint8" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
		}
		return &uint8Codec{}
	case reflect.Uint16:
		if typeName != "uint16" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
		}
		return &uint16Codec{}
	case reflect.Uint32:
		if typeName != "uint32" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
		}
		return &uint32Codec{}
	case reflect.Uintptr:
		if typeName != "uintptr" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
		}
		// uintptr width follows the platform pointer size.
		if ptrSize == 32 {
			return &uint32Codec{}
		}
		return &uint64Codec{}
	case reflect.Uint64:
		if typeName != "uint64" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
		}
		return &uint64Codec{}
	case reflect.Float32:
		if typeName != "float32" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
		}
		return &float32Codec{}
	case reflect.Float64:
		if typeName != "float64" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
		}
		return &float64Codec{}
	case reflect.Bool:
		if typeName != "bool" {
			return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
		}
		return &boolCodec{}
	}
	return nil
}
109
// createDecoderOfNative returns the built-in decoder for Go's primitive
// kinds (and []byte, accepted as base64 string or JSON array). Named types
// whose name differs from the primitive are routed back through
// decoderOfType on the canonical primitive type so extensions still apply.
// Returns nil for kinds not handled here.
func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
	// []byte (and named byte-slice types) decode from a base64 string or array.
	if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
		sliceDecoder := decoderOfSlice(ctx, typ)
		return &base64Codec{sliceDecoder: sliceDecoder}
	}
	typeName := typ.String()
	switch typ.Kind() {
	case reflect.String:
		if typeName != "string" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
		}
		return &stringCodec{}
	case reflect.Int:
		if typeName != "int" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
		}
		// int is 32 or 64 bits depending on platform.
		if strconv.IntSize == 32 {
			return &int32Codec{}
		}
		return &int64Codec{}
	case reflect.Int8:
		if typeName != "int8" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
		}
		return &int8Codec{}
	case reflect.Int16:
		if typeName != "int16" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
		}
		return &int16Codec{}
	case reflect.Int32:
		if typeName != "int32" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
		}
		return &int32Codec{}
	case reflect.Int64:
		if typeName != "int64" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
		}
		return &int64Codec{}
	case reflect.Uint:
		if typeName != "uint" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
		}
		if strconv.IntSize == 32 {
			return &uint32Codec{}
		}
		return &uint64Codec{}
	case reflect.Uint8:
		if typeName != "uint8" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
		}
		return &uint8Codec{}
	case reflect.Uint16:
		if typeName != "uint16" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
		}
		return &uint16Codec{}
	case reflect.Uint32:
		if typeName != "uint32" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
		}
		return &uint32Codec{}
	case reflect.Uintptr:
		if typeName != "uintptr" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
		}
		// uintptr width follows the platform pointer size.
		if ptrSize == 32 {
			return &uint32Codec{}
		}
		return &uint64Codec{}
	case reflect.Uint64:
		if typeName != "uint64" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
		}
		return &uint64Codec{}
	case reflect.Float32:
		if typeName != "float32" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
		}
		return &float32Codec{}
	case reflect.Float64:
		if typeName != "float64" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
		}
		return &float64Codec{}
	case reflect.Bool:
		if typeName != "bool" {
			return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
		}
		return &boolCodec{}
	}
	return nil
}
204
// stringCodec encodes/decodes plain Go strings.
type stringCodec struct {
}

func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	*((*string)(ptr)) = iter.ReadString()
}

func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
	str := *((*string)(ptr))
	stream.WriteString(str)
}

// IsEmpty reports whether the string is "" (omitempty support).
func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*string)(ptr)) == ""
}
220
// Fixed-width signed integer codecs. On decode, JSON null leaves the
// existing value untouched (ReadNil consumes the null); IsEmpty treats 0 as
// empty for omitempty.

type int8Codec struct {
}

func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*int8)(ptr)) = iter.ReadInt8()
	}
}

func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteInt8(*((*int8)(ptr)))
}

func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*int8)(ptr)) == 0
}

type int16Codec struct {
}

func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*int16)(ptr)) = iter.ReadInt16()
	}
}

func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteInt16(*((*int16)(ptr)))
}

func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*int16)(ptr)) == 0
}

type int32Codec struct {
}

func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*int32)(ptr)) = iter.ReadInt32()
	}
}

func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteInt32(*((*int32)(ptr)))
}

func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*int32)(ptr)) == 0
}

type int64Codec struct {
}

func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*int64)(ptr)) = iter.ReadInt64()
	}
}

func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteInt64(*((*int64)(ptr)))
}

func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*int64)(ptr)) == 0
}
288
// Fixed-width unsigned integer codecs. On decode, JSON null leaves the
// existing value untouched; IsEmpty treats 0 as empty for omitempty.

type uint8Codec struct {
}

func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*uint8)(ptr)) = iter.ReadUint8()
	}
}

func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteUint8(*((*uint8)(ptr)))
}

func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*uint8)(ptr)) == 0
}

type uint16Codec struct {
}

func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*uint16)(ptr)) = iter.ReadUint16()
	}
}

func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteUint16(*((*uint16)(ptr)))
}

func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*uint16)(ptr)) == 0
}

type uint32Codec struct {
}

func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*uint32)(ptr)) = iter.ReadUint32()
	}
}

func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteUint32(*((*uint32)(ptr)))
}

func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*uint32)(ptr)) == 0
}

type uint64Codec struct {
}

func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*uint64)(ptr)) = iter.ReadUint64()
	}
}

func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteUint64(*((*uint64)(ptr)))
}

func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*uint64)(ptr)) == 0
}
356
// Float and bool codecs. On decode, JSON null leaves the existing value
// untouched; IsEmpty treats 0 / false as empty for omitempty.

type float32Codec struct {
}

func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*float32)(ptr)) = iter.ReadFloat32()
	}
}

func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteFloat32(*((*float32)(ptr)))
}

func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*float32)(ptr)) == 0
}

type float64Codec struct {
}

func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*float64)(ptr)) = iter.ReadFloat64()
	}
}

func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteFloat64(*((*float64)(ptr)))
}

func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return *((*float64)(ptr)) == 0
}

type boolCodec struct {
}

func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.ReadNil() {
		*((*bool)(ptr)) = iter.ReadBool()
	}
}

func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteBool(*((*bool)(ptr)))
}

func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
	return !(*((*bool)(ptr)))
}
407
// base64Codec handles []byte values: encoded as a base64 JSON string,
// decoded from either a base64 string or a plain JSON array of numbers.
//
// NOTE(review): the construction sites visible in this file only set
// sliceDecoder (`&base64Codec{sliceDecoder: sliceDecoder}`); confirm
// sliceType is populated elsewhere, since Decode/Encode call methods on it.
type base64Codec struct {
	sliceType    *reflect2.UnsafeSliceType
	sliceDecoder ValDecoder
}

// Decode accepts null (sets the slice to nil), a base64 string, or a JSON
// array delegated to the element-wise slice decoder.
func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if iter.ReadNil() {
		codec.sliceType.UnsafeSetNil(ptr)
		return
	}
	switch iter.WhatIsNext() {
	case StringValue:
		src := iter.ReadString()
		dst, err := base64.StdEncoding.DecodeString(src)
		if err != nil {
			iter.ReportError("decode base64", err.Error())
		} else {
			codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
		}
	case ArrayValue:
		codec.sliceDecoder.Decode(ptr, iter)
	default:
		iter.ReportError("base64Codec", "invalid input")
	}
}

// Encode writes null for a nil slice, otherwise the standard base64
// encoding of the bytes inside a JSON string.
func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
	if codec.sliceType.UnsafeIsNil(ptr) {
		stream.WriteNil()
		return
	}
	src := *((*[]byte)(ptr))
	encoding := base64.StdEncoding
	stream.writeByte('"')
	if len(src) != 0 {
		size := encoding.EncodedLen(len(src))
		buf := make([]byte, size)
		encoding.Encode(buf, src)
		stream.buf = append(stream.buf, buf...)
	}
	stream.writeByte('"')
}

// IsEmpty reports whether the slice has no bytes (omitempty support).
func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
	return len(*((*[]byte)(ptr))) == 0
}
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
new file mode 100644
index 0000000..fa71f47
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_optional.go
@@ -0,0 +1,129 @@
1package jsoniter
2
3import (
4 "github.com/modern-go/reflect2"
5 "unsafe"
6)
7
8func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
9 ptrType := typ.(*reflect2.UnsafePtrType)
10 elemType := ptrType.Elem()
11 decoder := decoderOfType(ctx, elemType)
12 return &OptionalDecoder{elemType, decoder}
13}
14
15func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
16 ptrType := typ.(*reflect2.UnsafePtrType)
17 elemType := ptrType.Elem()
18 elemEncoder := encoderOfType(ctx, elemType)
19 encoder := &OptionalEncoder{elemEncoder}
20 return encoder
21}
22
// OptionalDecoder decodes into a pointer (*T): JSON null stores a nil
// pointer, and any other value is decoded into the pointee, which is
// allocated on demand.
type OptionalDecoder struct {
	ValueType    reflect2.Type
	ValueDecoder ValDecoder
}
27
28func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
29 if iter.ReadNil() {
30 *((*unsafe.Pointer)(ptr)) = nil
31 } else {
32 if *((*unsafe.Pointer)(ptr)) == nil {
33 //pointer to null, we have to allocate memory to hold the value
34 newPtr := decoder.ValueType.UnsafeNew()
35 decoder.ValueDecoder.Decode(newPtr, iter)
36 *((*unsafe.Pointer)(ptr)) = newPtr
37 } else {
38 //reuse existing instance
39 decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
40 }
41 }
42}
43
// dereferenceDecoder follows a pointer and decodes into its target,
// allocating the target first when the pointer is nil. Unlike
// OptionalDecoder it does not itself handle JSON null.
type dereferenceDecoder struct {
	// only to deference a pointer
	valueType    reflect2.Type
	valueDecoder ValDecoder
}
49
50func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
51 if *((*unsafe.Pointer)(ptr)) == nil {
52 //pointer to null, we have to allocate memory to hold the value
53 newPtr := decoder.valueType.UnsafeNew()
54 decoder.valueDecoder.Decode(newPtr, iter)
55 *((*unsafe.Pointer)(ptr)) = newPtr
56 } else {
57 //reuse existing instance
58 decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
59 }
60}
61
// OptionalEncoder encodes a pointer value: nil pointers become JSON null,
// anything else is encoded via ValueEncoder.
type OptionalEncoder struct {
	ValueEncoder ValEncoder
}
65
66func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
67 if *((*unsafe.Pointer)(ptr)) == nil {
68 stream.WriteNil()
69 } else {
70 encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
71 }
72}
73
74func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
75 return *((*unsafe.Pointer)(ptr)) == nil
76}
77
// dereferenceEncoder encodes through a pointer: nil becomes JSON null,
// otherwise the pointee is encoded. Unlike OptionalEncoder, emptiness is
// delegated to the pointee's encoder.
type dereferenceEncoder struct {
	ValueEncoder ValEncoder
}
81
82func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
83 if *((*unsafe.Pointer)(ptr)) == nil {
84 stream.WriteNil()
85 } else {
86 encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
87 }
88}
89
90func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
91 dePtr := *((*unsafe.Pointer)(ptr))
92 if dePtr == nil {
93 return true
94 }
95 return encoder.ValueEncoder.IsEmpty(dePtr)
96}
97
98func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
99 deReferenced := *((*unsafe.Pointer)(ptr))
100 if deReferenced == nil {
101 return true
102 }
103 isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
104 if !converted {
105 return false
106 }
107 fieldPtr := unsafe.Pointer(deReferenced)
108 return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
109}
110
// referenceEncoder adds one level of indirection: it passes the address of
// the incoming pointer to the wrapped encoder.
type referenceEncoder struct {
	encoder ValEncoder
}
114
115func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
116 encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
117}
118
119func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
120 return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
121}
122
// referenceDecoder adds one level of indirection: it passes the address of
// the incoming pointer to the wrapped decoder.
type referenceDecoder struct {
	decoder ValDecoder
}
126
127func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
128 decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
129}
diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go
new file mode 100644
index 0000000..9441d79
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_slice.go
@@ -0,0 +1,99 @@
1package jsoniter
2
3import (
4 "fmt"
5 "github.com/modern-go/reflect2"
6 "io"
7 "unsafe"
8)
9
10func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
11 sliceType := typ.(*reflect2.UnsafeSliceType)
12 decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
13 return &sliceDecoder{sliceType, decoder}
14}
15
16func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
17 sliceType := typ.(*reflect2.UnsafeSliceType)
18 encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
19 return &sliceEncoder{sliceType, encoder}
20}
21
// sliceEncoder writes a Go slice as a JSON array, delegating each element to
// elemEncoder.
type sliceEncoder struct {
	sliceType   *reflect2.UnsafeSliceType
	elemEncoder ValEncoder
}
26
27func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
28 if encoder.sliceType.UnsafeIsNil(ptr) {
29 stream.WriteNil()
30 return
31 }
32 length := encoder.sliceType.UnsafeLengthOf(ptr)
33 if length == 0 {
34 stream.WriteEmptyArray()
35 return
36 }
37 stream.WriteArrayStart()
38 encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
39 for i := 1; i < length; i++ {
40 stream.WriteMore()
41 elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
42 encoder.elemEncoder.Encode(elemPtr, stream)
43 }
44 stream.WriteArrayEnd()
45 if stream.Error != nil && stream.Error != io.EOF {
46 stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
47 }
48}
49
50func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
51 return encoder.sliceType.UnsafeLengthOf(ptr) == 0
52}
53
// sliceDecoder reads a JSON array (or null) into a Go slice, delegating each
// element to elemDecoder.
type sliceDecoder struct {
	sliceType   *reflect2.UnsafeSliceType
	elemDecoder ValDecoder
}
58
59func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
60 decoder.doDecode(ptr, iter)
61 if iter.Error != nil && iter.Error != io.EOF {
62 iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
63 }
64}
65
// doDecode parses a JSON array or null into the slice at ptr, growing the
// slice one element at a time as commas are consumed.
func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
	c := iter.nextToken()
	sliceType := decoder.sliceType
	if c == 'n' {
		// "null" -> nil slice.
		iter.skipThreeBytes('u', 'l', 'l')
		sliceType.UnsafeSetNil(ptr)
		return
	}
	if c != '[' {
		iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
		return
	}
	c = iter.nextToken()
	if c == ']' {
		// Empty array -> empty (non-nil) slice.
		sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
		return
	}
	iter.unreadByte()
	// Decode the first element, then keep appending while commas follow.
	sliceType.UnsafeGrow(ptr, 1)
	elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
	decoder.elemDecoder.Decode(elemPtr, iter)
	length := 1
	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
		idx := length
		length += 1
		sliceType.UnsafeGrow(ptr, length)
		elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
		decoder.elemDecoder.Decode(elemPtr, iter)
	}
	if c != ']' {
		iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
		return
	}
}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
new file mode 100644
index 0000000..92ae912
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -0,0 +1,1097 @@
1package jsoniter
2
3import (
4 "fmt"
5 "io"
6 "strings"
7 "unsafe"
8
9 "github.com/modern-go/reflect2"
10)
11
// decoderOfStruct builds a ValDecoder for a struct type. It maps every JSON
// name a field can be decoded from to that field's decoder, resolves
// duplicate names via resolveConflictBinding, and optionally registers
// lower-cased aliases for case-insensitive configs.
func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
	bindings := map[string]*Binding{}
	structDescriptor := describeStruct(ctx, typ)
	for _, binding := range structDescriptor.Fields {
		for _, fromName := range binding.FromNames {
			old := bindings[fromName]
			if old == nil {
				bindings[fromName] = binding
				continue
			}
			// Two fields claim the same JSON name; let the config decide
			// which one (possibly both) to drop.
			ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
			if ignoreOld {
				delete(bindings, fromName)
			}
			if !ignoreNew {
				bindings[fromName] = binding
			}
		}
	}
	fields := map[string]*structFieldDecoder{}
	for k, binding := range bindings {
		fields[k] = binding.Decoder.(*structFieldDecoder)
	}

	if !ctx.caseSensitive() {
		// Add lower-case aliases, but never shadow an exact-case entry.
		for k, binding := range bindings {
			if _, found := fields[strings.ToLower(k)]; !found {
				fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
			}
		}
	}

	return createStructDecoder(ctx, typ, fields)
}
46
47func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
48 if ctx.disallowUnknownFields {
49 return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
50 }
51 knownHash := map[int64]struct{}{
52 0: {},
53 }
54
55 switch len(fields) {
56 case 0:
57 return &skipObjectDecoder{typ}
58 case 1:
59 for fieldName, fieldDecoder := range fields {
60 fieldHash := calcHash(fieldName, ctx.caseSensitive())
61 _, known := knownHash[fieldHash]
62 if known {
63 return &generalStructDecoder{typ, fields, false}
64 }
65 knownHash[fieldHash] = struct{}{}
66 return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
67 }
68 case 2:
69 var fieldHash1 int64
70 var fieldHash2 int64
71 var fieldDecoder1 *structFieldDecoder
72 var fieldDecoder2 *structFieldDecoder
73 for fieldName, fieldDecoder := range fields {
74 fieldHash := calcHash(fieldName, ctx.caseSensitive())
75 _, known := knownHash[fieldHash]
76 if known {
77 return &generalStructDecoder{typ, fields, false}
78 }
79 knownHash[fieldHash] = struct{}{}
80 if fieldHash1 == 0 {
81 fieldHash1 = fieldHash
82 fieldDecoder1 = fieldDecoder
83 } else {
84 fieldHash2 = fieldHash
85 fieldDecoder2 = fieldDecoder
86 }
87 }
88 return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
89 case 3:
90 var fieldName1 int64
91 var fieldName2 int64
92 var fieldName3 int64
93 var fieldDecoder1 *structFieldDecoder
94 var fieldDecoder2 *structFieldDecoder
95 var fieldDecoder3 *structFieldDecoder
96 for fieldName, fieldDecoder := range fields {
97 fieldHash := calcHash(fieldName, ctx.caseSensitive())
98 _, known := knownHash[fieldHash]
99 if known {
100 return &generalStructDecoder{typ, fields, false}
101 }
102 knownHash[fieldHash] = struct{}{}
103 if fieldName1 == 0 {
104 fieldName1 = fieldHash
105 fieldDecoder1 = fieldDecoder
106 } else if fieldName2 == 0 {
107 fieldName2 = fieldHash
108 fieldDecoder2 = fieldDecoder
109 } else {
110 fieldName3 = fieldHash
111 fieldDecoder3 = fieldDecoder
112 }
113 }
114 return &threeFieldsStructDecoder{typ,
115 fieldName1, fieldDecoder1,
116 fieldName2, fieldDecoder2,
117 fieldName3, fieldDecoder3}
118 case 4:
119 var fieldName1 int64
120 var fieldName2 int64
121 var fieldName3 int64
122 var fieldName4 int64
123 var fieldDecoder1 *structFieldDecoder
124 var fieldDecoder2 *structFieldDecoder
125 var fieldDecoder3 *structFieldDecoder
126 var fieldDecoder4 *structFieldDecoder
127 for fieldName, fieldDecoder := range fields {
128 fieldHash := calcHash(fieldName, ctx.caseSensitive())
129 _, known := knownHash[fieldHash]
130 if known {
131 return &generalStructDecoder{typ, fields, false}
132 }
133 knownHash[fieldHash] = struct{}{}
134 if fieldName1 == 0 {
135 fieldName1 = fieldHash
136 fieldDecoder1 = fieldDecoder
137 } else if fieldName2 == 0 {
138 fieldName2 = fieldHash
139 fieldDecoder2 = fieldDecoder
140 } else if fieldName3 == 0 {
141 fieldName3 = fieldHash
142 fieldDecoder3 = fieldDecoder
143 } else {
144 fieldName4 = fieldHash
145 fieldDecoder4 = fieldDecoder
146 }
147 }
148 return &fourFieldsStructDecoder{typ,
149 fieldName1, fieldDecoder1,
150 fieldName2, fieldDecoder2,
151 fieldName3, fieldDecoder3,
152 fieldName4, fieldDecoder4}
153 case 5:
154 var fieldName1 int64
155 var fieldName2 int64
156 var fieldName3 int64
157 var fieldName4 int64
158 var fieldName5 int64
159 var fieldDecoder1 *structFieldDecoder
160 var fieldDecoder2 *structFieldDecoder
161 var fieldDecoder3 *structFieldDecoder
162 var fieldDecoder4 *structFieldDecoder
163 var fieldDecoder5 *structFieldDecoder
164 for fieldName, fieldDecoder := range fields {
165 fieldHash := calcHash(fieldName, ctx.caseSensitive())
166 _, known := knownHash[fieldHash]
167 if known {
168 return &generalStructDecoder{typ, fields, false}
169 }
170 knownHash[fieldHash] = struct{}{}
171 if fieldName1 == 0 {
172 fieldName1 = fieldHash
173 fieldDecoder1 = fieldDecoder
174 } else if fieldName2 == 0 {
175 fieldName2 = fieldHash
176 fieldDecoder2 = fieldDecoder
177 } else if fieldName3 == 0 {
178 fieldName3 = fieldHash
179 fieldDecoder3 = fieldDecoder
180 } else if fieldName4 == 0 {
181 fieldName4 = fieldHash
182 fieldDecoder4 = fieldDecoder
183 } else {
184 fieldName5 = fieldHash
185 fieldDecoder5 = fieldDecoder
186 }
187 }
188 return &fiveFieldsStructDecoder{typ,
189 fieldName1, fieldDecoder1,
190 fieldName2, fieldDecoder2,
191 fieldName3, fieldDecoder3,
192 fieldName4, fieldDecoder4,
193 fieldName5, fieldDecoder5}
194 case 6:
195 var fieldName1 int64
196 var fieldName2 int64
197 var fieldName3 int64
198 var fieldName4 int64
199 var fieldName5 int64
200 var fieldName6 int64
201 var fieldDecoder1 *structFieldDecoder
202 var fieldDecoder2 *structFieldDecoder
203 var fieldDecoder3 *structFieldDecoder
204 var fieldDecoder4 *structFieldDecoder
205 var fieldDecoder5 *structFieldDecoder
206 var fieldDecoder6 *structFieldDecoder
207 for fieldName, fieldDecoder := range fields {
208 fieldHash := calcHash(fieldName, ctx.caseSensitive())
209 _, known := knownHash[fieldHash]
210 if known {
211 return &generalStructDecoder{typ, fields, false}
212 }
213 knownHash[fieldHash] = struct{}{}
214 if fieldName1 == 0 {
215 fieldName1 = fieldHash
216 fieldDecoder1 = fieldDecoder
217 } else if fieldName2 == 0 {
218 fieldName2 = fieldHash
219 fieldDecoder2 = fieldDecoder
220 } else if fieldName3 == 0 {
221 fieldName3 = fieldHash
222 fieldDecoder3 = fieldDecoder
223 } else if fieldName4 == 0 {
224 fieldName4 = fieldHash
225 fieldDecoder4 = fieldDecoder
226 } else if fieldName5 == 0 {
227 fieldName5 = fieldHash
228 fieldDecoder5 = fieldDecoder
229 } else {
230 fieldName6 = fieldHash
231 fieldDecoder6 = fieldDecoder
232 }
233 }
234 return &sixFieldsStructDecoder{typ,
235 fieldName1, fieldDecoder1,
236 fieldName2, fieldDecoder2,
237 fieldName3, fieldDecoder3,
238 fieldName4, fieldDecoder4,
239 fieldName5, fieldDecoder5,
240 fieldName6, fieldDecoder6}
241 case 7:
242 var fieldName1 int64
243 var fieldName2 int64
244 var fieldName3 int64
245 var fieldName4 int64
246 var fieldName5 int64
247 var fieldName6 int64
248 var fieldName7 int64
249 var fieldDecoder1 *structFieldDecoder
250 var fieldDecoder2 *structFieldDecoder
251 var fieldDecoder3 *structFieldDecoder
252 var fieldDecoder4 *structFieldDecoder
253 var fieldDecoder5 *structFieldDecoder
254 var fieldDecoder6 *structFieldDecoder
255 var fieldDecoder7 *structFieldDecoder
256 for fieldName, fieldDecoder := range fields {
257 fieldHash := calcHash(fieldName, ctx.caseSensitive())
258 _, known := knownHash[fieldHash]
259 if known {
260 return &generalStructDecoder{typ, fields, false}
261 }
262 knownHash[fieldHash] = struct{}{}
263 if fieldName1 == 0 {
264 fieldName1 = fieldHash
265 fieldDecoder1 = fieldDecoder
266 } else if fieldName2 == 0 {
267 fieldName2 = fieldHash
268 fieldDecoder2 = fieldDecoder
269 } else if fieldName3 == 0 {
270 fieldName3 = fieldHash
271 fieldDecoder3 = fieldDecoder
272 } else if fieldName4 == 0 {
273 fieldName4 = fieldHash
274 fieldDecoder4 = fieldDecoder
275 } else if fieldName5 == 0 {
276 fieldName5 = fieldHash
277 fieldDecoder5 = fieldDecoder
278 } else if fieldName6 == 0 {
279 fieldName6 = fieldHash
280 fieldDecoder6 = fieldDecoder
281 } else {
282 fieldName7 = fieldHash
283 fieldDecoder7 = fieldDecoder
284 }
285 }
286 return &sevenFieldsStructDecoder{typ,
287 fieldName1, fieldDecoder1,
288 fieldName2, fieldDecoder2,
289 fieldName3, fieldDecoder3,
290 fieldName4, fieldDecoder4,
291 fieldName5, fieldDecoder5,
292 fieldName6, fieldDecoder6,
293 fieldName7, fieldDecoder7}
294 case 8:
295 var fieldName1 int64
296 var fieldName2 int64
297 var fieldName3 int64
298 var fieldName4 int64
299 var fieldName5 int64
300 var fieldName6 int64
301 var fieldName7 int64
302 var fieldName8 int64
303 var fieldDecoder1 *structFieldDecoder
304 var fieldDecoder2 *structFieldDecoder
305 var fieldDecoder3 *structFieldDecoder
306 var fieldDecoder4 *structFieldDecoder
307 var fieldDecoder5 *structFieldDecoder
308 var fieldDecoder6 *structFieldDecoder
309 var fieldDecoder7 *structFieldDecoder
310 var fieldDecoder8 *structFieldDecoder
311 for fieldName, fieldDecoder := range fields {
312 fieldHash := calcHash(fieldName, ctx.caseSensitive())
313 _, known := knownHash[fieldHash]
314 if known {
315 return &generalStructDecoder{typ, fields, false}
316 }
317 knownHash[fieldHash] = struct{}{}
318 if fieldName1 == 0 {
319 fieldName1 = fieldHash
320 fieldDecoder1 = fieldDecoder
321 } else if fieldName2 == 0 {
322 fieldName2 = fieldHash
323 fieldDecoder2 = fieldDecoder
324 } else if fieldName3 == 0 {
325 fieldName3 = fieldHash
326 fieldDecoder3 = fieldDecoder
327 } else if fieldName4 == 0 {
328 fieldName4 = fieldHash
329 fieldDecoder4 = fieldDecoder
330 } else if fieldName5 == 0 {
331 fieldName5 = fieldHash
332 fieldDecoder5 = fieldDecoder
333 } else if fieldName6 == 0 {
334 fieldName6 = fieldHash
335 fieldDecoder6 = fieldDecoder
336 } else if fieldName7 == 0 {
337 fieldName7 = fieldHash
338 fieldDecoder7 = fieldDecoder
339 } else {
340 fieldName8 = fieldHash
341 fieldDecoder8 = fieldDecoder
342 }
343 }
344 return &eightFieldsStructDecoder{typ,
345 fieldName1, fieldDecoder1,
346 fieldName2, fieldDecoder2,
347 fieldName3, fieldDecoder3,
348 fieldName4, fieldDecoder4,
349 fieldName5, fieldDecoder5,
350 fieldName6, fieldDecoder6,
351 fieldName7, fieldDecoder7,
352 fieldName8, fieldDecoder8}
353 case 9:
354 var fieldName1 int64
355 var fieldName2 int64
356 var fieldName3 int64
357 var fieldName4 int64
358 var fieldName5 int64
359 var fieldName6 int64
360 var fieldName7 int64
361 var fieldName8 int64
362 var fieldName9 int64
363 var fieldDecoder1 *structFieldDecoder
364 var fieldDecoder2 *structFieldDecoder
365 var fieldDecoder3 *structFieldDecoder
366 var fieldDecoder4 *structFieldDecoder
367 var fieldDecoder5 *structFieldDecoder
368 var fieldDecoder6 *structFieldDecoder
369 var fieldDecoder7 *structFieldDecoder
370 var fieldDecoder8 *structFieldDecoder
371 var fieldDecoder9 *structFieldDecoder
372 for fieldName, fieldDecoder := range fields {
373 fieldHash := calcHash(fieldName, ctx.caseSensitive())
374 _, known := knownHash[fieldHash]
375 if known {
376 return &generalStructDecoder{typ, fields, false}
377 }
378 knownHash[fieldHash] = struct{}{}
379 if fieldName1 == 0 {
380 fieldName1 = fieldHash
381 fieldDecoder1 = fieldDecoder
382 } else if fieldName2 == 0 {
383 fieldName2 = fieldHash
384 fieldDecoder2 = fieldDecoder
385 } else if fieldName3 == 0 {
386 fieldName3 = fieldHash
387 fieldDecoder3 = fieldDecoder
388 } else if fieldName4 == 0 {
389 fieldName4 = fieldHash
390 fieldDecoder4 = fieldDecoder
391 } else if fieldName5 == 0 {
392 fieldName5 = fieldHash
393 fieldDecoder5 = fieldDecoder
394 } else if fieldName6 == 0 {
395 fieldName6 = fieldHash
396 fieldDecoder6 = fieldDecoder
397 } else if fieldName7 == 0 {
398 fieldName7 = fieldHash
399 fieldDecoder7 = fieldDecoder
400 } else if fieldName8 == 0 {
401 fieldName8 = fieldHash
402 fieldDecoder8 = fieldDecoder
403 } else {
404 fieldName9 = fieldHash
405 fieldDecoder9 = fieldDecoder
406 }
407 }
408 return &nineFieldsStructDecoder{typ,
409 fieldName1, fieldDecoder1,
410 fieldName2, fieldDecoder2,
411 fieldName3, fieldDecoder3,
412 fieldName4, fieldDecoder4,
413 fieldName5, fieldDecoder5,
414 fieldName6, fieldDecoder6,
415 fieldName7, fieldDecoder7,
416 fieldName8, fieldDecoder8,
417 fieldName9, fieldDecoder9}
418 case 10:
419 var fieldName1 int64
420 var fieldName2 int64
421 var fieldName3 int64
422 var fieldName4 int64
423 var fieldName5 int64
424 var fieldName6 int64
425 var fieldName7 int64
426 var fieldName8 int64
427 var fieldName9 int64
428 var fieldName10 int64
429 var fieldDecoder1 *structFieldDecoder
430 var fieldDecoder2 *structFieldDecoder
431 var fieldDecoder3 *structFieldDecoder
432 var fieldDecoder4 *structFieldDecoder
433 var fieldDecoder5 *structFieldDecoder
434 var fieldDecoder6 *structFieldDecoder
435 var fieldDecoder7 *structFieldDecoder
436 var fieldDecoder8 *structFieldDecoder
437 var fieldDecoder9 *structFieldDecoder
438 var fieldDecoder10 *structFieldDecoder
439 for fieldName, fieldDecoder := range fields {
440 fieldHash := calcHash(fieldName, ctx.caseSensitive())
441 _, known := knownHash[fieldHash]
442 if known {
443 return &generalStructDecoder{typ, fields, false}
444 }
445 knownHash[fieldHash] = struct{}{}
446 if fieldName1 == 0 {
447 fieldName1 = fieldHash
448 fieldDecoder1 = fieldDecoder
449 } else if fieldName2 == 0 {
450 fieldName2 = fieldHash
451 fieldDecoder2 = fieldDecoder
452 } else if fieldName3 == 0 {
453 fieldName3 = fieldHash
454 fieldDecoder3 = fieldDecoder
455 } else if fieldName4 == 0 {
456 fieldName4 = fieldHash
457 fieldDecoder4 = fieldDecoder
458 } else if fieldName5 == 0 {
459 fieldName5 = fieldHash
460 fieldDecoder5 = fieldDecoder
461 } else if fieldName6 == 0 {
462 fieldName6 = fieldHash
463 fieldDecoder6 = fieldDecoder
464 } else if fieldName7 == 0 {
465 fieldName7 = fieldHash
466 fieldDecoder7 = fieldDecoder
467 } else if fieldName8 == 0 {
468 fieldName8 = fieldHash
469 fieldDecoder8 = fieldDecoder
470 } else if fieldName9 == 0 {
471 fieldName9 = fieldHash
472 fieldDecoder9 = fieldDecoder
473 } else {
474 fieldName10 = fieldHash
475 fieldDecoder10 = fieldDecoder
476 }
477 }
478 return &tenFieldsStructDecoder{typ,
479 fieldName1, fieldDecoder1,
480 fieldName2, fieldDecoder2,
481 fieldName3, fieldDecoder3,
482 fieldName4, fieldDecoder4,
483 fieldName5, fieldDecoder5,
484 fieldName6, fieldDecoder6,
485 fieldName7, fieldDecoder7,
486 fieldName8, fieldDecoder8,
487 fieldName9, fieldDecoder9,
488 fieldName10, fieldDecoder10}
489 }
490 return &generalStructDecoder{typ, fields, false}
491}
492
// generalStructDecoder is the fallback struct decoder: it looks fields up by
// name in a map instead of dispatching on precomputed hashes, and is the
// only decoder able to report unknown fields.
type generalStructDecoder struct {
	typ                   reflect2.Type
	fields                map[string]*structFieldDecoder
	disallowUnknownFields bool
}
498
// Decode reads one JSON object, dispatching each member to its field decoder
// via decodeOneField, and verifies the closing '}'.
func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.readObjectStart() {
		return
	}
	if !iter.incrementDepth() {
		return
	}
	var c byte
	// Seed c with ',' so the first field is decoded before nextToken is
	// consulted; the loop then continues while commas separate members.
	for c = ','; c == ','; c = iter.nextToken() {
		decoder.decodeOneField(ptr, iter)
	}
	// Prefix errors with the struct type name for context (named types only).
	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
	}
	if c != '}' {
		iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
	}
	iter.decrementDepth()
}
518
519func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
520 var field string
521 var fieldDecoder *structFieldDecoder
522 if iter.cfg.objectFieldMustBeSimpleString {
523 fieldBytes := iter.ReadStringAsSlice()
524 field = *(*string)(unsafe.Pointer(&fieldBytes))
525 fieldDecoder = decoder.fields[field]
526 if fieldDecoder == nil && !iter.cfg.caseSensitive {
527 fieldDecoder = decoder.fields[strings.ToLower(field)]
528 }
529 } else {
530 field = iter.ReadString()
531 fieldDecoder = decoder.fields[field]
532 if fieldDecoder == nil && !iter.cfg.caseSensitive {
533 fieldDecoder = decoder.fields[strings.ToLower(field)]
534 }
535 }
536 if fieldDecoder == nil {
537 if decoder.disallowUnknownFields {
538 msg := "found unknown field: " + field
539 iter.ReportError("ReadObject", msg)
540 }
541 c := iter.nextToken()
542 if c != ':' {
543 iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
544 }
545 iter.Skip()
546 return
547 }
548 c := iter.nextToken()
549 if c != ':' {
550 iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
551 }
552 fieldDecoder.Decode(ptr, iter)
553}
554
// skipObjectDecoder handles structs with no decodable fields: it validates
// that the input is an object or null and skips it entirely.
type skipObjectDecoder struct {
	typ reflect2.Type
}
558
559func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
560 valueType := iter.WhatIsNext()
561 if valueType != ObjectValue && valueType != NilValue {
562 iter.ReportError("skipObjectDecoder", "expect object or null")
563 return
564 }
565 iter.Skip()
566}
567
// oneFieldStructDecoder dispatches on a precomputed name hash for structs
// with exactly one decodable field.
type oneFieldStructDecoder struct {
	typ          reflect2.Type
	fieldHash    int64
	fieldDecoder *structFieldDecoder
}
573
574func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
575 if !iter.readObjectStart() {
576 return
577 }
578 if !iter.incrementDepth() {
579 return
580 }
581 for {
582 if iter.readFieldHash() == decoder.fieldHash {
583 decoder.fieldDecoder.Decode(ptr, iter)
584 } else {
585 iter.Skip()
586 }
587 if iter.isObjectEnd() {
588 break
589 }
590 }
591 if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
592 iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
593 }
594 iter.decrementDepth()
595}
596
// twoFieldsStructDecoder dispatches on precomputed name hashes for structs
// with exactly two decodable fields.
type twoFieldsStructDecoder struct {
	typ           reflect2.Type
	fieldHash1    int64
	fieldDecoder1 *structFieldDecoder
	fieldHash2    int64
	fieldDecoder2 *structFieldDecoder
}
604
605func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
606 if !iter.readObjectStart() {
607 return
608 }
609 if !iter.incrementDepth() {
610 return
611 }
612 for {
613 switch iter.readFieldHash() {
614 case decoder.fieldHash1:
615 decoder.fieldDecoder1.Decode(ptr, iter)
616 case decoder.fieldHash2:
617 decoder.fieldDecoder2.Decode(ptr, iter)
618 default:
619 iter.Skip()
620 }
621 if iter.isObjectEnd() {
622 break
623 }
624 }
625 if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
626 iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
627 }
628 iter.decrementDepth()
629}
630
// threeFieldsStructDecoder dispatches on precomputed name hashes for structs
// with exactly three decodable fields.
type threeFieldsStructDecoder struct {
	typ           reflect2.Type
	fieldHash1    int64
	fieldDecoder1 *structFieldDecoder
	fieldHash2    int64
	fieldDecoder2 *structFieldDecoder
	fieldHash3    int64
	fieldDecoder3 *structFieldDecoder
}
640
// Decode reads one JSON object, matching each member's hash against the three
// known fields and skipping anything else.
func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.readObjectStart() {
		return
	}
	if !iter.incrementDepth() {
		return
	}
	for {
		switch iter.readFieldHash() {
		case decoder.fieldHash1:
			decoder.fieldDecoder1.Decode(ptr, iter)
		case decoder.fieldHash2:
			decoder.fieldDecoder2.Decode(ptr, iter)
		case decoder.fieldHash3:
			decoder.fieldDecoder3.Decode(ptr, iter)
		default:
			iter.Skip()
		}
		if iter.isObjectEnd() {
			break
		}
	}
	// Prefix errors with the struct type name (named types only).
	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
	}
	iter.decrementDepth()
}
668
// fourFieldsStructDecoder dispatches on precomputed name hashes for structs
// with exactly four decodable fields.
type fourFieldsStructDecoder struct {
	typ           reflect2.Type
	fieldHash1    int64
	fieldDecoder1 *structFieldDecoder
	fieldHash2    int64
	fieldDecoder2 *structFieldDecoder
	fieldHash3    int64
	fieldDecoder3 *structFieldDecoder
	fieldHash4    int64
	fieldDecoder4 *structFieldDecoder
}
680
// Decode reads one JSON object, matching each member's hash against the four
// known fields and skipping anything else.
func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.readObjectStart() {
		return
	}
	if !iter.incrementDepth() {
		return
	}
	for {
		switch iter.readFieldHash() {
		case decoder.fieldHash1:
			decoder.fieldDecoder1.Decode(ptr, iter)
		case decoder.fieldHash2:
			decoder.fieldDecoder2.Decode(ptr, iter)
		case decoder.fieldHash3:
			decoder.fieldDecoder3.Decode(ptr, iter)
		case decoder.fieldHash4:
			decoder.fieldDecoder4.Decode(ptr, iter)
		default:
			iter.Skip()
		}
		if iter.isObjectEnd() {
			break
		}
	}
	// Prefix errors with the struct type name (named types only).
	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
	}
	iter.decrementDepth()
}
710
// fiveFieldsStructDecoder dispatches on precomputed name hashes for structs
// with exactly five decodable fields.
type fiveFieldsStructDecoder struct {
	typ           reflect2.Type
	fieldHash1    int64
	fieldDecoder1 *structFieldDecoder
	fieldHash2    int64
	fieldDecoder2 *structFieldDecoder
	fieldHash3    int64
	fieldDecoder3 *structFieldDecoder
	fieldHash4    int64
	fieldDecoder4 *structFieldDecoder
	fieldHash5    int64
	fieldDecoder5 *structFieldDecoder
}
724
// Decode reads one JSON object, matching each member's hash against the five
// known fields and skipping anything else.
func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.readObjectStart() {
		return
	}
	if !iter.incrementDepth() {
		return
	}
	for {
		switch iter.readFieldHash() {
		case decoder.fieldHash1:
			decoder.fieldDecoder1.Decode(ptr, iter)
		case decoder.fieldHash2:
			decoder.fieldDecoder2.Decode(ptr, iter)
		case decoder.fieldHash3:
			decoder.fieldDecoder3.Decode(ptr, iter)
		case decoder.fieldHash4:
			decoder.fieldDecoder4.Decode(ptr, iter)
		case decoder.fieldHash5:
			decoder.fieldDecoder5.Decode(ptr, iter)
		default:
			iter.Skip()
		}
		if iter.isObjectEnd() {
			break
		}
	}
	// Prefix errors with the struct type name (named types only).
	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
	}
	iter.decrementDepth()
}
756
// sixFieldsStructDecoder dispatches on precomputed name hashes for structs
// with exactly six decodable fields.
type sixFieldsStructDecoder struct {
	typ           reflect2.Type
	fieldHash1    int64
	fieldDecoder1 *structFieldDecoder
	fieldHash2    int64
	fieldDecoder2 *structFieldDecoder
	fieldHash3    int64
	fieldDecoder3 *structFieldDecoder
	fieldHash4    int64
	fieldDecoder4 *structFieldDecoder
	fieldHash5    int64
	fieldDecoder5 *structFieldDecoder
	fieldHash6    int64
	fieldDecoder6 *structFieldDecoder
}
772
// Decode reads one JSON object, matching each member's hash against the six
// known fields and skipping anything else.
func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.readObjectStart() {
		return
	}
	if !iter.incrementDepth() {
		return
	}
	for {
		switch iter.readFieldHash() {
		case decoder.fieldHash1:
			decoder.fieldDecoder1.Decode(ptr, iter)
		case decoder.fieldHash2:
			decoder.fieldDecoder2.Decode(ptr, iter)
		case decoder.fieldHash3:
			decoder.fieldDecoder3.Decode(ptr, iter)
		case decoder.fieldHash4:
			decoder.fieldDecoder4.Decode(ptr, iter)
		case decoder.fieldHash5:
			decoder.fieldDecoder5.Decode(ptr, iter)
		case decoder.fieldHash6:
			decoder.fieldDecoder6.Decode(ptr, iter)
		default:
			iter.Skip()
		}
		if iter.isObjectEnd() {
			break
		}
	}
	// Prefix errors with the struct type name (named types only).
	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
	}
	iter.decrementDepth()
}
806
// sevenFieldsStructDecoder dispatches on precomputed name hashes for structs
// with exactly seven decodable fields.
type sevenFieldsStructDecoder struct {
	typ           reflect2.Type
	fieldHash1    int64
	fieldDecoder1 *structFieldDecoder
	fieldHash2    int64
	fieldDecoder2 *structFieldDecoder
	fieldHash3    int64
	fieldDecoder3 *structFieldDecoder
	fieldHash4    int64
	fieldDecoder4 *structFieldDecoder
	fieldHash5    int64
	fieldDecoder5 *structFieldDecoder
	fieldHash6    int64
	fieldDecoder6 *structFieldDecoder
	fieldHash7    int64
	fieldDecoder7 *structFieldDecoder
}
824
// Decode reads one JSON object, matching each member's hash against the seven
// known fields and skipping anything else.
func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if !iter.readObjectStart() {
		return
	}
	if !iter.incrementDepth() {
		return
	}
	for {
		switch iter.readFieldHash() {
		case decoder.fieldHash1:
			decoder.fieldDecoder1.Decode(ptr, iter)
		case decoder.fieldHash2:
			decoder.fieldDecoder2.Decode(ptr, iter)
		case decoder.fieldHash3:
			decoder.fieldDecoder3.Decode(ptr, iter)
		case decoder.fieldHash4:
			decoder.fieldDecoder4.Decode(ptr, iter)
		case decoder.fieldHash5:
			decoder.fieldDecoder5.Decode(ptr, iter)
		case decoder.fieldHash6:
			decoder.fieldDecoder6.Decode(ptr, iter)
		case decoder.fieldHash7:
			decoder.fieldDecoder7.Decode(ptr, iter)
		default:
			iter.Skip()
		}
		if iter.isObjectEnd() {
			break
		}
	}
	// Prefix errors with the struct type name (named types only).
	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
	}
	iter.decrementDepth()
}
860
// eightFieldsStructDecoder dispatches on precomputed name hashes for structs
// with exactly eight decodable fields.
type eightFieldsStructDecoder struct {
	typ           reflect2.Type
	fieldHash1    int64
	fieldDecoder1 *structFieldDecoder
	fieldHash2    int64
	fieldDecoder2 *structFieldDecoder
	fieldHash3    int64
	fieldDecoder3 *structFieldDecoder
	fieldHash4    int64
	fieldDecoder4 *structFieldDecoder
	fieldHash5    int64
	fieldDecoder5 *structFieldDecoder
	fieldHash6    int64
	fieldDecoder6 *structFieldDecoder
	fieldHash7    int64
	fieldDecoder7 *structFieldDecoder
	fieldHash8    int64
	fieldDecoder8 *structFieldDecoder
}
880
881func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
882 if !iter.readObjectStart() {
883 return
884 }
885 if !iter.incrementDepth() {
886 return
887 }
888 for {
889 switch iter.readFieldHash() {
890 case decoder.fieldHash1:
891 decoder.fieldDecoder1.Decode(ptr, iter)
892 case decoder.fieldHash2:
893 decoder.fieldDecoder2.Decode(ptr, iter)
894 case decoder.fieldHash3:
895 decoder.fieldDecoder3.Decode(ptr, iter)
896 case decoder.fieldHash4:
897 decoder.fieldDecoder4.Decode(ptr, iter)
898 case decoder.fieldHash5:
899 decoder.fieldDecoder5.Decode(ptr, iter)
900 case decoder.fieldHash6:
901 decoder.fieldDecoder6.Decode(ptr, iter)
902 case decoder.fieldHash7:
903 decoder.fieldDecoder7.Decode(ptr, iter)
904 case decoder.fieldHash8:
905 decoder.fieldDecoder8.Decode(ptr, iter)
906 default:
907 iter.Skip()
908 }
909 if iter.isObjectEnd() {
910 break
911 }
912 }
913 if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
914 iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
915 }
916 iter.decrementDepth()
917}
918
919type nineFieldsStructDecoder struct {
920 typ reflect2.Type
921 fieldHash1 int64
922 fieldDecoder1 *structFieldDecoder
923 fieldHash2 int64
924 fieldDecoder2 *structFieldDecoder
925 fieldHash3 int64
926 fieldDecoder3 *structFieldDecoder
927 fieldHash4 int64
928 fieldDecoder4 *structFieldDecoder
929 fieldHash5 int64
930 fieldDecoder5 *structFieldDecoder
931 fieldHash6 int64
932 fieldDecoder6 *structFieldDecoder
933 fieldHash7 int64
934 fieldDecoder7 *structFieldDecoder
935 fieldHash8 int64
936 fieldDecoder8 *structFieldDecoder
937 fieldHash9 int64
938 fieldDecoder9 *structFieldDecoder
939}
940
941func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
942 if !iter.readObjectStart() {
943 return
944 }
945 if !iter.incrementDepth() {
946 return
947 }
948 for {
949 switch iter.readFieldHash() {
950 case decoder.fieldHash1:
951 decoder.fieldDecoder1.Decode(ptr, iter)
952 case decoder.fieldHash2:
953 decoder.fieldDecoder2.Decode(ptr, iter)
954 case decoder.fieldHash3:
955 decoder.fieldDecoder3.Decode(ptr, iter)
956 case decoder.fieldHash4:
957 decoder.fieldDecoder4.Decode(ptr, iter)
958 case decoder.fieldHash5:
959 decoder.fieldDecoder5.Decode(ptr, iter)
960 case decoder.fieldHash6:
961 decoder.fieldDecoder6.Decode(ptr, iter)
962 case decoder.fieldHash7:
963 decoder.fieldDecoder7.Decode(ptr, iter)
964 case decoder.fieldHash8:
965 decoder.fieldDecoder8.Decode(ptr, iter)
966 case decoder.fieldHash9:
967 decoder.fieldDecoder9.Decode(ptr, iter)
968 default:
969 iter.Skip()
970 }
971 if iter.isObjectEnd() {
972 break
973 }
974 }
975 if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
976 iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
977 }
978 iter.decrementDepth()
979}
980
981type tenFieldsStructDecoder struct {
982 typ reflect2.Type
983 fieldHash1 int64
984 fieldDecoder1 *structFieldDecoder
985 fieldHash2 int64
986 fieldDecoder2 *structFieldDecoder
987 fieldHash3 int64
988 fieldDecoder3 *structFieldDecoder
989 fieldHash4 int64
990 fieldDecoder4 *structFieldDecoder
991 fieldHash5 int64
992 fieldDecoder5 *structFieldDecoder
993 fieldHash6 int64
994 fieldDecoder6 *structFieldDecoder
995 fieldHash7 int64
996 fieldDecoder7 *structFieldDecoder
997 fieldHash8 int64
998 fieldDecoder8 *structFieldDecoder
999 fieldHash9 int64
1000 fieldDecoder9 *structFieldDecoder
1001 fieldHash10 int64
1002 fieldDecoder10 *structFieldDecoder
1003}
1004
1005func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
1006 if !iter.readObjectStart() {
1007 return
1008 }
1009 if !iter.incrementDepth() {
1010 return
1011 }
1012 for {
1013 switch iter.readFieldHash() {
1014 case decoder.fieldHash1:
1015 decoder.fieldDecoder1.Decode(ptr, iter)
1016 case decoder.fieldHash2:
1017 decoder.fieldDecoder2.Decode(ptr, iter)
1018 case decoder.fieldHash3:
1019 decoder.fieldDecoder3.Decode(ptr, iter)
1020 case decoder.fieldHash4:
1021 decoder.fieldDecoder4.Decode(ptr, iter)
1022 case decoder.fieldHash5:
1023 decoder.fieldDecoder5.Decode(ptr, iter)
1024 case decoder.fieldHash6:
1025 decoder.fieldDecoder6.Decode(ptr, iter)
1026 case decoder.fieldHash7:
1027 decoder.fieldDecoder7.Decode(ptr, iter)
1028 case decoder.fieldHash8:
1029 decoder.fieldDecoder8.Decode(ptr, iter)
1030 case decoder.fieldHash9:
1031 decoder.fieldDecoder9.Decode(ptr, iter)
1032 case decoder.fieldHash10:
1033 decoder.fieldDecoder10.Decode(ptr, iter)
1034 default:
1035 iter.Skip()
1036 }
1037 if iter.isObjectEnd() {
1038 break
1039 }
1040 }
1041 if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
1042 iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
1043 }
1044 iter.decrementDepth()
1045}
1046
1047type structFieldDecoder struct {
1048 field reflect2.StructField
1049 fieldDecoder ValDecoder
1050}
1051
1052func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
1053 fieldPtr := decoder.field.UnsafeGet(ptr)
1054 decoder.fieldDecoder.Decode(fieldPtr, iter)
1055 if iter.Error != nil && iter.Error != io.EOF {
1056 iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
1057 }
1058}
1059
1060type stringModeStringDecoder struct {
1061 elemDecoder ValDecoder
1062 cfg *frozenConfig
1063}
1064
1065func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
1066 decoder.elemDecoder.Decode(ptr, iter)
1067 str := *((*string)(ptr))
1068 tempIter := decoder.cfg.BorrowIterator([]byte(str))
1069 defer decoder.cfg.ReturnIterator(tempIter)
1070 *((*string)(ptr)) = tempIter.ReadString()
1071}
1072
1073type stringModeNumberDecoder struct {
1074 elemDecoder ValDecoder
1075}
1076
1077func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
1078 if iter.WhatIsNext() == NilValue {
1079 decoder.elemDecoder.Decode(ptr, iter)
1080 return
1081 }
1082
1083 c := iter.nextToken()
1084 if c != '"' {
1085 iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
1086 return
1087 }
1088 decoder.elemDecoder.Decode(ptr, iter)
1089 if iter.Error != nil {
1090 return
1091 }
1092 c = iter.readByte()
1093 if c != '"' {
1094 iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
1095 return
1096 }
1097}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
new file mode 100644
index 0000000..152e3ef
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
@@ -0,0 +1,211 @@
1package jsoniter
2
3import (
4 "fmt"
5 "github.com/modern-go/reflect2"
6 "io"
7 "reflect"
8 "unsafe"
9)
10
11func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
12 type bindingTo struct {
13 binding *Binding
14 toName string
15 ignored bool
16 }
17 orderedBindings := []*bindingTo{}
18 structDescriptor := describeStruct(ctx, typ)
19 for _, binding := range structDescriptor.Fields {
20 for _, toName := range binding.ToNames {
21 new := &bindingTo{
22 binding: binding,
23 toName: toName,
24 }
25 for _, old := range orderedBindings {
26 if old.toName != toName {
27 continue
28 }
29 old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
30 }
31 orderedBindings = append(orderedBindings, new)
32 }
33 }
34 if len(orderedBindings) == 0 {
35 return &emptyStructEncoder{}
36 }
37 finalOrderedFields := []structFieldTo{}
38 for _, bindingTo := range orderedBindings {
39 if !bindingTo.ignored {
40 finalOrderedFields = append(finalOrderedFields, structFieldTo{
41 encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
42 toName: bindingTo.toName,
43 })
44 }
45 }
46 return &structEncoder{typ, finalOrderedFields}
47}
48
49func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
50 encoder := createEncoderOfNative(ctx, typ)
51 if encoder != nil {
52 return encoder
53 }
54 kind := typ.Kind()
55 switch kind {
56 case reflect.Interface:
57 return &dynamicEncoder{typ}
58 case reflect.Struct:
59 return &structEncoder{typ: typ}
60 case reflect.Array:
61 return &arrayEncoder{}
62 case reflect.Slice:
63 return &sliceEncoder{}
64 case reflect.Map:
65 return encoderOfMap(ctx, typ)
66 case reflect.Ptr:
67 return &OptionalEncoder{}
68 default:
69 return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
70 }
71}
72
73func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
74 newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
75 oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
76 if newTagged {
77 if oldTagged {
78 if len(old.levels) > len(new.levels) {
79 return true, false
80 } else if len(new.levels) > len(old.levels) {
81 return false, true
82 } else {
83 return true, true
84 }
85 } else {
86 return true, false
87 }
88 } else {
89 if oldTagged {
90 return true, false
91 }
92 if len(old.levels) > len(new.levels) {
93 return true, false
94 } else if len(new.levels) > len(old.levels) {
95 return false, true
96 } else {
97 return true, true
98 }
99 }
100}
101
102type structFieldEncoder struct {
103 field reflect2.StructField
104 fieldEncoder ValEncoder
105 omitempty bool
106}
107
108func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
109 fieldPtr := encoder.field.UnsafeGet(ptr)
110 encoder.fieldEncoder.Encode(fieldPtr, stream)
111 if stream.Error != nil && stream.Error != io.EOF {
112 stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
113 }
114}
115
116func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
117 fieldPtr := encoder.field.UnsafeGet(ptr)
118 return encoder.fieldEncoder.IsEmpty(fieldPtr)
119}
120
121func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
122 isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
123 if !converted {
124 return false
125 }
126 fieldPtr := encoder.field.UnsafeGet(ptr)
127 return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
128}
129
// IsEmbeddedPtrNil is implemented by encoders that can report whether the
// value they encode lives behind a nil embedded pointer.
type IsEmbeddedPtrNil interface {
	IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
}
133
134type structEncoder struct {
135 typ reflect2.Type
136 fields []structFieldTo
137}
138
139type structFieldTo struct {
140 encoder *structFieldEncoder
141 toName string
142}
143
144func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
145 stream.WriteObjectStart()
146 isNotFirst := false
147 for _, field := range encoder.fields {
148 if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
149 continue
150 }
151 if field.encoder.IsEmbeddedPtrNil(ptr) {
152 continue
153 }
154 if isNotFirst {
155 stream.WriteMore()
156 }
157 stream.WriteObjectField(field.toName)
158 field.encoder.Encode(ptr, stream)
159 isNotFirst = true
160 }
161 stream.WriteObjectEnd()
162 if stream.Error != nil && stream.Error != io.EOF {
163 stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
164 }
165}
166
167func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
168 return false
169}
170
// emptyStructEncoder handles struct types with no encodable fields.
type emptyStructEncoder struct {
}

// Encode writes a literal {} regardless of the value.
func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteEmptyObject()
}

// IsEmpty always reports false, so the empty object is still emitted even
// under omitempty.
func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return false
}
181
182type stringModeNumberEncoder struct {
183 elemEncoder ValEncoder
184}
185
186func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
187 stream.writeByte('"')
188 encoder.elemEncoder.Encode(ptr, stream)
189 stream.writeByte('"')
190}
191
192func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
193 return encoder.elemEncoder.IsEmpty(ptr)
194}
195
196type stringModeStringEncoder struct {
197 elemEncoder ValEncoder
198 cfg *frozenConfig
199}
200
201func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
202 tempStream := encoder.cfg.BorrowStream(nil)
203 tempStream.Attachment = stream.Attachment
204 defer encoder.cfg.ReturnStream(tempStream)
205 encoder.elemEncoder.Encode(ptr, tempStream)
206 stream.WriteString(string(tempStream.Buffer()))
207}
208
209func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
210 return encoder.elemEncoder.IsEmpty(ptr)
211}
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
new file mode 100644
index 0000000..23d8a3a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream.go
@@ -0,0 +1,210 @@
1package jsoniter
2
3import (
4 "io"
5)
6
// Stream is an io.Writer like object, with JSON specific write functions.
// Error is not returned as return value, but stored as Error member on this stream instance.
type Stream struct {
	cfg        *frozenConfig
	out        io.Writer   // destination writer; nil means buffer-only mode
	buf        []byte      // internal write buffer
	Error      error       // error encountered while writing, if any
	indention  int         // current indentation in spaces; 0 means compact output
	Attachment interface{} // open for customized encoder
}
17
18// NewStream create new stream instance.
19// cfg can be jsoniter.ConfigDefault.
20// out can be nil if write to internal buffer.
21// bufSize is the initial size for the internal buffer in bytes.
22func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
23 return &Stream{
24 cfg: cfg.(*frozenConfig),
25 out: out,
26 buf: make([]byte, 0, bufSize),
27 Error: nil,
28 indention: 0,
29 }
30}
31
// Pool returns a pool that can provide more streams with the same configuration.
func (stream *Stream) Pool() StreamPool {
	return stream.cfg
}

// Reset reuses this stream instance by assigning a new writer.
// The buffer's contents are dropped but its capacity is kept.
func (stream *Stream) Reset(out io.Writer) {
	stream.out = out
	stream.buf = stream.buf[:0]
}

// Available returns how many bytes are unused in the buffer.
func (stream *Stream) Available() int {
	return cap(stream.buf) - len(stream.buf)
}

// Buffered returns the number of bytes that have been written into the current buffer.
func (stream *Stream) Buffered() int {
	return len(stream.buf)
}

// Buffer returns the internal buffer; if the writer is nil, use this method
// to take the result.
func (stream *Stream) Buffer() []byte {
	return stream.buf
}

// SetBuffer allows appending to the internal buffer directly.
func (stream *Stream) SetBuffer(buf []byte) {
	stream.buf = buf
}
62
63// Write writes the contents of p into the buffer.
64// It returns the number of bytes written.
65// If nn < len(p), it also returns an error explaining
66// why the write is short.
67func (stream *Stream) Write(p []byte) (nn int, err error) {
68 stream.buf = append(stream.buf, p...)
69 if stream.out != nil {
70 nn, err = stream.out.Write(stream.buf)
71 stream.buf = stream.buf[nn:]
72 return
73 }
74 return len(p), nil
75}
76
// writeByte appends a single byte to the buffer.
func (stream *Stream) writeByte(c byte) {
	stream.buf = append(stream.buf, c)
}

// writeTwoBytes appends two bytes to the buffer in order.
func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
	stream.buf = append(stream.buf, c1, c2)
}

// writeThreeBytes appends three bytes to the buffer in order.
func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
	stream.buf = append(stream.buf, c1, c2, c3)
}

// writeFourBytes appends four bytes to the buffer in order.
func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
	stream.buf = append(stream.buf, c1, c2, c3, c4)
}

// writeFiveBytes appends five bytes to the buffer in order.
func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
	stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
}
97
98// Flush writes any buffered data to the underlying io.Writer.
99func (stream *Stream) Flush() error {
100 if stream.out == nil {
101 return nil
102 }
103 if stream.Error != nil {
104 return stream.Error
105 }
106 _, err := stream.out.Write(stream.buf)
107 if err != nil {
108 if stream.Error == nil {
109 stream.Error = err
110 }
111 return err
112 }
113 stream.buf = stream.buf[:0]
114 return nil
115}
116
// WriteRaw writes s verbatim, without quotes or escaping, just like []byte.
func (stream *Stream) WriteRaw(s string) {
	stream.buf = append(stream.buf, s...)
}

// WriteNil writes the literal null to the stream.
func (stream *Stream) WriteNil() {
	stream.writeFourBytes('n', 'u', 'l', 'l')
}

// WriteTrue writes the literal true to the stream.
func (stream *Stream) WriteTrue() {
	stream.writeFourBytes('t', 'r', 'u', 'e')
}

// WriteFalse writes the literal false to the stream.
func (stream *Stream) WriteFalse() {
	stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
}
136
137// WriteBool write true or false into stream
138func (stream *Stream) WriteBool(val bool) {
139 if val {
140 stream.WriteTrue()
141 } else {
142 stream.WriteFalse()
143 }
144}
145
// WriteObjectStart writes { with possible indention.
func (stream *Stream) WriteObjectStart() {
	// Deepen the indent first so members of this object sit one step in.
	stream.indention += stream.cfg.indentionStep
	stream.writeByte('{')
	stream.writeIndention(0)
}
152
153// WriteObjectField write "field": with possible indention
154func (stream *Stream) WriteObjectField(field string) {
155 stream.WriteString(field)
156 if stream.indention > 0 {
157 stream.writeTwoBytes(':', ' ')
158 } else {
159 stream.writeByte(':')
160 }
161}
162
163// WriteObjectEnd write } with possible indention
164func (stream *Stream) WriteObjectEnd() {
165 stream.writeIndention(stream.cfg.indentionStep)
166 stream.indention -= stream.cfg.indentionStep
167 stream.writeByte('}')
168}
169
170// WriteEmptyObject write {}
171func (stream *Stream) WriteEmptyObject() {
172 stream.writeByte('{')
173 stream.writeByte('}')
174}
175
// WriteMore writes , with possible indention.
func (stream *Stream) WriteMore() {
	stream.writeByte(',')
	stream.writeIndention(0)
}

// WriteArrayStart writes [ with possible indention.
func (stream *Stream) WriteArrayStart() {
	// Deepen the indent first so elements of this array sit one step in.
	stream.indention += stream.cfg.indentionStep
	stream.writeByte('[')
	stream.writeIndention(0)
}

// WriteEmptyArray writes [].
func (stream *Stream) WriteEmptyArray() {
	stream.writeTwoBytes('[', ']')
}

// WriteArrayEnd writes ] with possible indention, unwinding one indent step.
func (stream *Stream) WriteArrayEnd() {
	stream.writeIndention(stream.cfg.indentionStep)
	stream.indention -= stream.cfg.indentionStep
	stream.writeByte(']')
}
200
201func (stream *Stream) writeIndention(delta int) {
202 if stream.indention == 0 {
203 return
204 }
205 stream.writeByte('\n')
206 toWrite := stream.indention - delta
207 for i := 0; i < toWrite; i++ {
208 stream.buf = append(stream.buf, ' ')
209 }
210}
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
new file mode 100644
index 0000000..826aa59
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_float.go
@@ -0,0 +1,111 @@
1package jsoniter
2
3import (
4 "fmt"
5 "math"
6 "strconv"
7)
8
// pow10 holds the powers of ten from 10^0 through 10^6, indexed by exponent.
// The lossy float writers use it to left-pad fractional digits with zeros.
// Initialized directly rather than in init() so there is no ordering hazard.
var pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
14
15// WriteFloat32 write float32 to stream
16func (stream *Stream) WriteFloat32(val float32) {
17 if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
18 stream.Error = fmt.Errorf("unsupported value: %f", val)
19 return
20 }
21 abs := math.Abs(float64(val))
22 fmt := byte('f')
23 // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
24 if abs != 0 {
25 if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
26 fmt = 'e'
27 }
28 }
29 stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
30}
31
// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster
func (stream *Stream) WriteFloat32Lossy(val float32) {
	if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
		stream.Error = fmt.Errorf("unsupported value: %f", val)
		return
	}
	if val < 0 {
		stream.writeByte('-')
		val = -val
	}
	// Magnitudes beyond the fast-path range fall back to the exact formatter.
	if val > 0x4ffffff {
		stream.WriteFloat32(val)
		return
	}
	precision := 6
	exp := uint64(1000000) // 10^precision
	// Scale into a fixed-point integer, rounding the 7th fractional digit.
	lval := uint64(float64(val)*float64(exp) + 0.5)
	stream.WriteUint64(lval / exp)
	fval := lval % exp
	if fval == 0 {
		return
	}
	stream.writeByte('.')
	// Left-pad the fractional part with zeros (e.g. 0.05 -> "0.05").
	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
		stream.writeByte('0')
	}
	stream.WriteUint64(fval)
	// Trim trailing zeros from the fraction; fval != 0 guarantees a nonzero
	// digit stops the loop before the decimal point.
	for stream.buf[len(stream.buf)-1] == '0' {
		stream.buf = stream.buf[:len(stream.buf)-1]
	}
}
63
64// WriteFloat64 write float64 to stream
65func (stream *Stream) WriteFloat64(val float64) {
66 if math.IsInf(val, 0) || math.IsNaN(val) {
67 stream.Error = fmt.Errorf("unsupported value: %f", val)
68 return
69 }
70 abs := math.Abs(val)
71 fmt := byte('f')
72 // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
73 if abs != 0 {
74 if abs < 1e-6 || abs >= 1e21 {
75 fmt = 'e'
76 }
77 }
78 stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
79}
80
// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster
func (stream *Stream) WriteFloat64Lossy(val float64) {
	if math.IsInf(val, 0) || math.IsNaN(val) {
		stream.Error = fmt.Errorf("unsupported value: %f", val)
		return
	}
	if val < 0 {
		stream.writeByte('-')
		val = -val
	}
	// Magnitudes beyond the fast-path range fall back to the exact formatter.
	if val > 0x4ffffff {
		stream.WriteFloat64(val)
		return
	}
	precision := 6
	exp := uint64(1000000) // 10^precision
	// Scale into a fixed-point integer, rounding the 7th fractional digit.
	lval := uint64(val*float64(exp) + 0.5)
	stream.WriteUint64(lval / exp)
	fval := lval % exp
	if fval == 0 {
		return
	}
	stream.writeByte('.')
	// Left-pad the fractional part with zeros (e.g. 0.05 -> "0.05").
	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
		stream.writeByte('0')
	}
	stream.WriteUint64(fval)
	// Trim trailing zeros from the fraction; fval != 0 guarantees a nonzero
	// digit stops the loop before the decimal point.
	for stream.buf[len(stream.buf)-1] == '0' {
		stream.buf = stream.buf[:len(stream.buf)-1]
	}
}
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
new file mode 100644
index 0000000..d1059ee
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_int.go
@@ -0,0 +1,190 @@
1package jsoniter
2
// digits caches, for every value 0-999, its three ASCII digit bytes packed
// into the low 24 bits (hundreds, tens, ones), plus a hint in the top byte
// telling writeFirstBuf how many leading digits to skip: 2 for values < 10,
// 1 for values < 100, 0 otherwise.
var digits []uint32

func init() {
	digits = make([]uint32, 1000)
	for i := uint32(0); i < 1000; i++ {
		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
		if i < 10 {
			digits[i] += 2 << 24
		} else if i < 100 {
			digits[i] += 1 << 24
		}
	}
}
16
// writeFirstBuf appends the leading digit group packed in v, honoring the
// skip-count hint in v's top byte: 2 means emit one digit, 1 means two
// digits, 0 means all three.
func writeFirstBuf(space []byte, v uint32) []byte {
	switch v >> 24 {
	case 0:
		return append(space, byte(v>>16), byte(v>>8), byte(v))
	case 1:
		return append(space, byte(v>>8), byte(v))
	default:
		return append(space, byte(v))
	}
}

// writeBuf appends all three digit bytes packed in v, most significant first.
func writeBuf(buf []byte, v uint32) []byte {
	return append(buf, byte(v>>16), byte(v>>8), byte(v))
}
31
32// WriteUint8 write uint8 to stream
33func (stream *Stream) WriteUint8(val uint8) {
34 stream.buf = writeFirstBuf(stream.buf, digits[val])
35}
36
37// WriteInt8 write int8 to stream
38func (stream *Stream) WriteInt8(nval int8) {
39 var val uint8
40 if nval < 0 {
41 val = uint8(-nval)
42 stream.buf = append(stream.buf, '-')
43 } else {
44 val = uint8(nval)
45 }
46 stream.buf = writeFirstBuf(stream.buf, digits[val])
47}
48
49// WriteUint16 write uint16 to stream
50func (stream *Stream) WriteUint16(val uint16) {
51 q1 := val / 1000
52 if q1 == 0 {
53 stream.buf = writeFirstBuf(stream.buf, digits[val])
54 return
55 }
56 r1 := val - q1*1000
57 stream.buf = writeFirstBuf(stream.buf, digits[q1])
58 stream.buf = writeBuf(stream.buf, digits[r1])
59 return
60}
61
62// WriteInt16 write int16 to stream
63func (stream *Stream) WriteInt16(nval int16) {
64 var val uint16
65 if nval < 0 {
66 val = uint16(-nval)
67 stream.buf = append(stream.buf, '-')
68 } else {
69 val = uint16(nval)
70 }
71 stream.WriteUint16(val)
72}
73
// WriteUint32 write uint32 to stream
// The value is peeled into thousands groups (q1..q3 quotients, r1..r2
// remainders) and emitted via the precomputed digits table, three digits at
// a time, most significant group first.
func (stream *Stream) WriteUint32(val uint32) {
	q1 := val / 1000
	if q1 == 0 {
		stream.buf = writeFirstBuf(stream.buf, digits[val])
		return
	}
	r1 := val - q1*1000
	q2 := q1 / 1000
	if q2 == 0 {
		stream.buf = writeFirstBuf(stream.buf, digits[q1])
		stream.buf = writeBuf(stream.buf, digits[r1])
		return
	}
	r2 := q1 - q2*1000
	q3 := q2 / 1000
	if q3 == 0 {
		stream.buf = writeFirstBuf(stream.buf, digits[q2])
	} else {
		// q3 is at most 4 for uint32, so a single raw digit suffices.
		r3 := q2 - q3*1000
		stream.buf = append(stream.buf, byte(q3+'0'))
		stream.buf = writeBuf(stream.buf, digits[r3])
	}
	stream.buf = writeBuf(stream.buf, digits[r2])
	stream.buf = writeBuf(stream.buf, digits[r1])
}
100
101// WriteInt32 write int32 to stream
102func (stream *Stream) WriteInt32(nval int32) {
103 var val uint32
104 if nval < 0 {
105 val = uint32(-nval)
106 stream.buf = append(stream.buf, '-')
107 } else {
108 val = uint32(nval)
109 }
110 stream.WriteUint32(val)
111}
112
// WriteUint64 write uint64 to stream
// The value is peeled into thousands groups (q1..q6 quotients, r1..r5
// remainders) and emitted via the precomputed digits table, three digits at
// a time, most significant group first.
func (stream *Stream) WriteUint64(val uint64) {
	q1 := val / 1000
	if q1 == 0 {
		stream.buf = writeFirstBuf(stream.buf, digits[val])
		return
	}
	r1 := val - q1*1000
	q2 := q1 / 1000
	if q2 == 0 {
		stream.buf = writeFirstBuf(stream.buf, digits[q1])
		stream.buf = writeBuf(stream.buf, digits[r1])
		return
	}
	r2 := q1 - q2*1000
	q3 := q2 / 1000
	if q3 == 0 {
		stream.buf = writeFirstBuf(stream.buf, digits[q2])
		stream.buf = writeBuf(stream.buf, digits[r2])
		stream.buf = writeBuf(stream.buf, digits[r1])
		return
	}
	r3 := q2 - q3*1000
	q4 := q3 / 1000
	if q4 == 0 {
		stream.buf = writeFirstBuf(stream.buf, digits[q3])
		stream.buf = writeBuf(stream.buf, digits[r3])
		stream.buf = writeBuf(stream.buf, digits[r2])
		stream.buf = writeBuf(stream.buf, digits[r1])
		return
	}
	r4 := q3 - q4*1000
	q5 := q4 / 1000
	if q5 == 0 {
		stream.buf = writeFirstBuf(stream.buf, digits[q4])
		stream.buf = writeBuf(stream.buf, digits[r4])
		stream.buf = writeBuf(stream.buf, digits[r3])
		stream.buf = writeBuf(stream.buf, digits[r2])
		stream.buf = writeBuf(stream.buf, digits[r1])
		return
	}
	r5 := q4 - q5*1000
	q6 := q5 / 1000
	if q6 == 0 {
		stream.buf = writeFirstBuf(stream.buf, digits[q5])
	} else {
		// q6 is at most 18 for uint64, so one leading group suffices.
		stream.buf = writeFirstBuf(stream.buf, digits[q6])
		r6 := q5 - q6*1000
		stream.buf = writeBuf(stream.buf, digits[r6])
	}
	stream.buf = writeBuf(stream.buf, digits[r5])
	stream.buf = writeBuf(stream.buf, digits[r4])
	stream.buf = writeBuf(stream.buf, digits[r3])
	stream.buf = writeBuf(stream.buf, digits[r2])
	stream.buf = writeBuf(stream.buf, digits[r1])
}
169
170// WriteInt64 write int64 to stream
171func (stream *Stream) WriteInt64(nval int64) {
172 var val uint64
173 if nval < 0 {
174 val = uint64(-nval)
175 stream.buf = append(stream.buf, '-')
176 } else {
177 val = uint64(nval)
178 }
179 stream.WriteUint64(val)
180}
181
182// WriteInt write int to stream
183func (stream *Stream) WriteInt(val int) {
184 stream.WriteInt64(int64(val))
185}
186
187// WriteUint write uint to stream
188func (stream *Stream) WriteUint(val uint) {
189 stream.WriteUint64(uint64(val))
190}
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
new file mode 100644
index 0000000..54c2ba0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_str.go
@@ -0,0 +1,372 @@
1package jsoniter
2
3import (
4 "unicode/utf8"
5)
6
// htmlSafeSet holds the value true if the ASCII character with the given
// array position can be safely represented inside a JSON string, embedded
// inside of HTML <script> tags, without any additional escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), the backslash character ("\"), HTML opening and closing
// tags ("<" and ">"), and the ampersand ("&").
var htmlSafeSet = func() (set [utf8.RuneSelf]bool) {
	for c := ' '; c < utf8.RuneSelf; c++ {
		set[c] = true
	}
	set['"'] = false
	set['\\'] = false
	set['<'] = false
	set['>'] = false
	set['&'] = false
	return
}()
112
// safeSet holds the value true if the ASCII character with the given array
// position can be represented inside a JSON string without any further
// escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), and the backslash character ("\").
var safeSet = func() (set [utf8.RuneSelf]bool) {
	for c := ' '; c < utf8.RuneSelf; c++ {
		set[c] = true
	}
	set['"'] = false
	set['\\'] = false
	return
}()
217
// hex maps a nibble value (0-15) to its lowercase hexadecimal digit.
var hex = "0123456789abcdef"
219
220// WriteStringWithHTMLEscaped write string to stream with html special characters escaped
221func (stream *Stream) WriteStringWithHTMLEscaped(s string) {
222 valLen := len(s)
223 stream.buf = append(stream.buf, '"')
224 // write string, the fast path, without utf8 and escape support
225 i := 0
226 for ; i < valLen; i++ {
227 c := s[i]
228 if c < utf8.RuneSelf && htmlSafeSet[c] {
229 stream.buf = append(stream.buf, c)
230 } else {
231 break
232 }
233 }
234 if i == valLen {
235 stream.buf = append(stream.buf, '"')
236 return
237 }
238 writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen)
239}
240
// writeStringSlowPathWithHTMLEscaped finishes writing s[i:] after the fast
// path stopped at a byte needing attention: it emits JSON escapes for
// control characters, quotes, backslashes and HTML-sensitive bytes (per
// htmlSafeSet), replaces invalid UTF-8 with \ufffd, escapes U+2028/U+2029,
// and finally appends the closing quote.
func writeStringSlowPathWithHTMLEscaped(stream *Stream, i int, s string, valLen int) {
	start := i
	// for the remaining parts, we process them char by char
	for i < valLen {
		if b := s[i]; b < utf8.RuneSelf {
			if htmlSafeSet[b] {
				i++
				continue
			}
			// Flush the safe run accumulated since the last escape.
			if start < i {
				stream.WriteRaw(s[start:i])
			}
			switch b {
			case '\\', '"':
				stream.writeTwoBytes('\\', b)
			case '\n':
				stream.writeTwoBytes('\\', 'n')
			case '\r':
				stream.writeTwoBytes('\\', 'r')
			case '\t':
				stream.writeTwoBytes('\\', 't')
			default:
				// This encodes bytes < 0x20 except for \t, \n and \r.
				// If escapeHTML is set, it also escapes <, >, and &
				// because they can lead to security holes when
				// user-controlled strings are rendered into JSON
				// and served to some browsers.
				stream.WriteRaw(`\u00`)
				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		if c == utf8.RuneError && size == 1 {
			// Invalid UTF-8 byte: flush and substitute the replacement char.
			if start < i {
				stream.WriteRaw(s[start:i])
			}
			stream.WriteRaw(`\ufffd`)
			i++
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		if c == '\u2028' || c == '\u2029' {
			if start < i {
				stream.WriteRaw(s[start:i])
			}
			stream.WriteRaw(`\u202`)
			stream.writeByte(hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	// Flush whatever safe tail remains, then close the string.
	if start < len(s) {
		stream.WriteRaw(s[start:])
	}
	stream.writeByte('"')
}
309
310// WriteString write string to stream without html escape
311func (stream *Stream) WriteString(s string) {
312 valLen := len(s)
313 stream.buf = append(stream.buf, '"')
314 // write string, the fast path, without utf8 and escape support
315 i := 0
316 for ; i < valLen; i++ {
317 c := s[i]
318 if c > 31 && c != '"' && c != '\\' {
319 stream.buf = append(stream.buf, c)
320 } else {
321 break
322 }
323 }
324 if i == valLen {
325 stream.buf = append(stream.buf, '"')
326 return
327 }
328 writeStringSlowPath(stream, i, s, valLen)
329}
330
// writeStringSlowPath finishes writing s[i:] after the fast path stopped at
// a byte needing escaping. ASCII bytes outside safeSet get JSON escapes;
// bytes >= utf8.RuneSelf are copied through one byte at a time with no
// UTF-8 validation. The closing quote is appended at the end.
func writeStringSlowPath(stream *Stream, i int, s string, valLen int) {
	start := i
	// for the remaining parts, we process them char by char
	for i < valLen {
		if b := s[i]; b < utf8.RuneSelf {
			if safeSet[b] {
				i++
				continue
			}
			// Flush the safe run accumulated since the last escape.
			if start < i {
				stream.WriteRaw(s[start:i])
			}
			switch b {
			case '\\', '"':
				stream.writeTwoBytes('\\', b)
			case '\n':
				stream.writeTwoBytes('\\', 'n')
			case '\r':
				stream.writeTwoBytes('\\', 'r')
			case '\t':
				stream.writeTwoBytes('\\', 't')
			default:
				// This encodes bytes < 0x20 except for \t, \n and \r.
				// If escapeHTML is set, it also escapes <, >, and &
				// because they can lead to security holes when
				// user-controlled strings are rendered into JSON
				// and served to some browsers.
				stream.WriteRaw(`\u00`)
				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		// Non-ASCII byte: passed through as part of the pending raw run.
		i++
		continue
	}
	// Flush whatever safe tail remains, then close the string.
	if start < len(s) {
		stream.WriteRaw(s[start:])
	}
	stream.writeByte('"')
}
diff --git a/vendor/github.com/json-iterator/go/test.sh b/vendor/github.com/json-iterator/go/test.sh
new file mode 100644
index 0000000..f4e7c0b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/test.sh
@@ -0,0 +1,12 @@
#!/usr/bin/env bash
# Run the test suite for every non-vendored package and aggregate the
# per-package coverage profiles into a single coverage.txt file.

set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
    # Collect coverage for this package against the root module.
    go test -coverprofile=profile.out -coverpkg=github.com/json-iterator/go $d
    if [ -f profile.out ]; then
        cat profile.out >> coverage.txt
        rm profile.out
    fi
done
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
new file mode 100644
index 0000000..87d5574
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -0,0 +1,304 @@
1Copyright (c) 2012 The Go Authors. All rights reserved.
2Copyright (c) 2019 Klaus Post. All rights reserved.
3
4Redistribution and use in source and binary forms, with or without
5modification, are permitted provided that the following conditions are
6met:
7
8 * Redistributions of source code must retain the above copyright
9notice, this list of conditions and the following disclaimer.
10 * Redistributions in binary form must reproduce the above
11copyright notice, this list of conditions and the following disclaimer
12in the documentation and/or other materials provided with the
13distribution.
14 * Neither the name of Google Inc. nor the names of its
15contributors may be used to endorse or promote products derived from
16this software without specific prior written permission.
17
18THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30------------------
31
32Files: gzhttp/*
33
34 Apache License
35 Version 2.0, January 2004
36 http://www.apache.org/licenses/
37
38 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
39
40 1. Definitions.
41
42 "License" shall mean the terms and conditions for use, reproduction,
43 and distribution as defined by Sections 1 through 9 of this document.
44
45 "Licensor" shall mean the copyright owner or entity authorized by
46 the copyright owner that is granting the License.
47
48 "Legal Entity" shall mean the union of the acting entity and all
49 other entities that control, are controlled by, or are under common
50 control with that entity. For the purposes of this definition,
51 "control" means (i) the power, direct or indirect, to cause the
52 direction or management of such entity, whether by contract or
53 otherwise, or (ii) ownership of fifty percent (50%) or more of the
54 outstanding shares, or (iii) beneficial ownership of such entity.
55
56 "You" (or "Your") shall mean an individual or Legal Entity
57 exercising permissions granted by this License.
58
59 "Source" form shall mean the preferred form for making modifications,
60 including but not limited to software source code, documentation
61 source, and configuration files.
62
63 "Object" form shall mean any form resulting from mechanical
64 transformation or translation of a Source form, including but
65 not limited to compiled object code, generated documentation,
66 and conversions to other media types.
67
68 "Work" shall mean the work of authorship, whether in Source or
69 Object form, made available under the License, as indicated by a
70 copyright notice that is included in or attached to the work
71 (an example is provided in the Appendix below).
72
73 "Derivative Works" shall mean any work, whether in Source or Object
74 form, that is based on (or derived from) the Work and for which the
75 editorial revisions, annotations, elaborations, or other modifications
76 represent, as a whole, an original work of authorship. For the purposes
77 of this License, Derivative Works shall not include works that remain
78 separable from, or merely link (or bind by name) to the interfaces of,
79 the Work and Derivative Works thereof.
80
81 "Contribution" shall mean any work of authorship, including
82 the original version of the Work and any modifications or additions
83 to that Work or Derivative Works thereof, that is intentionally
84 submitted to Licensor for inclusion in the Work by the copyright owner
85 or by an individual or Legal Entity authorized to submit on behalf of
86 the copyright owner. For the purposes of this definition, "submitted"
87 means any form of electronic, verbal, or written communication sent
88 to the Licensor or its representatives, including but not limited to
89 communication on electronic mailing lists, source code control systems,
90 and issue tracking systems that are managed by, or on behalf of, the
91 Licensor for the purpose of discussing and improving the Work, but
92 excluding communication that is conspicuously marked or otherwise
93 designated in writing by the copyright owner as "Not a Contribution."
94
95 "Contributor" shall mean Licensor and any individual or Legal Entity
96 on behalf of whom a Contribution has been received by Licensor and
97 subsequently incorporated within the Work.
98
99 2. Grant of Copyright License. Subject to the terms and conditions of
100 this License, each Contributor hereby grants to You a perpetual,
101 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
102 copyright license to reproduce, prepare Derivative Works of,
103 publicly display, publicly perform, sublicense, and distribute the
104 Work and such Derivative Works in Source or Object form.
105
106 3. Grant of Patent License. Subject to the terms and conditions of
107 this License, each Contributor hereby grants to You a perpetual,
108 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
109 (except as stated in this section) patent license to make, have made,
110 use, offer to sell, sell, import, and otherwise transfer the Work,
111 where such license applies only to those patent claims licensable
112 by such Contributor that are necessarily infringed by their
113 Contribution(s) alone or by combination of their Contribution(s)
114 with the Work to which such Contribution(s) was submitted. If You
115 institute patent litigation against any entity (including a
116 cross-claim or counterclaim in a lawsuit) alleging that the Work
117 or a Contribution incorporated within the Work constitutes direct
118 or contributory patent infringement, then any patent licenses
119 granted to You under this License for that Work shall terminate
120 as of the date such litigation is filed.
121
122 4. Redistribution. You may reproduce and distribute copies of the
123 Work or Derivative Works thereof in any medium, with or without
124 modifications, and in Source or Object form, provided that You
125 meet the following conditions:
126
127 (a) You must give any other recipients of the Work or
128 Derivative Works a copy of this License; and
129
130 (b) You must cause any modified files to carry prominent notices
131 stating that You changed the files; and
132
133 (c) You must retain, in the Source form of any Derivative Works
134 that You distribute, all copyright, patent, trademark, and
135 attribution notices from the Source form of the Work,
136 excluding those notices that do not pertain to any part of
137 the Derivative Works; and
138
139 (d) If the Work includes a "NOTICE" text file as part of its
140 distribution, then any Derivative Works that You distribute must
141 include a readable copy of the attribution notices contained
142 within such NOTICE file, excluding those notices that do not
143 pertain to any part of the Derivative Works, in at least one
144 of the following places: within a NOTICE text file distributed
145 as part of the Derivative Works; within the Source form or
146 documentation, if provided along with the Derivative Works; or,
147 within a display generated by the Derivative Works, if and
148 wherever such third-party notices normally appear. The contents
149 of the NOTICE file are for informational purposes only and
150 do not modify the License. You may add Your own attribution
151 notices within Derivative Works that You distribute, alongside
152 or as an addendum to the NOTICE text from the Work, provided
153 that such additional attribution notices cannot be construed
154 as modifying the License.
155
156 You may add Your own copyright statement to Your modifications and
157 may provide additional or different license terms and conditions
158 for use, reproduction, or distribution of Your modifications, or
159 for any such Derivative Works as a whole, provided Your use,
160 reproduction, and distribution of the Work otherwise complies with
161 the conditions stated in this License.
162
163 5. Submission of Contributions. Unless You explicitly state otherwise,
164 any Contribution intentionally submitted for inclusion in the Work
165 by You to the Licensor shall be under the terms and conditions of
166 this License, without any additional terms or conditions.
167 Notwithstanding the above, nothing herein shall supersede or modify
168 the terms of any separate license agreement you may have executed
169 with Licensor regarding such Contributions.
170
171 6. Trademarks. This License does not grant permission to use the trade
172 names, trademarks, service marks, or product names of the Licensor,
173 except as required for reasonable and customary use in describing the
174 origin of the Work and reproducing the content of the NOTICE file.
175
176 7. Disclaimer of Warranty. Unless required by applicable law or
177 agreed to in writing, Licensor provides the Work (and each
178 Contributor provides its Contributions) on an "AS IS" BASIS,
179 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
180 implied, including, without limitation, any warranties or conditions
181 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
182 PARTICULAR PURPOSE. You are solely responsible for determining the
183 appropriateness of using or redistributing the Work and assume any
184 risks associated with Your exercise of permissions under this License.
185
186 8. Limitation of Liability. In no event and under no legal theory,
187 whether in tort (including negligence), contract, or otherwise,
188 unless required by applicable law (such as deliberate and grossly
189 negligent acts) or agreed to in writing, shall any Contributor be
190 liable to You for damages, including any direct, indirect, special,
191 incidental, or consequential damages of any character arising as a
192 result of this License or out of the use or inability to use the
193 Work (including but not limited to damages for loss of goodwill,
194 work stoppage, computer failure or malfunction, or any and all
195 other commercial damages or losses), even if such Contributor
196 has been advised of the possibility of such damages.
197
198 9. Accepting Warranty or Additional Liability. While redistributing
199 the Work or Derivative Works thereof, You may choose to offer,
200 and charge a fee for, acceptance of support, warranty, indemnity,
201 or other liability obligations and/or rights consistent with this
202 License. However, in accepting such obligations, You may act only
203 on Your own behalf and on Your sole responsibility, not on behalf
204 of any other Contributor, and only if You agree to indemnify,
205 defend, and hold each Contributor harmless for any liability
206 incurred by, or claims asserted against, such Contributor by reason
207 of your accepting any such warranty or additional liability.
208
209 END OF TERMS AND CONDITIONS
210
211 APPENDIX: How to apply the Apache License to your work.
212
213 To apply the Apache License to your work, attach the following
214 boilerplate notice, with the fields enclosed by brackets "[]"
215 replaced with your own identifying information. (Don't include
216 the brackets!) The text should be enclosed in the appropriate
217 comment syntax for the file format. We also recommend that a
218 file or class name and description of purpose be included on the
219 same "printed page" as the copyright notice for easier
220 identification within third-party archives.
221
222 Copyright 2016-2017 The New York Times Company
223
224 Licensed under the Apache License, Version 2.0 (the "License");
225 you may not use this file except in compliance with the License.
226 You may obtain a copy of the License at
227
228 http://www.apache.org/licenses/LICENSE-2.0
229
230 Unless required by applicable law or agreed to in writing, software
231 distributed under the License is distributed on an "AS IS" BASIS,
232 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
233 See the License for the specific language governing permissions and
234 limitations under the License.
235
236------------------
237
238Files: s2/cmd/internal/readahead/*
239
240The MIT License (MIT)
241
242Copyright (c) 2015 Klaus Post
243
244Permission is hereby granted, free of charge, to any person obtaining a copy
245of this software and associated documentation files (the "Software"), to deal
246in the Software without restriction, including without limitation the rights
247to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
248copies of the Software, and to permit persons to whom the Software is
249furnished to do so, subject to the following conditions:
250
251The above copyright notice and this permission notice shall be included in all
252copies or substantial portions of the Software.
253
254THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
255IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
256FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
257AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
258LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
259OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
260SOFTWARE.
261
262---------------------
263Files: snappy/*
264Files: internal/snapref/*
265
266Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
267
268Redistribution and use in source and binary forms, with or without
269modification, are permitted provided that the following conditions are
270met:
271
272 * Redistributions of source code must retain the above copyright
273notice, this list of conditions and the following disclaimer.
274 * Redistributions in binary form must reproduce the above
275copyright notice, this list of conditions and the following disclaimer
276in the documentation and/or other materials provided with the
277distribution.
278 * Neither the name of Google Inc. nor the names of its
279contributors may be used to endorse or promote products derived from
280this software without specific prior written permission.
281
282THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
283"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
284LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
285A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
286OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
287SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
288LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
289DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
290THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
291(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
292OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
293
294-----------------
295
296Files: s2/cmd/internal/filepathx/*
297
298Copyright 2016 The filepathx Authors
299
300Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
301
302The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
303
304THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore
new file mode 100644
index 0000000..3a89c6e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/.gitignore
@@ -0,0 +1,15 @@
1testdata/bench
2
3# These explicitly listed benchmark data files are for an obsolete version of
4# snappy_test.go.
5testdata/alice29.txt
6testdata/asyoulik.txt
7testdata/fireworks.jpeg
8testdata/geo.protodata
9testdata/html
10testdata/html_x_4
11testdata/kppkn.gtb
12testdata/lcet10.txt
13testdata/paper-100k.pdf
14testdata/plrabn12.txt
15testdata/urls.10K
diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE
new file mode 100644
index 0000000..1d2d645
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/LICENSE
@@ -0,0 +1,28 @@
1Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
2Copyright (c) 2019 Klaus Post. All rights reserved.
3
4Redistribution and use in source and binary forms, with or without
5modification, are permitted provided that the following conditions are
6met:
7
8 * Redistributions of source code must retain the above copyright
9notice, this list of conditions and the following disclaimer.
10 * Redistributions in binary form must reproduce the above
11copyright notice, this list of conditions and the following disclaimer
12in the documentation and/or other materials provided with the
13distribution.
14 * Neither the name of Google Inc. nor the names of its
15contributors may be used to endorse or promote products derived from
16this software without specific prior written permission.
17
18THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md
new file mode 100644
index 0000000..8284bb0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/README.md
@@ -0,0 +1,1120 @@
1# S2 Compression
2
3S2 is an extension of [Snappy](https://github.com/google/snappy).
4
5S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads.
6
7Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy.
8This means that S2 can seamlessly replace Snappy without converting compressed content.
9
10S2 can produce Snappy compatible output, faster and better than Snappy.
11If you want full benefit of the changes you should use s2 without Snappy compatibility.
12
13S2 is designed to have high throughput on content that cannot be compressed.
14This is important, so you don't have to worry about spending CPU cycles on already compressed data.
15
16## Benefits over Snappy
17
18* Better compression
19* Adjustable compression (3 levels)
20* Concurrent stream compression
21* Faster decompression, even for Snappy compatible content
22* Concurrent Snappy/S2 stream decompression
23* Skip forward in compressed stream
24* Random seeking with indexes
25* Compatible with reading Snappy compressed content
26* Smaller block size overhead on incompressible blocks
27* Block concatenation
28* Block Dictionary support
29* Uncompressed stream mode
30* Automatic stream size padding
31* Snappy compatible block compression
32
33## Drawbacks over Snappy
34
35* Not optimized for 32 bit systems
36* Streams use slightly more memory due to larger blocks and concurrency (configurable)
37
38# Usage
39
40Installation: `go get -u github.com/klauspost/compress/s2`
41
42Full package documentation:
43
44[![godoc][1]][2]
45
46[1]: https://godoc.org/github.com/klauspost/compress?status.svg
47[2]: https://godoc.org/github.com/klauspost/compress/s2
48
49## Compression
50
51```Go
52func EncodeStream(src io.Reader, dst io.Writer) error {
53 enc := s2.NewWriter(dst)
54 _, err := io.Copy(enc, src)
55 if err != nil {
56 enc.Close()
57 return err
58 }
59 // Blocks until compression is done.
60 return enc.Close()
61}
62```
63
64You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete.
65
66For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method.
67
68The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2.
69It is possible to flush any buffered data using the `Flush()` method.
70This will block until all data sent to the encoder has been written to the output.
71
72S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader.
73
74As a final method to compress data, if you have a single block of data you would like to have encoded as a stream,
75a slightly more efficient method is to use the `EncodeBuffer` method.
76This will take ownership of the buffer until the stream is closed.
77
78```Go
79func EncodeStream(src []byte, dst io.Writer) error {
80 enc := s2.NewWriter(dst)
81 // The encoder owns the buffer until Flush or Close is called.
82 err := enc.EncodeBuffer(buf)
83 if err != nil {
84 enc.Close()
85 return err
86 }
87 // Blocks until compression is done.
88 return enc.Close()
89}
90```
91
92Each call to `EncodeBuffer` will result in discrete blocks being created without buffering,
93so it should only be used a single time per stream.
94If you need to write several blocks, you should use the regular io.Writer interface.
95
96
97## Decompression
98
99```Go
100func DecodeStream(src io.Reader, dst io.Writer) error {
101 dec := s2.NewReader(src)
102 _, err := io.Copy(dst, dec)
103 return err
104}
105```
106
107Similar to the Writer, a Reader can be reused using the `Reset` method.
108
109For the best possible throughput, there is an `EncodeBuffer(buf []byte)` function available.
110However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed.
111
112For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`.
113Do however note that these functions (similar to Snappy) do not provide validation of data,
114so data corruption may be undetected. Stream encoding provides CRC checks of data.
115
116It is possible to efficiently skip forward in a compressed stream using the `Skip()` method.
117For big skips the decompressor is able to skip blocks without decompressing them.
118
119## Single Blocks
120
121Similar to Snappy, S2 offers single block compression.
122Blocks do not offer the same flexibility and safety as streams,
123but may be preferable for very small payloads, less than 100K.
124
125Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result.
126It is possible to provide a destination buffer.
127If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used.
128If not, a new one will be allocated.
129
130Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression.
131
132Similarly to decompress a block you can use `dst, err := s2.Decode(nil, src)`.
133Again an optional destination buffer can be supplied.
134The `s2.DecodedLen(src)` can be used to get the minimum capacity needed.
135If that is not satisfied a new buffer will be allocated.
136
137Block functions always operate on a single goroutine, since they should only be used for small payloads.
138
139# Commandline tools
140
141Some very simple command line tools are provided: `s2c` for compression and `s2d` for decompression.
142
143Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases).
144
145Installing them requires Go to be installed. To install them, use:
146
147`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest`
148
149To build binaries to the current folder use:
150
151`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d`
152
153
154## s2c
155
156```
157Usage: s2c [options] file1 file2
158
159Compresses all files supplied as input separately.
160Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'.
161By default output files will be overwritten.
162Use - as the only file name to read from stdin and write to stdout.
163
164Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
165Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
166
167File names beginning with 'http://' and 'https://' will be downloaded and compressed.
168Only http response code 200 is accepted.
169
170Options:
171 -bench int
172 Run benchmark n times. No output will be written
173 -blocksize string
174 Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M")
175 -c Write all output to stdout. Multiple input files will be concatenated
176 -cpu int
177 Compress using this amount of threads (default 32)
178 -faster
179 Compress faster, but with a minor compression loss
180 -help
181 Display help
182 -index
183 Add seek index (default true)
184 -o string
185 Write output to another file. Single input file only
186 -pad string
187 Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1")
188 -q Don't write any output to terminal, except errors
189 -rm
190 Delete source file(s) after successful compression
191 -safe
192 Do not overwrite output files
193 -slower
194 Compress more, but a lot slower
195 -snappy
196 Generate Snappy compatible output stream
197 -verify
198 Verify written files
199
200```
201
202## s2d
203
204```
205Usage: s2d [options] file1 file2
206
207Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'.
208Output file names have the extension removed. By default output files will be overwritten.
209Use - as the only file name to read from stdin and write to stdout.
210
211Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
212Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
213
214File names beginning with 'http://' and 'https://' will be downloaded and decompressed.
215Extensions on downloaded files are ignored. Only http response code 200 is accepted.
216
217Options:
218 -bench int
219 Run benchmark n times. No output will be written
220 -c Write all output to stdout. Multiple input files will be concatenated
221 -help
222 Display help
223 -o string
224 Write output to another file. Single input file only
225 -offset string
226 Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
227 -q Don't write any output to terminal, except errors
228 -rm
229 Delete source file(s) after successful decompression
230 -safe
231 Do not overwrite output files
232 -tail string
233 Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
234 -verify
235 Verify files, but do not write output
236```
237
238## s2sx: self-extracting archives
239
240s2sx allows creating self-extracting archives with no dependencies.
241
242By default, executables are created for the same platforms as the host os,
243but this can be overridden with `-os` and `-arch` parameters.
244
245Extracted files have 0666 permissions, except when untar option used.
246
247```
248Usage: s2sx [options] file1 file2
249
250Compresses all files supplied as input separately.
251If files have '.s2' extension they are assumed to be compressed already.
252Output files are written as 'filename.s2sx' and with '.exe' for windows targets.
253If output is big, an additional file with ".more" is written. This must be included as well.
254By default output files will be overwritten.
255
256Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
257Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
258
259Options:
260 -arch string
261 Destination architecture (default "amd64")
262 -c Write all output to stdout. Multiple input files will be concatenated
263 -cpu int
264 Compress using this amount of threads (default 32)
265 -help
266 Display help
267 -max string
268 Maximum executable size. Rest will be written to another file. (default "1G")
269 -os string
270 Destination operating system (default "windows")
271 -q Don't write any output to terminal, except errors
272 -rm
273 Delete source file(s) after successful compression
274 -safe
275 Do not overwrite output files
276 -untar
277 Untar on destination
278```
279
280Available platforms are:
281
282 * darwin-amd64
283 * darwin-arm64
284 * linux-amd64
285 * linux-arm
286 * linux-arm64
287 * linux-mips64
288 * linux-ppc64le
289 * windows-386
290 * windows-amd64
291
292By default, there is a size limit of 1GB for the output executable.
293
294When this is exceeded the remaining file content is written to a file called
295output+`.more`. This file must be included and placed alongside the
296executable for a successful extraction.
297
298This file *must* have the same name as the executable, so if the executable is renamed,
299so must the `.more` file.
300
301This functionality is disabled with stdin/stdout.
302
303### Self-extracting TAR files
304
305If you wrap a TAR file you can specify `-untar` to make it untar on the destination host.
306
307Files are extracted to the current folder with the path specified in the tar file.
308
309Note that tar files are not validated before they are wrapped.
310
311For security reasons files that move below the root folder are not allowed.
312
313# Performance
314
315This section will focus on comparisons to Snappy.
316This package is solely aimed at replacing Snappy as a high speed compression package.
317If you are mainly looking for better compression [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd)
318gives better compression, but typically at speeds slightly below "better" mode in this package.
319
320Compression is increased compared to Snappy, mostly around 5-20% and the throughput is typically 25-40% increased (single threaded) compared to the Snappy Go implementation.
321
322Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput.
323
324A "better" compression mode is also available. This allows to trade a bit of speed for a minor compression gain.
325The content compressed in this mode is fully compatible with the standard decoder.
326
327Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
328
329| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
330|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
331| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% |
332| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - |
333| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% |
334| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - |
335| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% |
336| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - |
337| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% |
338| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - |
339| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% |
340| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - |
341| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% |
342| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - |
343| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% |
344| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - |
345| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% |
346| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - |
347| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% |
348| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - |
349| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% |
350| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - |
351| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% |
352| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - |
353
354### Legend
355
356* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
357* `S2 Throughput`: Throughput of S2 in MB/s.
358* `S2 % smaller`: How many percent of the Snappy output size is S2 better.
359* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
360* `"better" throughput`: Throughput of S2 "better" mode in MB/s.
361* `"better" % smaller`: How many percent of the Snappy output size is S2 better when using "better" compression.
362
363There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
364
365Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size.
366
367The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
368
369Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
370This is likely dominated by synchronization overhead, which is confirmed by the fact that single threaded performance is higher (see above).
371
372## Decompression
373
374S2 attempts to create content that is also fast to decompress, except in "better" mode where the smallest representation is used.
375
376S2 vs Snappy **decompression** speed. Both operating on single core:
377
378| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
379|-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------|
380| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x |
381| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x |
382| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x |
383| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x |
384| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x |
385| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x |
386| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x |
387| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x |
388| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x |
389| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x |
390| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x |
391
392### Legend
393
394* `S2 Throughput`: Decompression speed of S2 encoded content.
395* `Better Throughput`: Decompression speed of S2 "better" encoded content.
396* `vs Snappy`: Decompression speed of each mode compared to Snappy.
397
398
399While the decompression code hasn't changed, there is a significant speedup in decompression speed.
400S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
401While this reduces compression a bit, it improves decompression speed.
402
403The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
404
405Without assembly decompression is also very fast; single goroutine decompression speed. No assembly:
406
407| File                           | S2 speedup    | S2 throughput |
408|--------------------------------|---------------|---------------|
409| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
410| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
411| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
412| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
413| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
414| enwik9.s2 | 1.67x | 681.53 MB/s |
415| adresser.json.s2 | 3.41x | 4230.53 MB/s |
416| silesia.tar.s2                 | 1.52x         | 811.58 MB/s   |
417
418Even though S2 typically compresses better than Snappy, decompression speed is always better.
419
420### Concurrent Stream Decompression
421
422For full stream decompression S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent)
423that will decode a full stream using multiple goroutines.
424
425Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3 <input>`, best of 3:
426
427| Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` |
428|-------------------------------------------|------------|------------|------------|------------|-------------|
429| enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
430| enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s |
431| sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
432| sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
433| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s |
434
435Scaling can be expected to be pretty linear until memory bandwidth is saturated.
436
437For now the DecodeConcurrent can only be used for full streams without seeking or combining with regular reads.
438
439## Block compression
440
441
442When compressing blocks, no concurrent compression is performed, just as with Snappy.
443This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
444
445An important change is that incompressible blocks will not be more than at most 10 bytes bigger than the input.
446In rare, worst case scenario Snappy blocks could be significantly bigger than the input.
447
448### Mixed content blocks
449
450The most reliable benchmark is a wide dataset.
451For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
45253927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
453
454| * | Input | Output | Reduction | MB/s |
455|-------------------|------------|------------|------------|------------|
456| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** |
457| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 |
458| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 |
459| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 |
460| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 |
461| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 |
462
463S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
464"Better" mode provides the same compression speed as LZ4 with better compression ratio.
465
466When outputting Snappy compatible output it still delivers better throughput (150MB/s more) and better compression.
467
468As can be seen from the other benchmarks decompression should also be easier on the S2 generated output.
469
470Though they cannot be compared due to different decompression speeds here are the speed/size comparisons for
471other Go compressors:
472
473| * | Input | Output | Reduction | MB/s |
474|-------------------|------------|------------|-----------|--------|
475| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 |
476| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 |
477| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 |
478| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 |
479
480### Standard block compression
481
482Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
483So individual benchmarks should only be seen as a guideline and the overall picture is more important.
484
485These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above.
486
487Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
488
489AMD64 assembly is used for both S2 and Snappy.
490
491| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
492|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
493| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s |
494| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s |
495| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s |
496| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s |
497| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s |
498| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s |
499| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s |
500| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s |
501| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s |
502| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s |
503| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s |
504| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s |
505| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s |
506| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s |
507| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s |
508| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s |
509
510
511Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
512
513Decompression speed is better than Snappy, except in one case.
514
515Since payloads are very small the variance in terms of size is rather big, so they should only be seen as a general guideline.
516
517Size is on average around Snappy, but varies on content type.
518In cases where compression is worse, it usually is compensated by a speed boost.
519
520
521### Better compression
522
523Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
524So individual benchmarks should only be seen as a guideline and the overall picture is more important.
525
526| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
527|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
528| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s |
529| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s |
530| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s |
531| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s |
532| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s |
533| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s |
534| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s |
535| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s |
536| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s |
537| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s |
538| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s |
539| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s |
540| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s |
541| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s |
542| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s |
543| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s |
544
545
546Except for the mostly incompressible JPEG image compression is better and usually in the
547double digits in terms of percentage reduction over Snappy.
548
549The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder
550to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down.
551
552This mode aims to provide better compression at the expense of performance and achieves that
553without a huge performance penalty, except on very small blocks.
554
555Decompression speed suffers a little compared to the regular S2 mode,
556but still manages to be close to Snappy in spite of increased compression.
557
558# Best compression mode
559
560S2 offers a "best" compression mode.
561
562This will compress as much as possible with little regard to CPU usage.
563
564Mainly for offline compression, but where decompression speed should still
565be high and compatible with other S2 compressed data.
566
567Some examples compared on 16 core CPU, amd64 assembly used:
568
569```
570* enwik10
571Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s
572Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s
573Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s
574
575* github-june-2days-2019.json
576Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s
577Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s
578Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s
579
580* nyc-taxi-data-10M.csv
581Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s
582Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s
583Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s
584
585* 10gb.tar
586Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s
587Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s
588Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/s
589
590* consensus.db.10gb
591Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
592Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
593Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
594```
595
596Decompression speed should be around the same as using the 'better' compression mode.
597
598## Dictionaries
599
600*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
601either encoding or decoding. Performance improvements can be expected in the future.*
602
603Adding dictionaries allows providing a custom dictionary that will serve as lookup in the beginning of blocks.
604
605The same dictionary *must* be used for both encoding and decoding.
606S2 does not keep track of whether the same dictionary is used,
607and using the wrong dictionary will most often not result in an error when decompressing.
608
609Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
610This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
611and treat the blocks similarly.
612
613Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
614the same usage scenario applies to S2 dictionaries.
615
616> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
617
618S2 further limits the dictionary to only be enabled on the first 64KB of a block.
619This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
620
621### Compression
622
623Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
624and a 64KB dictionary trained with zStandard the following sizes can be achieved.
625
626| | Default | Better | Best |
627|--------------------|------------------|------------------|-----------------------|
628| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
629| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 bytes (10.49%) |
630
631So for highly repetitive content, this case provides an almost 3x reduction in size.
632
633For less uniform data we will use the Go source code tree.
634Compressing First 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input:
635
636| | Default | Better | Best |
637|--------------------|-------------------|-------------------|-------------------|
638| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
639| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
640| Saving/file | 362 bytes | 428 bytes | 472 bytes |
641
642
643### Creating Dictionaries
644
645There are no tools to create dictionaries in S2.
646However, there are multiple ways to create a useful dictionary:
647
648#### Using a Sample File
649
650If your input is very uniform, you can just use a sample file as the dictionary.
651
652For example in the `github_users_sample_set` above, the average compression only goes up from
65310.49% to 11.48% by using the first file as dictionary compared to using a dedicated dictionary.
654
655```Go
656 // Read a sample
657 sample, err := os.ReadFile("sample.json")
658
659 // Create a dictionary.
660 dict := s2.MakeDict(sample, nil)
661
662 // b := dict.Bytes() will provide a dictionary that can be saved
663 // and reloaded with s2.NewDict(b).
664
665 // To encode:
666 encoded := dict.Encode(nil, file)
667
668 // To decode:
669 decoded, err := dict.Decode(nil, file)
670```
671
672#### Using Zstandard
673
674Zstandard dictionaries can easily be converted to S2 dictionaries.
675
676This can be helpful to generate dictionaries for files that don't have a fixed structure.
677
678
679Example, with training set files placed in `./training-set`:
680
681`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
682
683This will create a dictionary of 64KB, that can be converted to a dictionary like this:
684
685```Go
686 // Decode the Zstandard dictionary.
687 insp, err := zstd.InspectDictionary(zdict)
688 if err != nil {
689 panic(err)
690 }
691
692 // We are only interested in the contents.
693 // Assume that files start with "// Copyright (c) 2023".
694 // Search for the longest match for that.
695 // This may save a few bytes.
696 dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
697
698 // b := dict.Bytes() will provide a dictionary that can be saved
699 // and reloaded with s2.NewDict(b).
700
701 // We can now encode using this dictionary
702 encodedWithDict := dict.Encode(nil, payload)
703
704 // To decode content:
705 decoded, err := dict.Decode(nil, encodedWithDict)
706```
707
708It is recommended to save the dictionary returned by `b := dict.Bytes()`, since that will contain only the S2 dictionary.
709
710This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized.
711
712Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files.
713This can be omitted, at the expense of a few bytes.
714
715# Snappy Compatibility
716
717S2 now offers full compatibility with Snappy.
718
719This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output.
720
721There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
722simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
723This uses "better" mode for all operations.
724If you would like more control, you can use the s2 package as described below:
725
726## Blocks
727
728Snappy compatible blocks can be generated with the S2 encoder.
729Compression and speed are typically a bit better. `MaxEncodedLen` is also smaller for smaller memory usage. Replace
730
731| Snappy | S2 replacement |
732|---------------------------|-----------------------|
733| snappy.Encode(...) | s2.EncodeSnappy(...) |
734| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
735
736`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output.
737
738`s2.ConcatBlocks` is compatible with snappy blocks.
739
740Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
74153927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
742
743| Encoder | Size | MB/s | Reduction |
744|-----------------------|------------|------------|------------|
745| snappy.Encode | 1128706759 | 725.59 | 71.89% |
746| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
747| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
748| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** |
749
750## Streams
751
752For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
753All other options are available, but note that block size limit is different for snappy.
754
755Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
756
757| File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best |
758|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
759| nyc-taxi-data-10M.csv | 1316042016 - 539.47MB/s | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s |
760| enwik10 (xml) | 5088294643 - 451.13MB/s | 5175840939 - 9440.69MB/s | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s |
761| 10gb.tar (mixed) | 6056946612 - 729.73MB/s | 6208571995 - 9978.05MB/s | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s |
762| github-june-2days-2019.json | 1525176492 - 933.00MB/s | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s |
763| consensus.db.10gb (db) | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s |
764
765# Decompression
766
767All decompression functions map directly to equivalent s2 functions.
768
769| Snappy | S2 replacement |
770|------------------------|--------------------|
771| snappy.Decode(...) | s2.Decode(...) |
772| snappy.DecodedLen(...) | s2.DecodedLen(...) |
773| snappy.NewReader(...) | s2.NewReader(...) |
774
775Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
776are also available for Snappy streams.
777
778If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
779on your Reader will reduce memory consumption.
780
781# Concatenating blocks and streams.
782
783Concatenating streams will concatenate the output of both without recompressing them.
784While this is inefficient in terms of compression it might be usable in certain scenarios.
785The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
786
787Blocks can be concatenated using the `ConcatBlocks` function.
788
789Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
790Streams with indexes (see below) will currently not work on concatenated streams.
791
792# Stream Seek Index
793
794S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data.
795
796The index can either be appended to the stream as a skippable block or returned for separate storage.
797
798When the index is appended to a stream it will be skipped by regular decoders,
799so the output remains compatible with other decoders.
800
801## Creating an Index
802
803To automatically add an index to a stream, add `WriterAddIndex()` option to your writer.
804Then the index will be added to the stream when `Close()` is called.
805
806```
807 // Add Index to stream...
808 enc := s2.NewWriter(w, s2.WriterAddIndex())
809 io.Copy(enc, r)
810 enc.Close()
811```
812
813If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`.
814This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
815
816```
817 // Get index for separate storage...
818 enc := s2.NewWriter(w)
819 io.Copy(enc, r)
820 index, err := enc.CloseIndex()
821```
822
823The `index` can then be used without needing to read from the stream.
824This means the index can be used without needing to seek to the end of the stream
825or for manually forwarding streams. See below.
826
827Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function.
828
829## Using Indexes
830
831To use indexes there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available.
832
833Calling ReadSeeker will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader.
834
835If 'random' is specified the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported.
836Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
837
838```
839 dec := s2.NewReader(r)
840 rs, err := dec.ReadSeeker(false, nil)
841 rs.Seek(wantOffset, io.SeekStart)
842```
843
844Get a seeker to seek forward. Since no index is provided, the index is read from the stream.
845This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
846
847A custom index can be specified which will be used if supplied.
848When using a custom index, it will not be read from the input stream.
849
850```
851 dec := s2.NewReader(r)
852 rs, err := dec.ReadSeeker(false, index)
853 rs.Seek(wantOffset, io.SeekStart)
854```
855
856This will read the index from `index`. Since we specify non-random (forward only) seeking, `r` does not have to be an io.Seeker.
857
858```
859 dec := s2.NewReader(r)
860 rs, err := dec.ReadSeeker(true, index)
861 rs.Seek(wantOffset, io.SeekStart)
862```
863
864Finally, since we specify that we want to do random seeking `r` must be an io.Seeker.
865
866The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader,
867meaning changes performed to one is reflected in the other.
868
869To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` can be used.
870
871## Manually Forwarding Streams
872
873Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type.
874This can be used for parsing indexes, either separate or in streams.
875
876In some cases it may not be possible to serve a seekable stream.
877This can for instance be an HTTP stream, where the Range request
878is sent at the start of the stream.
879
880With a little bit of extra code it is still possible to use indexes
881to forward to specific offset with a single forward skip.
882
883It is possible to load the index manually like this:
884```
885 var index s2.Index
886 _, err = index.Load(idxBytes)
887```
888
889This can be used to figure out how much to offset the compressed stream:
890
891```
892 compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
893```
894
895The `compressedOffset` is the number of bytes that should be skipped
896from the beginning of the compressed file.
897
898The `uncompressedOffset` will then be offset of the uncompressed bytes returned
899when decoding from that position. This will always be <= wantOffset.
900
901When creating a decoder it must be specified that it should *not* expect a stream identifier
902at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset`
903we create the decoder like this:
904
905```
906 dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
907```
908
909We are not completely done. We still need to forward the stream the uncompressed bytes we didn't want.
910This is done using the regular "Skip" function:
911
912```
913 err = dec.Skip(wantOffset - uncompressedOffset)
914```
915
916This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
917
918# Compact storage
919
920For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from
921a serialized index. If you remove the header it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load).
922
923This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. Returns nil if headers contains errors.
924
925## Index Format:
926
927Each block is structured as a snappy skippable block, with the chunk ID 0x99.
928
929The block can be read from the front, but contains information so it can be read from the back as well.
930
931Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
932with un-encoded value length of 64 bits, unless other limits are specified.
933
934| Content | Format |
935|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
936| ID, `[1]byte` | Always 0x99. |
937| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
938| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
939| UncompressedSize, Varint | Total Uncompressed size. |
940| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. |
941| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. |
942| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. |
943| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. |
944| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. |
945| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. |
946| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. |
947| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
948
949For regular streams the uncompressed offsets are fully predictable,
950so `HasUncompressedOffsets` allows specifying that compressed blocks all have
951exactly `EstBlockSize` bytes of uncompressed content.
952
953Entries *must* be in order, starting with the lowest offset,
954and there *must* be no uncompressed offset duplicates.
955Entries *may* point to the start of a skippable block,
956but it is then not allowed to also have an entry for the next block since
957that would give an uncompressed offset duplicate.
958
959There is no requirement for all blocks to be represented in the index.
960In fact there is a maximum of 65536 block entries in an index.
961
962The writer can use any method to reduce the number of entries.
963An implicit block start at 0,0 can be assumed.
964
965### Decoding entries:
966
967```
968// Read Uncompressed entries.
969// Each assumes EstBlockSize delta from previous.
970for each entry {
971 uOff = 0
972 if HasUncompressedOffsets == 1 {
973 uOff = ReadVarInt // Read value from stream
974 }
975
976 // Except for the first entry, use previous values.
977 if entryNum == 0 {
978 entry[entryNum].UncompressedOffset = uOff
979 continue
980 }
981
982 // Uncompressed uses previous offset and adds EstBlockSize
983 entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
984}
985
986
987// Guess that the first block will be 50% of uncompressed size.
988// Integer truncating division must be used.
989CompressGuess := EstBlockSize / 2
990
991// Read Compressed entries.
992// Each assumes CompressGuess delta from previous.
993// CompressGuess is adjusted for each value.
994for each entry {
995 cOff = ReadVarInt // Read value from stream
996
997 // Except for the first entry, use previous values.
998 if entryNum == 0 {
999 entry[entryNum].CompressedOffset = cOff
1000 continue
1001 }
1002
1003 // Compressed uses previous and our estimate.
1004 entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff
1005
1006 // Adjust compressed offset for next loop, integer truncating division must be used.
1007 CompressGuess += cOff/2
1008}
1009```
1010
1011To decode from any given uncompressed offset `(wantOffset)`:
1012
1013* Iterate entries until `entry[n].UncompressedOffset > wantOffset`.
1014* Start decoding from `entry[n-1].CompressedOffset`.
1015* Discard `wantOffset - entry[n-1].UncompressedOffset` bytes from the decoded stream.
1016
1017See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
1018
1019
1020# Format Extensions
1021
1022* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
1023* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
1024* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
1025
1026Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
1027
1028The length is specified by reading the 3-bit length specified in the tag and decode using this table:
1029
1030| Length | Actual Length |
1031|--------|----------------------|
1032| 0 | 4 |
1033| 1 | 5 |
1034| 2 | 6 |
1035| 3 | 7 |
1036| 4 | 8 |
1037| 5 | 8 + read 1 byte |
1038| 6 | 260 + read 2 bytes |
1039| 7 | 65540 + read 3 bytes |
1040
1041This allows any repeat offset + length to be represented by 2 to 5 bytes.
1042It also allows to emit matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies.
1043
1044Lengths are stored as little endian values.
1045
1046The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams.
1047
1048Default streaming block size is 1MB.
1049
1050# Dictionary Encoding
1051
1052Adding dictionaries allows providing a custom dictionary that will serve as lookup in the beginning of blocks.
1053
1054A dictionary provides an initial repeat value that can be used to point to a common header.
1055
1056Other than that the dictionary contains values that can be used as back-references.
1057
1058Often used data should be placed at the *end* of the dictionary since offsets < 2048 bytes will be smaller.
1059
1060## Format
1061
1062Dictionary *content* must be at least 16 bytes and less than or equal to 64KiB (65536 bytes).
1063
1064Encoding: `[repeat value (uvarint)][dictionary content...]`
1065
1066Before the dictionary content, an unsigned base-128 (uvarint) encoded value specifying the initial repeat offset.
1067This value is an offset into the dictionary content and not a back-reference offset,
1068so setting this to 0 will make the repeat value point to the first value of the dictionary.
1069
1070The value must be less than the dictionary length-8
1071
1072## Encoding
1073
1074From the decoder point of view the dictionary content is seen as preceding the encoded content.
1075
1076`[dictionary content][decoded output]`
1077
1078Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
1079
1080Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
1081However, if a copy ends at the end of the dictionary the next repeat will point to the start of the decoded buffer, which is allowed.
1082
1083The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
1084
1085When 64KB (65536 bytes) has been en/decoded it is no longer allowed to reference the dictionary,
1086neither by a copy nor repeat operations.
1087If the boundary is crossed while copying from the dictionary, the operation should complete,
1088but the next instruction is not allowed to reference the dictionary.
1089
1090Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
1091There are no checks whether the supplied dictionary is the correct one for a block.
1092Because of this there is no overhead by using a dictionary.
1093
1094## Example
1095
1096This is the dictionary content. Elements are separated by `[]`.
1097
1098Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
1099
1100Initial repeat offset is set at 10, which is the letter `2`.
1101
1102Encoded `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
1103
1104Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
1105
1106Output: `10 bananas which were brown were added`
1107
1108
1109## Streams
1110
1111For streams each block can use the dictionary.
1112
1113The dictionary cannot currently be provided on the stream.
1114
1115
1116# LICENSE
1117
1118This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
1119
1120Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
new file mode 100644
index 0000000..6c7feaf
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -0,0 +1,437 @@
1// Copyright 2011 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019 Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6package s2
7
8import (
9 "encoding/binary"
10 "errors"
11 "fmt"
12 "strconv"
13)
14
var (
	// ErrCorrupt reports that the input is invalid.
	ErrCorrupt = errors.New("s2: corrupt input")
	// ErrCRC reports that the input failed CRC validation (streams only).
	ErrCRC = errors.New("s2: corrupt input, crc mismatch")
	// ErrTooLarge reports that the uncompressed length is too large.
	ErrTooLarge = errors.New("s2: decoded block is too large")
	// ErrUnsupported reports that the input isn't supported.
	ErrUnsupported = errors.New("s2: unsupported input")
)
25
26// DecodedLen returns the length of the decoded block.
27func DecodedLen(src []byte) (int, error) {
28 v, _, err := decodedLen(src)
29 return v, err
30}
31
32// decodedLen returns the length of the decoded block and the number of bytes
33// that the length header occupied.
34func decodedLen(src []byte) (blockLen, headerLen int, err error) {
35 v, n := binary.Uvarint(src)
36 if n <= 0 || v > 0xffffffff {
37 return 0, 0, ErrCorrupt
38 }
39
40 const wordSize = 32 << (^uint(0) >> 32 & 1)
41 if wordSize == 32 && v > 0x7fffffff {
42 return 0, 0, ErrTooLarge
43 }
44 return int(v), n, nil
45}
46
const (
	// decodeErrCodeCorrupt is the non-zero code returned by the decode
	// routines when the compressed input is malformed.
	decodeErrCodeCorrupt = 1
)
50
51// Decode returns the decoded form of src. The returned slice may be a sub-
52// slice of dst if dst was large enough to hold the entire decoded block.
53// Otherwise, a newly allocated slice will be returned.
54//
55// The dst and src must not overlap. It is valid to pass a nil dst.
56func Decode(dst, src []byte) ([]byte, error) {
57 dLen, s, err := decodedLen(src)
58 if err != nil {
59 return nil, err
60 }
61 if dLen <= cap(dst) {
62 dst = dst[:dLen]
63 } else {
64 dst = make([]byte, dLen)
65 }
66 if s2Decode(dst, src[s:]) != 0 {
67 return nil, ErrCorrupt
68 }
69 return dst, nil
70}
71
// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func s2DecodeDict(dst, src []byte, dict *Dict) int {
	// Without a dictionary, delegate to the plain (possibly assembly) decoder.
	if dict == nil {
		return s2Decode(dst, src)
	}
	const debug = false
	const debugErrs = debug

	if debug {
		fmt.Println("Starting decode, dst len:", len(dst))
	}
	// d is the write position in dst, s the read position in src.
	var d, s, length int
	// Seed the repeat offset from the dictionary: dict.repeat is an offset
	// into dict.dict, converted here into a back-reference distance as seen
	// from the start of dst.
	offset := len(dict.dict) - dict.repeat

	// Fast main loop: as long as we can read at least 5 bytes
	// (the largest possible tag) no per-read bounds checks are needed.
	for s < len(src)-5 {
		// Removing bounds checks is SLOWER, when if doing
		// in := src[s:s+5]
		// Checked on Go 1.18
		switch src[s] & 0x03 {
		case tagLiteral:
			// Literal run; x encodes the length (possibly via 1-4 extra bytes).
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				x = uint32(src[s-1])
			case x == 61:
				in := src[s : s+3]
				x = uint32(in[1]) | uint32(in[2])<<8
				s += 3
			case x == 62:
				in := src[s : s+4]
				// Load as 32 bit and shift down.
				x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
				x >>= 8
				s += 4
			case x == 63:
				in := src[s : s+5]
				x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
				s += 5
			}
			length = int(x) + 1
			if debug {
				fmt.Println("literals, length:", length, "d-after:", d+length)
			}
			// The IntSize check guards against int overflow of x+1 on 32-bit.
			if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
				if debugErrs {
					fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
				}
				return decodeErrCodeCorrupt
			}

			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue

		case tagCopy1:
			s += 2
			toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
			length = int(src[s-2]) >> 2 & 0x7
			// A zero offset is the S2 repeat extension: reuse the last offset
			// and decode an (optionally extended) length.
			if toffset == 0 {
				if debug {
					fmt.Print("(repeat) ")
				}
				// keep last offset
				switch length {
				case 5:
					length = int(src[s]) + 4
					s += 1
				case 6:
					in := src[s : s+2]
					length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
					s += 2
				case 7:
					in := src[s : s+3]
					length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
					s += 3
				default: // 0-> 4
				}
			} else {
				offset = toffset
			}
			// Copy-1 lengths are stored minus 4.
			length += 4
		case tagCopy2:
			in := src[s : s+3]
			offset = int(uint32(in[1]) | uint32(in[2])<<8)
			length = 1 + int(in[0])>>2
			s += 3

		case tagCopy4:
			in := src[s : s+5]
			offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
			length = 1 + int(in[0])>>2
			s += 5
		}

		// Reject non-positive offsets and copies past the end of dst.
		if offset <= 0 || length > len(dst)-d {
			if debugErrs {
				fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
			}
			return decodeErrCodeCorrupt
		}

		// copy from dict: the offset reaches back before the start of dst,
		// i.e. into the virtual dictionary prefix.
		if d < offset {
			// Dictionary references are only valid within the first
			// MaxDictSrcOffset decoded bytes.
			if d > MaxDictSrcOffset {
				if debugErrs {
					fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
				}
				return decodeErrCodeCorrupt
			}
			startOff := len(dict.dict) - offset + d
			// Copies may not start before, nor run past, the dictionary.
			if startOff < 0 || startOff+length > len(dict.dict) {
				if debugErrs {
					fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict))
				}
				return decodeErrCodeCorrupt
			}
			if debug {
				fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff)
			}
			copy(dst[d:d+length], dict.dict[startOff:])
			d += length
			continue
		}

		if debug {
			fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
		}

		// Copy from an earlier sub-slice of dst to a later sub-slice.
		// If no overlap, use the built-in copy:
		if offset > length {
			copy(dst[d:d+length], dst[d-offset:])
			d += length
			continue
		}

		// Unlike the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		//
		// We align the slices into a and b and show the compiler they are the same size.
		// This allows the loop to run without bounds checks.
		a := dst[d : d+length]
		b := dst[d-offset:]
		b = b[:len(a)]
		for i := range a {
			a[i] = b[i]
		}
		d += length
	}

	// Remaining with extra checks: same logic as the loop above, but every
	// advance of s is bounds-checked since fewer than 5 bytes may remain.
	for s < len(src) {
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					if debugErrs {
						fmt.Println("src went oob")
					}
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					if debugErrs {
						fmt.Println("src went oob")
					}
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					if debugErrs {
						fmt.Println("src went oob")
					}
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					if debugErrs {
						fmt.Println("src went oob")
					}
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
				if debugErrs {
					fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
				}
				return decodeErrCodeCorrupt
			}
			if debug {
				fmt.Println("literals, length:", length, "d-after:", d+length)
			}

			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue

		case tagCopy1:
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				if debugErrs {
					fmt.Println("src went oob")
				}
				return decodeErrCodeCorrupt
			}
			length = int(src[s-2]) >> 2 & 0x7
			toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
			if toffset == 0 {
				if debug {
					fmt.Print("(repeat) ")
				}
				// keep last offset
				switch length {
				case 5:
					s += 1
					if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
						if debugErrs {
							fmt.Println("src went oob")
						}
						return decodeErrCodeCorrupt
					}
					length = int(uint32(src[s-1])) + 4
				case 6:
					s += 2
					if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
						if debugErrs {
							fmt.Println("src went oob")
						}
						return decodeErrCodeCorrupt
					}
					length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
				case 7:
					s += 3
					if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
						if debugErrs {
							fmt.Println("src went oob")
						}
						return decodeErrCodeCorrupt
					}
					length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
				default: // 0-> 4
				}
			} else {
				offset = toffset
			}
			length += 4
		case tagCopy2:
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				if debugErrs {
					fmt.Println("src went oob")
				}
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)

		case tagCopy4:
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				if debugErrs {
					fmt.Println("src went oob")
				}
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}

		if offset <= 0 || length > len(dst)-d {
			if debugErrs {
				fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
			}
			return decodeErrCodeCorrupt
		}

		// copy from dict
		if d < offset {
			if d > MaxDictSrcOffset {
				if debugErrs {
					fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
				}
				return decodeErrCodeCorrupt
			}
			rOff := len(dict.dict) - (offset - d)
			if debug {
				fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff)
			}
			if rOff+length > len(dict.dict) {
				if debugErrs {
					fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length)
				}
				return decodeErrCodeCorrupt
			}
			if rOff < 0 {
				if debugErrs {
					fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length)
				}
				return decodeErrCodeCorrupt
			}
			copy(dst[d:d+length], dict.dict[rOff:])
			d += length
			continue
		}

		if debug {
			fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
		}

		// Copy from an earlier sub-slice of dst to a later sub-slice.
		// If no overlap, use the built-in copy:
		if offset > length {
			copy(dst[d:d+length], dst[d-offset:])
			d += length
			continue
		}

		// Unlike the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		//
		// We align the slices into a and b and show the compiler they are the same size.
		// This allows the loop to run without bounds checks.
		a := dst[d : d+length]
		b := dst[d-offset:]
		b = b[:len(a)]
		for i := range a {
			a[i] = b[i]
		}
		d += length
	}

	// A valid block decodes to exactly len(dst) bytes.
	if d != len(dst) {
		if debugErrs {
			fmt.Println("wanted length", len(dst), "got", d)
		}
		return decodeErrCodeCorrupt
	}
	return 0
}
diff --git a/vendor/github.com/klauspost/compress/s2/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s
new file mode 100644
index 0000000..9b105e0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_amd64.s
@@ -0,0 +1,568 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Copyright (c) 2019 Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6// +build !appengine
7// +build gc
8// +build !noasm
9
10#include "textflag.h"
11
12#define R_TMP0 AX
13#define R_TMP1 BX
14#define R_LEN CX
15#define R_OFF DX
16#define R_SRC SI
17#define R_DST DI
18#define R_DBASE R8
19#define R_DLEN R9
20#define R_DEND R10
21#define R_SBASE R11
22#define R_SLEN R12
23#define R_SEND R13
24#define R_TMP2 R14
25#define R_TMP3 R15
26
27// The asm code generally follows the pure Go code in decode_other.go, except
28// where marked with a "!!!".
29
30// func decode(dst, src []byte) int
31//
32// All local variables fit into registers. The non-zero stack size is only to
33// spill registers and push args when issuing a CALL. The register allocation:
34// - R_TMP0 scratch
35// - R_TMP1 scratch
36// - R_LEN length or x (shared)
37// - R_OFF offset
38// - R_SRC &src[s]
39// - R_DST &dst[d]
40// + R_DBASE dst_base
41// + R_DLEN dst_len
42// + R_DEND dst_base + dst_len
43// + R_SBASE src_base
44// + R_SLEN src_len
45// + R_SEND src_base + src_len
46// - R_TMP2 used by doCopy
47// - R_TMP3 used by doCopy
48//
49// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
50// function, and after a CALL returns, and are not otherwise modified.
51//
52// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
53// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
54TEXT ·s2Decode(SB), NOSPLIT, $48-56
55 // Initialize R_SRC, R_DST and R_DBASE-R_SEND.
56 MOVQ dst_base+0(FP), R_DBASE
57 MOVQ dst_len+8(FP), R_DLEN
58 MOVQ R_DBASE, R_DST
59 MOVQ R_DBASE, R_DEND
60 ADDQ R_DLEN, R_DEND
61 MOVQ src_base+24(FP), R_SBASE
62 MOVQ src_len+32(FP), R_SLEN
63 MOVQ R_SBASE, R_SRC
64 MOVQ R_SBASE, R_SEND
65 ADDQ R_SLEN, R_SEND
66 XORQ R_OFF, R_OFF
67
68loop:
69 // for s < len(src)
70 CMPQ R_SRC, R_SEND
71 JEQ end
72
73 // R_LEN = uint32(src[s])
74 //
75 // switch src[s] & 0x03
76 MOVBLZX (R_SRC), R_LEN
77 MOVL R_LEN, R_TMP1
78 ANDL $3, R_TMP1
79 CMPL R_TMP1, $1
80 JAE tagCopy
81
82 // ----------------------------------------
83 // The code below handles literal tags.
84
85 // case tagLiteral:
86 // x := uint32(src[s] >> 2)
87 // switch
88 SHRL $2, R_LEN
89 CMPL R_LEN, $60
90 JAE tagLit60Plus
91
92 // case x < 60:
93 // s++
94 INCQ R_SRC
95
96doLit:
97 // This is the end of the inner "switch", when we have a literal tag.
98 //
99 // We assume that R_LEN == x and x fits in a uint32, where x is the variable
100 // used in the pure Go decode_other.go code.
101
102 // length = int(x) + 1
103 //
104 // Unlike the pure Go code, we don't need to check if length <= 0 because
105 // R_LEN can hold 64 bits, so the increment cannot overflow.
106 INCQ R_LEN
107
108 // Prepare to check if copying length bytes will run past the end of dst or
109 // src.
110 //
111 // R_TMP0 = len(dst) - d
112 // R_TMP1 = len(src) - s
113 MOVQ R_DEND, R_TMP0
114 SUBQ R_DST, R_TMP0
115 MOVQ R_SEND, R_TMP1
116 SUBQ R_SRC, R_TMP1
117
118 // !!! Try a faster technique for short (16 or fewer bytes) copies.
119 //
120 // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
121 // goto callMemmove // Fall back on calling runtime·memmove.
122 // }
123 //
124 // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
125 // against 21 instead of 16, because it cannot assume that all of its input
126 // is contiguous in memory and so it needs to leave enough source bytes to
127 // read the next tag without refilling buffers, but Go's Decode assumes
128 // contiguousness (the src argument is a []byte).
129 CMPQ R_LEN, $16
130 JGT callMemmove
131 CMPQ R_TMP0, $16
132 JLT callMemmove
133 CMPQ R_TMP1, $16
134 JLT callMemmove
135
136 // !!! Implement the copy from src to dst as a 16-byte load and store.
137 // (Decode's documentation says that dst and src must not overlap.)
138 //
139 // This always copies 16 bytes, instead of only length bytes, but that's
140 // OK. If the input is a valid Snappy encoding then subsequent iterations
141 // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
142 // non-nil error), so the overrun will be ignored.
143 //
144 // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
145 // 16-byte loads and stores. This technique probably wouldn't be as
146 // effective on architectures that are fussier about alignment.
147 MOVOU 0(R_SRC), X0
148 MOVOU X0, 0(R_DST)
149
150 // d += length
151 // s += length
152 ADDQ R_LEN, R_DST
153 ADDQ R_LEN, R_SRC
154 JMP loop
155
156callMemmove:
157 // if length > len(dst)-d || length > len(src)-s { etc }
158 CMPQ R_LEN, R_TMP0
159 JGT errCorrupt
160 CMPQ R_LEN, R_TMP1
161 JGT errCorrupt
162
163 // copy(dst[d:], src[s:s+length])
164 //
165 // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
166 // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
167 // three registers to the stack, to save local variables across the CALL.
168 MOVQ R_DST, 0(SP)
169 MOVQ R_SRC, 8(SP)
170 MOVQ R_LEN, 16(SP)
171 MOVQ R_DST, 24(SP)
172 MOVQ R_SRC, 32(SP)
173 MOVQ R_LEN, 40(SP)
174 MOVQ R_OFF, 48(SP)
175 CALL runtime·memmove(SB)
176
177 // Restore local variables: unspill registers from the stack and
178 // re-calculate R_DBASE-R_SEND.
179 MOVQ 24(SP), R_DST
180 MOVQ 32(SP), R_SRC
181 MOVQ 40(SP), R_LEN
182 MOVQ 48(SP), R_OFF
183 MOVQ dst_base+0(FP), R_DBASE
184 MOVQ dst_len+8(FP), R_DLEN
185 MOVQ R_DBASE, R_DEND
186 ADDQ R_DLEN, R_DEND
187 MOVQ src_base+24(FP), R_SBASE
188 MOVQ src_len+32(FP), R_SLEN
189 MOVQ R_SBASE, R_SEND
190 ADDQ R_SLEN, R_SEND
191
192 // d += length
193 // s += length
194 ADDQ R_LEN, R_DST
195 ADDQ R_LEN, R_SRC
196 JMP loop
197
198tagLit60Plus:
199 // !!! This fragment does the
200 //
201 // s += x - 58; if uint(s) > uint(len(src)) { etc }
202 //
203 // checks. In the asm version, we code it once instead of once per switch case.
204 ADDQ R_LEN, R_SRC
205 SUBQ $58, R_SRC
206 CMPQ R_SRC, R_SEND
207 JA errCorrupt
208
209 // case x == 60:
210 CMPL R_LEN, $61
211 JEQ tagLit61
212 JA tagLit62Plus
213
214 // x = uint32(src[s-1])
215 MOVBLZX -1(R_SRC), R_LEN
216 JMP doLit
217
218tagLit61:
219 // case x == 61:
220 // x = uint32(src[s-2]) | uint32(src[s-1])<<8
221 MOVWLZX -2(R_SRC), R_LEN
222 JMP doLit
223
224tagLit62Plus:
225 CMPL R_LEN, $62
226 JA tagLit63
227
228 // case x == 62:
229 // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
230 // We read one byte, safe to read one back, since we are just reading tag.
231 // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8
232 MOVL -4(R_SRC), R_LEN
233 SHRL $8, R_LEN
234 JMP doLit
235
236tagLit63:
237 // case x == 63:
238 // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
239 MOVL -4(R_SRC), R_LEN
240 JMP doLit
241
242// The code above handles literal tags.
243// ----------------------------------------
244// The code below handles copy tags.
245
246tagCopy4:
247 // case tagCopy4:
248 // s += 5
249 ADDQ $5, R_SRC
250
251 // if uint(s) > uint(len(src)) { etc }
252 CMPQ R_SRC, R_SEND
253 JA errCorrupt
254
255 // length = 1 + int(src[s-5])>>2
256 SHRQ $2, R_LEN
257 INCQ R_LEN
258
259 // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
260 MOVLQZX -4(R_SRC), R_OFF
261 JMP doCopy
262
263tagCopy2:
264 // case tagCopy2:
265 // s += 3
266 ADDQ $3, R_SRC
267
268 // if uint(s) > uint(len(src)) { etc }
269 CMPQ R_SRC, R_SEND
270 JA errCorrupt
271
272 // length = 1 + int(src[s-3])>>2
273 SHRQ $2, R_LEN
274 INCQ R_LEN
275
276 // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
277 MOVWQZX -2(R_SRC), R_OFF
278 JMP doCopy
279
280tagCopy:
281 // We have a copy tag. We assume that:
282 // - R_TMP1 == src[s] & 0x03
283 // - R_LEN == src[s]
284 CMPQ R_TMP1, $2
285 JEQ tagCopy2
286 JA tagCopy4
287
288 // case tagCopy1:
289 // s += 2
290 ADDQ $2, R_SRC
291
292 // if uint(s) > uint(len(src)) { etc }
293 CMPQ R_SRC, R_SEND
294 JA errCorrupt
295
296 // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
297 // length = 4 + int(src[s-2])>>2&0x7
298 MOVBQZX -1(R_SRC), R_TMP1
299 MOVQ R_LEN, R_TMP0
300 SHRQ $2, R_LEN
301 ANDQ $0xe0, R_TMP0
302 ANDQ $7, R_LEN
303 SHLQ $3, R_TMP0
304 ADDQ $4, R_LEN
305 ORQ R_TMP1, R_TMP0
306
307 // check if repeat code, ZF set by ORQ.
308 JZ repeatCode
309
310 // This is a regular copy, transfer our temporary value to R_OFF (length)
311 MOVQ R_TMP0, R_OFF
312 JMP doCopy
313
314// This is a repeat code.
315repeatCode:
316 // If length < 9, reuse last offset, with the length already calculated.
317 CMPQ R_LEN, $9
318 JL doCopyRepeat
319
320 // Read additional bytes for length.
321 JE repeatLen1
322
323 // Rare, so the extra branch shouldn't hurt too much.
324 CMPQ R_LEN, $10
325 JE repeatLen2
326 JMP repeatLen3
327
328// Read repeat lengths.
329repeatLen1:
330 // s ++
331 ADDQ $1, R_SRC
332
333 // if uint(s) > uint(len(src)) { etc }
334 CMPQ R_SRC, R_SEND
335 JA errCorrupt
336
337 // length = src[s-1] + 8
338 MOVBQZX -1(R_SRC), R_LEN
339 ADDL $8, R_LEN
340 JMP doCopyRepeat
341
342repeatLen2:
343 // s +=2
344 ADDQ $2, R_SRC
345
346 // if uint(s) > uint(len(src)) { etc }
347 CMPQ R_SRC, R_SEND
348 JA errCorrupt
349
350 // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8)
351 MOVWQZX -2(R_SRC), R_LEN
352 ADDL $260, R_LEN
353 JMP doCopyRepeat
354
355repeatLen3:
356 // s +=3
357 ADDQ $3, R_SRC
358
359 // if uint(s) > uint(len(src)) { etc }
360 CMPQ R_SRC, R_SEND
361 JA errCorrupt
362
363 // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16)
364 // Read one byte further back (just part of the tag, shifted out)
365 MOVL -4(R_SRC), R_LEN
366 SHRL $8, R_LEN
367 ADDL $65540, R_LEN
368 JMP doCopyRepeat
369
370doCopy:
371 // This is the end of the outer "switch", when we have a copy tag.
372 //
373 // We assume that:
374 // - R_LEN == length && R_LEN > 0
375 // - R_OFF == offset
376
377 // if d < offset { etc }
378 MOVQ R_DST, R_TMP1
379 SUBQ R_DBASE, R_TMP1
380 CMPQ R_TMP1, R_OFF
381 JLT errCorrupt
382
383 // Repeat values can skip the test above, since any offset > 0 will be in dst.
384doCopyRepeat:
385 // if offset <= 0 { etc }
386 CMPQ R_OFF, $0
387 JLE errCorrupt
388
389 // if length > len(dst)-d { etc }
390 MOVQ R_DEND, R_TMP1
391 SUBQ R_DST, R_TMP1
392 CMPQ R_LEN, R_TMP1
393 JGT errCorrupt
394
395 // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
396 //
397 // Set:
398 // - R_TMP2 = len(dst)-d
399 // - R_TMP3 = &dst[d-offset]
400 MOVQ R_DEND, R_TMP2
401 SUBQ R_DST, R_TMP2
402 MOVQ R_DST, R_TMP3
403 SUBQ R_OFF, R_TMP3
404
405 // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
406 //
407 // First, try using two 8-byte load/stores, similar to the doLit technique
408 // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
409 // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
410 // and not one 16-byte load/store, and the first store has to be before the
411 // second load, due to the overlap if offset is in the range [8, 16).
412 //
413 // if length > 16 || offset < 8 || len(dst)-d < 16 {
414 // goto slowForwardCopy
415 // }
416 // copy 16 bytes
417 // d += length
418 CMPQ R_LEN, $16
419 JGT slowForwardCopy
420 CMPQ R_OFF, $8
421 JLT slowForwardCopy
422 CMPQ R_TMP2, $16
423 JLT slowForwardCopy
424 MOVQ 0(R_TMP3), R_TMP0
425 MOVQ R_TMP0, 0(R_DST)
426 MOVQ 8(R_TMP3), R_TMP1
427 MOVQ R_TMP1, 8(R_DST)
428 ADDQ R_LEN, R_DST
429 JMP loop
430
431slowForwardCopy:
432 // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
433 // can still try 8-byte load stores, provided we can overrun up to 10 extra
434 // bytes. As above, the overrun will be fixed up by subsequent iterations
435 // of the outermost loop.
436 //
437 // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
438 // commentary says:
439 //
440 // ----
441 //
442 // The main part of this loop is a simple copy of eight bytes at a time
443 // until we've copied (at least) the requested amount of bytes. However,
444 // if d and d-offset are less than eight bytes apart (indicating a
445 // repeating pattern of length < 8), we first need to expand the pattern in
446 // order to get the correct results. For instance, if the buffer looks like
447 // this, with the eight-byte <d-offset> and <d> patterns marked as
448 // intervals:
449 //
450 // abxxxxxxxxxxxx
451 // [------] d-offset
452 // [------] d
453 //
454 // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
455 // once, after which we can move <d> two bytes without moving <d-offset>:
456 //
457 // ababxxxxxxxxxx
458 // [------] d-offset
459 // [------] d
460 //
461 // and repeat the exercise until the two no longer overlap.
462 //
463 // This allows us to do very well in the special case of one single byte
464 // repeated many times, without taking a big hit for more general cases.
465 //
466 // The worst case of extra writing past the end of the match occurs when
467 // offset == 1 and length == 1; the last copy will read from byte positions
468 // [0..7] and write to [4..11], whereas it was only supposed to write to
469 // position 1. Thus, ten excess bytes.
470 //
471 // ----
472 //
473 // That "10 byte overrun" worst case is confirmed by Go's
474 // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
475 // and finishSlowForwardCopy algorithm.
476 //
477 // if length > len(dst)-d-10 {
478 // goto verySlowForwardCopy
479 // }
480 SUBQ $10, R_TMP2
481 CMPQ R_LEN, R_TMP2
482 JGT verySlowForwardCopy
483
484 // We want to keep the offset, so we use R_TMP2 from here.
485 MOVQ R_OFF, R_TMP2
486
487makeOffsetAtLeast8:
488 // !!! As above, expand the pattern so that offset >= 8 and we can use
489 // 8-byte load/stores.
490 //
491 // for offset < 8 {
492 // copy 8 bytes from dst[d-offset:] to dst[d:]
493 // length -= offset
494 // d += offset
495 // offset += offset
496 // // The two previous lines together means that d-offset, and therefore
497 // // R_TMP3, is unchanged.
498 // }
499 CMPQ R_TMP2, $8
500 JGE fixUpSlowForwardCopy
501 MOVQ (R_TMP3), R_TMP1
502 MOVQ R_TMP1, (R_DST)
503 SUBQ R_TMP2, R_LEN
504 ADDQ R_TMP2, R_DST
505 ADDQ R_TMP2, R_TMP2
506 JMP makeOffsetAtLeast8
507
508fixUpSlowForwardCopy:
509 // !!! Add length (which might be negative now) to d (implied by R_DST being
510 // &dst[d]) so that d ends up at the right place when we jump back to the
511 // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
512 // length is positive, copying the remaining length bytes will write to the
513 // right place.
514 MOVQ R_DST, R_TMP0
515 ADDQ R_LEN, R_DST
516
517finishSlowForwardCopy:
518 // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
519 // length means that we overrun, but as above, that will be fixed up by
520 // subsequent iterations of the outermost loop.
521 CMPQ R_LEN, $0
522 JLE loop
523 MOVQ (R_TMP3), R_TMP1
524 MOVQ R_TMP1, (R_TMP0)
525 ADDQ $8, R_TMP3
526 ADDQ $8, R_TMP0
527 SUBQ $8, R_LEN
528 JMP finishSlowForwardCopy
529
530verySlowForwardCopy:
531 // verySlowForwardCopy is a simple implementation of forward copy. In C
532 // parlance, this is a do/while loop instead of a while loop, since we know
533 // that length > 0. In Go syntax:
534 //
535 // for {
536 // dst[d] = dst[d - offset]
537 // d++
538 // length--
539 // if length == 0 {
540 // break
541 // }
542 // }
543 MOVB (R_TMP3), R_TMP1
544 MOVB R_TMP1, (R_DST)
545 INCQ R_TMP3
546 INCQ R_DST
547 DECQ R_LEN
548 JNZ verySlowForwardCopy
549 JMP loop
550
551// The code above handles copy tags.
552// ----------------------------------------
553
554end:
555 // This is the end of the "for s < len(src)".
556 //
557 // if d != len(dst) { etc }
558 CMPQ R_DST, R_DEND
559 JNE errCorrupt
560
561 // return 0
562 MOVQ $0, ret+48(FP)
563 RET
564
565errCorrupt:
566 // return decodeErrCodeCorrupt
567 MOVQ $1, ret+48(FP)
568 RET
diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s
new file mode 100644
index 0000000..4b63d50
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s
@@ -0,0 +1,574 @@
1// Copyright 2020 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// +build !appengine
6// +build gc
7// +build !noasm
8
9#include "textflag.h"
10
11#define R_TMP0 R2
12#define R_TMP1 R3
13#define R_LEN R4
14#define R_OFF R5
15#define R_SRC R6
16#define R_DST R7
17#define R_DBASE R8
18#define R_DLEN R9
19#define R_DEND R10
20#define R_SBASE R11
21#define R_SLEN R12
22#define R_SEND R13
23#define R_TMP2 R14
24#define R_TMP3 R15
25
26// TEST_SRC will check if R_SRC is <= SRC_END
27#define TEST_SRC() \
28 CMP R_SEND, R_SRC \
29 BGT errCorrupt
30
31// MOVD R_SRC, R_TMP1
32// SUB R_SBASE, R_TMP1, R_TMP1
33// CMP R_SLEN, R_TMP1
34// BGT errCorrupt
35
36// The asm code generally follows the pure Go code in decode_other.go, except
37// where marked with a "!!!".
38
39// func decode(dst, src []byte) int
40//
41// All local variables fit into registers. The non-zero stack size is only to
42// spill registers and push args when issuing a CALL. The register allocation:
43// - R_TMP0 scratch
44// - R_TMP1 scratch
45// - R_LEN length or x
46// - R_OFF offset
47// - R_SRC &src[s]
48// - R_DST &dst[d]
49// + R_DBASE dst_base
50// + R_DLEN dst_len
51// + R_DEND dst_base + dst_len
52// + R_SBASE src_base
53// + R_SLEN src_len
54// + R_SEND src_base + src_len
55// - R_TMP2 used by doCopy
56// - R_TMP3 used by doCopy
57//
58// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
59// function, and after a CALL returns, and are not otherwise modified.
60//
61// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
62// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
63TEXT ·s2Decode(SB), NOSPLIT, $56-64
64 // Initialize R_SRC, R_DST and R_DBASE-R_SEND.
65 MOVD dst_base+0(FP), R_DBASE
66 MOVD dst_len+8(FP), R_DLEN
67 MOVD R_DBASE, R_DST
68 MOVD R_DBASE, R_DEND
69 ADD R_DLEN, R_DEND, R_DEND
70 MOVD src_base+24(FP), R_SBASE
71 MOVD src_len+32(FP), R_SLEN
72 MOVD R_SBASE, R_SRC
73 MOVD R_SBASE, R_SEND
74 ADD R_SLEN, R_SEND, R_SEND
75 MOVD $0, R_OFF
76
77loop:
78 // for s < len(src)
79 CMP R_SEND, R_SRC
80 BEQ end
81
82 // R_LEN = uint32(src[s])
83 //
84 // switch src[s] & 0x03
85 MOVBU (R_SRC), R_LEN
86 MOVW R_LEN, R_TMP1
87 ANDW $3, R_TMP1
88 MOVW $1, R1
89 CMPW R1, R_TMP1
90 BGE tagCopy
91
92 // ----------------------------------------
93 // The code below handles literal tags.
94
95 // case tagLiteral:
96 // x := uint32(src[s] >> 2)
97 // switch
98 MOVW $60, R1
99 LSRW $2, R_LEN, R_LEN
100 CMPW R_LEN, R1
101 BLS tagLit60Plus
102
103 // case x < 60:
104 // s++
105 ADD $1, R_SRC, R_SRC
106
107doLit:
108 // This is the end of the inner "switch", when we have a literal tag.
109 //
110 // We assume that R_LEN == x and x fits in a uint32, where x is the variable
111 // used in the pure Go decode_other.go code.
112
113 // length = int(x) + 1
114 //
115 // Unlike the pure Go code, we don't need to check if length <= 0 because
116 // R_LEN can hold 64 bits, so the increment cannot overflow.
117 ADD $1, R_LEN, R_LEN
118
119 // Prepare to check if copying length bytes will run past the end of dst or
120 // src.
121 //
122 // R_TMP0 = len(dst) - d
123 // R_TMP1 = len(src) - s
124 MOVD R_DEND, R_TMP0
125 SUB R_DST, R_TMP0, R_TMP0
126 MOVD R_SEND, R_TMP1
127 SUB R_SRC, R_TMP1, R_TMP1
128
129 // !!! Try a faster technique for short (16 or fewer bytes) copies.
130 //
131 // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
132 // goto callMemmove // Fall back on calling runtime·memmove.
133 // }
134 //
135 // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
136 // against 21 instead of 16, because it cannot assume that all of its input
137 // is contiguous in memory and so it needs to leave enough source bytes to
138 // read the next tag without refilling buffers, but Go's Decode assumes
139 // contiguousness (the src argument is a []byte).
140 CMP $16, R_LEN
141 BGT callMemmove
142 CMP $16, R_TMP0
143 BLT callMemmove
144 CMP $16, R_TMP1
145 BLT callMemmove
146
147 // !!! Implement the copy from src to dst as a 16-byte load and store.
148 // (Decode's documentation says that dst and src must not overlap.)
149 //
150 // This always copies 16 bytes, instead of only length bytes, but that's
151 // OK. If the input is a valid Snappy encoding then subsequent iterations
152 // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
153 // non-nil error), so the overrun will be ignored.
154 //
155 // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
156 // 16-byte loads and stores. This technique probably wouldn't be as
157 // effective on architectures that are fussier about alignment.
158 LDP 0(R_SRC), (R_TMP2, R_TMP3)
159 STP (R_TMP2, R_TMP3), 0(R_DST)
160
161 // d += length
162 // s += length
163 ADD R_LEN, R_DST, R_DST
164 ADD R_LEN, R_SRC, R_SRC
165 B loop
166
167callMemmove:
168 // if length > len(dst)-d || length > len(src)-s { etc }
169 CMP R_TMP0, R_LEN
170 BGT errCorrupt
171 CMP R_TMP1, R_LEN
172 BGT errCorrupt
173
174 // copy(dst[d:], src[s:s+length])
175 //
176 // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
177 // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
178 // three registers to the stack, to save local variables across the CALL.
179 MOVD R_DST, 8(RSP)
180 MOVD R_SRC, 16(RSP)
181 MOVD R_LEN, 24(RSP)
182 MOVD R_DST, 32(RSP)
183 MOVD R_SRC, 40(RSP)
184 MOVD R_LEN, 48(RSP)
185 MOVD R_OFF, 56(RSP)
186 CALL runtime·memmove(SB)
187
188 // Restore local variables: unspill registers from the stack and
189 // re-calculate R_DBASE-R_SEND.
190 MOVD 32(RSP), R_DST
191 MOVD 40(RSP), R_SRC
192 MOVD 48(RSP), R_LEN
193 MOVD 56(RSP), R_OFF
194 MOVD dst_base+0(FP), R_DBASE
195 MOVD dst_len+8(FP), R_DLEN
196 MOVD R_DBASE, R_DEND
197 ADD R_DLEN, R_DEND, R_DEND
198 MOVD src_base+24(FP), R_SBASE
199 MOVD src_len+32(FP), R_SLEN
200 MOVD R_SBASE, R_SEND
201 ADD R_SLEN, R_SEND, R_SEND
202
203 // d += length
204 // s += length
205 ADD R_LEN, R_DST, R_DST
206 ADD R_LEN, R_SRC, R_SRC
207 B loop
208
209tagLit60Plus:
210 // !!! This fragment does the
211 //
212 // s += x - 58; if uint(s) > uint(len(src)) { etc }
213 //
214 // checks. In the asm version, we code it once instead of once per switch case.
215 ADD R_LEN, R_SRC, R_SRC
216 SUB $58, R_SRC, R_SRC
217 TEST_SRC()
218
219 // case x == 60:
220 MOVW $61, R1
221 CMPW R1, R_LEN
222 BEQ tagLit61
223 BGT tagLit62Plus
224
225 // x = uint32(src[s-1])
226 MOVBU -1(R_SRC), R_LEN
227 B doLit
228
229tagLit61:
230 // case x == 61:
231 // x = uint32(src[s-2]) | uint32(src[s-1])<<8
232 MOVHU -2(R_SRC), R_LEN
233 B doLit
234
235tagLit62Plus:
236 CMPW $62, R_LEN
237 BHI tagLit63
238
239 // case x == 62:
240 // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
241 MOVHU -3(R_SRC), R_LEN
242 MOVBU -1(R_SRC), R_TMP1
243 ORR R_TMP1<<16, R_LEN
244 B doLit
245
246tagLit63:
247 // case x == 63:
248 // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
249 MOVWU -4(R_SRC), R_LEN
250 B doLit
251
252 // The code above handles literal tags.
253 // ----------------------------------------
254 // The code below handles copy tags.
255
256tagCopy4:
257 // case tagCopy4:
258 // s += 5
259 ADD $5, R_SRC, R_SRC
260
261 // if uint(s) > uint(len(src)) { etc }
262 MOVD R_SRC, R_TMP1
263 SUB R_SBASE, R_TMP1, R_TMP1
264 CMP R_SLEN, R_TMP1
265 BGT errCorrupt
266
267 // length = 1 + int(src[s-5])>>2
268 MOVD $1, R1
269 ADD R_LEN>>2, R1, R_LEN
270
271 // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
272 MOVWU -4(R_SRC), R_OFF
273 B doCopy
274
275tagCopy2:
276 // case tagCopy2:
277 // s += 3
278 ADD $3, R_SRC, R_SRC
279
280 // if uint(s) > uint(len(src)) { etc }
281 TEST_SRC()
282
283 // length = 1 + int(src[s-3])>>2
284 MOVD $1, R1
285 ADD R_LEN>>2, R1, R_LEN
286
287 // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
288 MOVHU -2(R_SRC), R_OFF
289 B doCopy
290
291tagCopy:
292 // We have a copy tag. We assume that:
293 // - R_TMP1 == src[s] & 0x03
294 // - R_LEN == src[s]
295 CMP $2, R_TMP1
296 BEQ tagCopy2
297 BGT tagCopy4
298
299 // case tagCopy1:
300 // s += 2
301 ADD $2, R_SRC, R_SRC
302
303 // if uint(s) > uint(len(src)) { etc }
304 TEST_SRC()
305
306 // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
307 // Calculate offset in R_TMP0 in case it is a repeat.
308 MOVD R_LEN, R_TMP0
309 AND $0xe0, R_TMP0
310 MOVBU -1(R_SRC), R_TMP1
311 ORR R_TMP0<<3, R_TMP1, R_TMP0
312
313 // length = 4 + int(src[s-2])>>2&0x7
314 MOVD $7, R1
315 AND R_LEN>>2, R1, R_LEN
316 ADD $4, R_LEN, R_LEN
317
318 // check if repeat code with offset 0.
319 CMP $0, R_TMP0
320 BEQ repeatCode
321
322 // This is a regular copy, transfer our temporary value to R_OFF (offset)
323 MOVD R_TMP0, R_OFF
324 B doCopy
325
326 // This is a repeat code.
327repeatCode:
328 // If length < 9, reuse last offset, with the length already calculated.
329 CMP $9, R_LEN
330 BLT doCopyRepeat
331 BEQ repeatLen1
332 CMP $10, R_LEN
333 BEQ repeatLen2
334
335repeatLen3:
336 // s +=3
337 ADD $3, R_SRC, R_SRC
338
339 // if uint(s) > uint(len(src)) { etc }
340 TEST_SRC()
341
342 // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540
343 MOVBU -1(R_SRC), R_TMP0
344 MOVHU -3(R_SRC), R_LEN
345 ORR R_TMP0<<16, R_LEN, R_LEN
346 ADD $65540, R_LEN, R_LEN
347 B doCopyRepeat
348
349repeatLen2:
350 // s +=2
351 ADD $2, R_SRC, R_SRC
352
353 // if uint(s) > uint(len(src)) { etc }
354 TEST_SRC()
355
356 // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260
357 MOVHU -2(R_SRC), R_LEN
358 ADD $260, R_LEN, R_LEN
359 B doCopyRepeat
360
361repeatLen1:
362 // s +=1
363 ADD $1, R_SRC, R_SRC
364
365 // if uint(s) > uint(len(src)) { etc }
366 TEST_SRC()
367
368 // length = src[s-1] + 8
369 MOVBU -1(R_SRC), R_LEN
370 ADD $8, R_LEN, R_LEN
371 B doCopyRepeat
372
373doCopy:
374 // This is the end of the outer "switch", when we have a copy tag.
375 //
376 // We assume that:
377 // - R_LEN == length && R_LEN > 0
378 // - R_OFF == offset
379
380 // if d < offset { etc }
381 MOVD R_DST, R_TMP1
382 SUB R_DBASE, R_TMP1, R_TMP1
383 CMP R_OFF, R_TMP1
384 BLT errCorrupt
385
386 // Repeat values can skip the test above, since any offset > 0 will be in dst.
387doCopyRepeat:
388
389 // if offset <= 0 { etc }
390 CMP $0, R_OFF
391 BLE errCorrupt
392
393 // if length > len(dst)-d { etc }
394 MOVD R_DEND, R_TMP1
395 SUB R_DST, R_TMP1, R_TMP1
396 CMP R_TMP1, R_LEN
397 BGT errCorrupt
398
399 // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
400 //
401 // Set:
402 // - R_TMP2 = len(dst)-d
403 // - R_TMP3 = &dst[d-offset]
404 MOVD R_DEND, R_TMP2
405 SUB R_DST, R_TMP2, R_TMP2
406 MOVD R_DST, R_TMP3
407 SUB R_OFF, R_TMP3, R_TMP3
408
409 // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
410 //
411 // First, try using two 8-byte load/stores, similar to the doLit technique
412 // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
413 // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
414 // and not one 16-byte load/store, and the first store has to be before the
415 // second load, due to the overlap if offset is in the range [8, 16).
416 //
417 // if length > 16 || offset < 8 || len(dst)-d < 16 {
418 // goto slowForwardCopy
419 // }
420 // copy 16 bytes
421 // d += length
422 CMP $16, R_LEN
423 BGT slowForwardCopy
424 CMP $8, R_OFF
425 BLT slowForwardCopy
426 CMP $16, R_TMP2
427 BLT slowForwardCopy
428 MOVD 0(R_TMP3), R_TMP0
429 MOVD R_TMP0, 0(R_DST)
430 MOVD 8(R_TMP3), R_TMP1
431 MOVD R_TMP1, 8(R_DST)
432 ADD R_LEN, R_DST, R_DST
433 B loop
434
435slowForwardCopy:
436 // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
437 // can still try 8-byte load stores, provided we can overrun up to 10 extra
438 // bytes. As above, the overrun will be fixed up by subsequent iterations
439 // of the outermost loop.
440 //
441 // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
442 // commentary says:
443 //
444 // ----
445 //
446 // The main part of this loop is a simple copy of eight bytes at a time
447 // until we've copied (at least) the requested amount of bytes. However,
448 // if d and d-offset are less than eight bytes apart (indicating a
449 // repeating pattern of length < 8), we first need to expand the pattern in
450 // order to get the correct results. For instance, if the buffer looks like
451 // this, with the eight-byte <d-offset> and <d> patterns marked as
452 // intervals:
453 //
454 // abxxxxxxxxxxxx
455 // [------] d-offset
456 // [------] d
457 //
458 // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
459 // once, after which we can move <d> two bytes without moving <d-offset>:
460 //
461 // ababxxxxxxxxxx
462 // [------] d-offset
463 // [------] d
464 //
465 // and repeat the exercise until the two no longer overlap.
466 //
467 // This allows us to do very well in the special case of one single byte
468 // repeated many times, without taking a big hit for more general cases.
469 //
470 // The worst case of extra writing past the end of the match occurs when
471 // offset == 1 and length == 1; the last copy will read from byte positions
472 // [0..7] and write to [4..11], whereas it was only supposed to write to
473 // position 1. Thus, ten excess bytes.
474 //
475 // ----
476 //
477 // That "10 byte overrun" worst case is confirmed by Go's
478 // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
479 // and finishSlowForwardCopy algorithm.
480 //
481 // if length > len(dst)-d-10 {
482 // goto verySlowForwardCopy
483 // }
484 SUB $10, R_TMP2, R_TMP2
485 CMP R_TMP2, R_LEN
486 BGT verySlowForwardCopy
487
488 // We want to keep the offset, so we use R_TMP2 from here.
489 MOVD R_OFF, R_TMP2
490
491makeOffsetAtLeast8:
492 // !!! As above, expand the pattern so that offset >= 8 and we can use
493 // 8-byte load/stores.
494 //
495 // for offset < 8 {
496 // copy 8 bytes from dst[d-offset:] to dst[d:]
497 // length -= offset
498 // d += offset
499 // offset += offset
500 // // The two previous lines together means that d-offset, and therefore
501 // // R_TMP3, is unchanged.
502 // }
503 CMP $8, R_TMP2
504 BGE fixUpSlowForwardCopy
505 MOVD (R_TMP3), R_TMP1
506 MOVD R_TMP1, (R_DST)
507 SUB R_TMP2, R_LEN, R_LEN
508 ADD R_TMP2, R_DST, R_DST
509 ADD R_TMP2, R_TMP2, R_TMP2
510 B makeOffsetAtLeast8
511
512fixUpSlowForwardCopy:
513 // !!! Add length (which might be negative now) to d (implied by R_DST being
514 // &dst[d]) so that d ends up at the right place when we jump back to the
515 // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
516 // length is positive, copying the remaining length bytes will write to the
517 // right place.
518 MOVD R_DST, R_TMP0
519 ADD R_LEN, R_DST, R_DST
520
521finishSlowForwardCopy:
522 // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
523 // length means that we overrun, but as above, that will be fixed up by
524 // subsequent iterations of the outermost loop.
525 MOVD $0, R1
526 CMP R1, R_LEN
527 BLE loop
528 MOVD (R_TMP3), R_TMP1
529 MOVD R_TMP1, (R_TMP0)
530 ADD $8, R_TMP3, R_TMP3
531 ADD $8, R_TMP0, R_TMP0
532 SUB $8, R_LEN, R_LEN
533 B finishSlowForwardCopy
534
535verySlowForwardCopy:
536 // verySlowForwardCopy is a simple implementation of forward copy. In C
537 // parlance, this is a do/while loop instead of a while loop, since we know
538 // that length > 0. In Go syntax:
539 //
540 // for {
541 // dst[d] = dst[d - offset]
542 // d++
543 // length--
544 // if length == 0 {
545 // break
546 // }
547 // }
548 MOVB (R_TMP3), R_TMP1
549 MOVB R_TMP1, (R_DST)
550 ADD $1, R_TMP3, R_TMP3
551 ADD $1, R_DST, R_DST
552 SUB $1, R_LEN, R_LEN
553 CBNZ R_LEN, verySlowForwardCopy
554 B loop
555
556 // The code above handles copy tags.
557 // ----------------------------------------
558
559end:
560 // This is the end of the "for s < len(src)".
561 //
562 // if d != len(dst) { etc }
563 CMP R_DEND, R_DST
564 BNE errCorrupt
565
566 // return 0
567 MOVD $0, ret+48(FP)
568 RET
569
570errCorrupt:
571 // return decodeErrCodeCorrupt
572 MOVD $1, R_TMP0
573 MOVD R_TMP0, ret+48(FP)
574 RET
diff --git a/vendor/github.com/klauspost/compress/s2/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go
new file mode 100644
index 0000000..cb3576e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_asm.go
@@ -0,0 +1,17 @@
1// Copyright 2016 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019 Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6//go:build (amd64 || arm64) && !appengine && gc && !noasm
7// +build amd64 arm64
8// +build !appengine
9// +build gc
10// +build !noasm
11
12package s2
13
14// decode has the same semantics as in decode_other.go.
15//
16//go:noescape
17func s2Decode(dst, src []byte) int
diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go
new file mode 100644
index 0000000..2cb55c2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_other.go
@@ -0,0 +1,292 @@
1// Copyright 2016 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019 Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6//go:build (!amd64 && !arm64) || appengine || !gc || noasm
7// +build !amd64,!arm64 appengine !gc noasm
8
9package s2
10
11import (
12 "fmt"
13 "strconv"
14)
15
16// decode writes the decoding of src to dst. It assumes that the varint-encoded
17// length of the decompressed bytes has already been read, and that len(dst)
18// equals that length.
19//
20// It returns 0 on success or a decodeErrCodeXxx error code on failure.
21func s2Decode(dst, src []byte) int {
22 const debug = false
23 if debug {
24 fmt.Println("Starting decode, dst len:", len(dst))
25 }
26 var d, s, length int
27 offset := 0
28
29 // As long as we can read at least 5 bytes...
30 for s < len(src)-5 {
31 // Removing bounds checks is SLOWER, when if doing
32 // in := src[s:s+5]
33 // Checked on Go 1.18
34 switch src[s] & 0x03 {
35 case tagLiteral:
36 x := uint32(src[s] >> 2)
37 switch {
38 case x < 60:
39 s++
40 case x == 60:
41 s += 2
42 x = uint32(src[s-1])
43 case x == 61:
44 in := src[s : s+3]
45 x = uint32(in[1]) | uint32(in[2])<<8
46 s += 3
47 case x == 62:
48 in := src[s : s+4]
49 // Load as 32 bit and shift down.
50 x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
51 x >>= 8
52 s += 4
53 case x == 63:
54 in := src[s : s+5]
55 x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
56 s += 5
57 }
58 length = int(x) + 1
59 if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
60 if debug {
61 fmt.Println("corrupt: lit size", length)
62 }
63 return decodeErrCodeCorrupt
64 }
65 if debug {
66 fmt.Println("literals, length:", length, "d-after:", d+length)
67 }
68
69 copy(dst[d:], src[s:s+length])
70 d += length
71 s += length
72 continue
73
74 case tagCopy1:
75 s += 2
76 toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
77 length = int(src[s-2]) >> 2 & 0x7
78 if toffset == 0 {
79 if debug {
80 fmt.Print("(repeat) ")
81 }
82 // keep last offset
83 switch length {
84 case 5:
85 length = int(src[s]) + 4
86 s += 1
87 case 6:
88 in := src[s : s+2]
89 length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
90 s += 2
91 case 7:
92 in := src[s : s+3]
93 length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
94 s += 3
95 default: // 0-> 4
96 }
97 } else {
98 offset = toffset
99 }
100 length += 4
101 case tagCopy2:
102 in := src[s : s+3]
103 offset = int(uint32(in[1]) | uint32(in[2])<<8)
104 length = 1 + int(in[0])>>2
105 s += 3
106
107 case tagCopy4:
108 in := src[s : s+5]
109 offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
110 length = 1 + int(in[0])>>2
111 s += 5
112 }
113
114 if offset <= 0 || d < offset || length > len(dst)-d {
115 if debug {
116 fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
117 }
118
119 return decodeErrCodeCorrupt
120 }
121
122 if debug {
123 fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
124 }
125
126 // Copy from an earlier sub-slice of dst to a later sub-slice.
127 // If no overlap, use the built-in copy:
128 if offset > length {
129 copy(dst[d:d+length], dst[d-offset:])
130 d += length
131 continue
132 }
133
134 // Unlike the built-in copy function, this byte-by-byte copy always runs
135 // forwards, even if the slices overlap. Conceptually, this is:
136 //
137 // d += forwardCopy(dst[d:d+length], dst[d-offset:])
138 //
139 // We align the slices into a and b and show the compiler they are the same size.
140 // This allows the loop to run without bounds checks.
141 a := dst[d : d+length]
142 b := dst[d-offset:]
143 b = b[:len(a)]
144 for i := range a {
145 a[i] = b[i]
146 }
147 d += length
148 }
149
150 // Remaining with extra checks...
151 for s < len(src) {
152 switch src[s] & 0x03 {
153 case tagLiteral:
154 x := uint32(src[s] >> 2)
155 switch {
156 case x < 60:
157 s++
158 case x == 60:
159 s += 2
160 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
161 return decodeErrCodeCorrupt
162 }
163 x = uint32(src[s-1])
164 case x == 61:
165 s += 3
166 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
167 return decodeErrCodeCorrupt
168 }
169 x = uint32(src[s-2]) | uint32(src[s-1])<<8
170 case x == 62:
171 s += 4
172 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
173 return decodeErrCodeCorrupt
174 }
175 x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
176 case x == 63:
177 s += 5
178 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
179 return decodeErrCodeCorrupt
180 }
181 x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
182 }
183 length = int(x) + 1
184 if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
185 if debug {
186 fmt.Println("corrupt: lit size", length)
187 }
188 return decodeErrCodeCorrupt
189 }
190 if debug {
191 fmt.Println("literals, length:", length, "d-after:", d+length)
192 }
193
194 copy(dst[d:], src[s:s+length])
195 d += length
196 s += length
197 continue
198
199 case tagCopy1:
200 s += 2
201 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
202 return decodeErrCodeCorrupt
203 }
204 length = int(src[s-2]) >> 2 & 0x7
205 toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
206 if toffset == 0 {
207 if debug {
208 fmt.Print("(repeat) ")
209 }
210 // keep last offset
211 switch length {
212 case 5:
213 s += 1
214 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
215 return decodeErrCodeCorrupt
216 }
217 length = int(uint32(src[s-1])) + 4
218 case 6:
219 s += 2
220 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
221 return decodeErrCodeCorrupt
222 }
223 length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
224 case 7:
225 s += 3
226 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
227 return decodeErrCodeCorrupt
228 }
229 length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
230 default: // 0-> 4
231 }
232 } else {
233 offset = toffset
234 }
235 length += 4
236 case tagCopy2:
237 s += 3
238 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
239 return decodeErrCodeCorrupt
240 }
241 length = 1 + int(src[s-3])>>2
242 offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
243
244 case tagCopy4:
245 s += 5
246 if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
247 return decodeErrCodeCorrupt
248 }
249 length = 1 + int(src[s-5])>>2
250 offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
251 }
252
253 if offset <= 0 || d < offset || length > len(dst)-d {
254 if debug {
255 fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
256 }
257 return decodeErrCodeCorrupt
258 }
259
260 if debug {
261 fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
262 }
263
264 // Copy from an earlier sub-slice of dst to a later sub-slice.
265 // If no overlap, use the built-in copy:
266 if offset > length {
267 copy(dst[d:d+length], dst[d-offset:])
268 d += length
269 continue
270 }
271
272 // Unlike the built-in copy function, this byte-by-byte copy always runs
273 // forwards, even if the slices overlap. Conceptually, this is:
274 //
275 // d += forwardCopy(dst[d:d+length], dst[d-offset:])
276 //
277 // We align the slices into a and b and show the compiler they are the same size.
278 // This allows the loop to run without bounds checks.
279 a := dst[d : d+length]
280 b := dst[d-offset:]
281 b = b[:len(a)]
282 for i := range a {
283 a[i] = b[i]
284 }
285 d += length
286 }
287
288 if d != len(dst) {
289 return decodeErrCodeCorrupt
290 }
291 return 0
292}
diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go
new file mode 100644
index 0000000..f125ad0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/dict.go
@@ -0,0 +1,350 @@
1// Copyright (c) 2022+ Klaus Post. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package s2
6
7import (
8 "bytes"
9 "encoding/binary"
10 "sync"
11)
12
// Dictionary size limits. The sizes refer to the dictionary payload after
// the serialized repeat header has been stripped (see NewDict).
const (
	// MinDictSize is the minimum dictionary size when repeat has been read.
	MinDictSize = 16

	// MaxDictSize is the maximum dictionary size when repeat has been read.
	MaxDictSize = 65536

	// MaxDictSrcOffset is the maximum offset where a dictionary entry can start.
	MaxDictSrcOffset = 65535
)
23
// Dict contains a dictionary that can be used for encoding and decoding s2
type Dict struct {
	dict []byte
	repeat int // Repeat as index of dict

	// Hash tables for the three compression levels, built lazily and at
	// most once each via the corresponding sync.Once
	// (see initFast, initBetter and initBest).
	fast, better, best sync.Once
	fastTable *[1 << 14]uint16

	betterTableShort *[1 << 14]uint16
	betterTableLong *[1 << 17]uint16

	bestTableShort *[1 << 16]uint32
	bestTableLong *[1 << 19]uint32
}
38
39// NewDict will read a dictionary.
40// It will return nil if the dictionary is invalid.
41func NewDict(dict []byte) *Dict {
42 if len(dict) == 0 {
43 return nil
44 }
45 var d Dict
46 // Repeat is the first value of the dict
47 r, n := binary.Uvarint(dict)
48 if n <= 0 {
49 return nil
50 }
51 dict = dict[n:]
52 d.dict = dict
53 if cap(d.dict) < len(d.dict)+16 {
54 d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
55 }
56 if len(dict) < MinDictSize || len(dict) > MaxDictSize {
57 return nil
58 }
59 d.repeat = int(r)
60 if d.repeat > len(dict) {
61 return nil
62 }
63 return &d
64}
65
66// Bytes will return a serialized version of the dictionary.
67// The output can be sent to NewDict.
68func (d *Dict) Bytes() []byte {
69 dst := make([]byte, binary.MaxVarintLen16+len(d.dict))
70 return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...)
71}
72
73// MakeDict will create a dictionary.
74// 'data' must be at least MinDictSize.
75// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used.
76// If searchStart is set the start repeat value will be set to the last
77// match of this content.
78// If no matches are found, it will attempt to find shorter matches.
79// This content should match the typical start of a block.
80// If at least 4 bytes cannot be matched, repeat is set to start of block.
81func MakeDict(data []byte, searchStart []byte) *Dict {
82 if len(data) == 0 {
83 return nil
84 }
85 if len(data) > MaxDictSize {
86 data = data[len(data)-MaxDictSize:]
87 }
88 var d Dict
89 dict := data
90 d.dict = dict
91 if cap(d.dict) < len(d.dict)+16 {
92 d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
93 }
94 if len(dict) < MinDictSize {
95 return nil
96 }
97
98 // Find the longest match possible, last entry if multiple.
99 for s := len(searchStart); s > 4; s-- {
100 if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 {
101 d.repeat = idx
102 break
103 }
104 }
105
106 return &d
107}
108
109// MakeDictManual will create a dictionary.
110// 'data' must be at least MinDictSize and less than or equal to MaxDictSize.
111// A manual first repeat index into data must be provided.
112// It must be less than len(data)-8.
113func MakeDictManual(data []byte, firstIdx uint16) *Dict {
114 if len(data) < MinDictSize || int(firstIdx) >= len(data)-8 || len(data) > MaxDictSize {
115 return nil
116 }
117 var d Dict
118 dict := data
119 d.dict = dict
120 if cap(d.dict) < len(d.dict)+16 {
121 d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
122 }
123
124 d.repeat = int(firstIdx)
125 return &d
126}
127
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and does not make for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func (d *Dict) Encode(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if cap(dst) < n {
		dst = make([]byte, n)
	} else {
		// Reuse dst's backing array.
		dst = dst[:n]
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	dstP := binary.PutUvarint(dst, uint64(len(src)))

	if len(src) == 0 {
		return dst[:dstP]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for matching to pay off; store as one literal run.
		dstP += emitLiteral(dst[dstP:], src)
		return dst[:dstP]
	}
	n := encodeBlockDictGo(dst[dstP:], src, d)
	if n > 0 {
		dstP += n
		return dst[:dstP]
	}
	// Not compressible
	dstP += emitLiteral(dst[dstP:], src)
	return dst[:dstP]
}
168
169// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
170// slice of dst if dst was large enough to hold the entire encoded block.
171// Otherwise, a newly allocated slice will be returned.
172//
173// EncodeBetter compresses better than Encode but typically with a
174// 10-40% speed decrease on both compression and decompression.
175//
176// The dst and src must not overlap. It is valid to pass a nil dst.
177//
178// The blocks will require the same amount of memory to decode as encoding,
179// and does not make for concurrent decoding.
180// Also note that blocks do not contain CRC information, so corruption may be undetected.
181//
182// If you need to encode larger amounts of data, consider using
183// the streaming interface which gives all of these features.
184func (d *Dict) EncodeBetter(dst, src []byte) []byte {
185 if n := MaxEncodedLen(len(src)); n < 0 {
186 panic(ErrTooLarge)
187 } else if len(dst) < n {
188 dst = make([]byte, n)
189 }
190
191 // The block starts with the varint-encoded length of the decompressed bytes.
192 dstP := binary.PutUvarint(dst, uint64(len(src)))
193
194 if len(src) == 0 {
195 return dst[:dstP]
196 }
197 if len(src) < minNonLiteralBlockSize {
198 dstP += emitLiteral(dst[dstP:], src)
199 return dst[:dstP]
200 }
201 n := encodeBlockBetterDict(dst[dstP:], src, d)
202 if n > 0 {
203 dstP += n
204 return dst[:dstP]
205 }
206 // Not compressible
207 dstP += emitLiteral(dst[dstP:], src)
208 return dst[:dstP]
209}
210
211// EncodeBest returns the encoded form of src. The returned slice may be a sub-
212// slice of dst if dst was large enough to hold the entire encoded block.
213// Otherwise, a newly allocated slice will be returned.
214//
215// EncodeBest compresses as good as reasonably possible but with a
216// big speed decrease.
217//
218// The dst and src must not overlap. It is valid to pass a nil dst.
219//
220// The blocks will require the same amount of memory to decode as encoding,
221// and does not make for concurrent decoding.
222// Also note that blocks do not contain CRC information, so corruption may be undetected.
223//
224// If you need to encode larger amounts of data, consider using
225// the streaming interface which gives all of these features.
226func (d *Dict) EncodeBest(dst, src []byte) []byte {
227 if n := MaxEncodedLen(len(src)); n < 0 {
228 panic(ErrTooLarge)
229 } else if len(dst) < n {
230 dst = make([]byte, n)
231 }
232
233 // The block starts with the varint-encoded length of the decompressed bytes.
234 dstP := binary.PutUvarint(dst, uint64(len(src)))
235
236 if len(src) == 0 {
237 return dst[:dstP]
238 }
239 if len(src) < minNonLiteralBlockSize {
240 dstP += emitLiteral(dst[dstP:], src)
241 return dst[:dstP]
242 }
243 n := encodeBlockBest(dst[dstP:], src, d)
244 if n > 0 {
245 dstP += n
246 return dst[:dstP]
247 }
248 // Not compressible
249 dstP += emitLiteral(dst[dstP:], src)
250 return dst[:dstP]
251}
252
253// Decode returns the decoded form of src. The returned slice may be a sub-
254// slice of dst if dst was large enough to hold the entire decoded block.
255// Otherwise, a newly allocated slice will be returned.
256//
257// The dst and src must not overlap. It is valid to pass a nil dst.
258func (d *Dict) Decode(dst, src []byte) ([]byte, error) {
259 dLen, s, err := decodedLen(src)
260 if err != nil {
261 return nil, err
262 }
263 if dLen <= cap(dst) {
264 dst = dst[:dLen]
265 } else {
266 dst = make([]byte, dLen)
267 }
268 if s2DecodeDict(dst, src[s:], d) != 0 {
269 return nil, ErrCorrupt
270 }
271 return dst, nil
272}
273
// initFast lazily builds the hash table used by the fast dictionary encoder.
// Safe to call repeatedly and concurrently; the table is built once via sync.Once.
func (d *Dict) initFast() {
	d.fast.Do(func() {
		const (
			tableBits    = 14
			maxTableSize = 1 << tableBits
		)

		var table [maxTableSize]uint16
		// We stop so any entry of length 8 can always be read.
		// Hash three consecutive positions per step; later writes to the same
		// bucket overwrite earlier ones, keeping the highest index.
		for i := 0; i < len(d.dict)-8-2; i += 3 {
			x0 := load64(d.dict, i)
			h0 := hash6(x0, tableBits)
			h1 := hash6(x0>>8, tableBits)
			h2 := hash6(x0>>16, tableBits)
			table[h0] = uint16(i)
			table[h1] = uint16(i + 1)
			table[h2] = uint16(i + 2)
		}
		d.fastTable = &table
	})
}
295
// initBetter lazily builds the long/short hash tables used by the "better"
// dictionary encoder. Built once via sync.Once; safe for concurrent callers.
func (d *Dict) initBetter() {
	d.better.Do(func() {
		const (
			// Long hash matches.
			lTableBits    = 17
			maxLTableSize = 1 << lTableBits

			// Short hash matches.
			sTableBits    = 14
			maxSTableSize = 1 << sTableBits
		)

		var lTable [maxLTableSize]uint16
		var sTable [maxSTableSize]uint16

		// We stop so any entry of length 8 can always be read.
		// Each bucket keeps the last (highest) index hashed to it.
		for i := 0; i < len(d.dict)-8; i++ {
			cv := load64(d.dict, i)
			lTable[hash7(cv, lTableBits)] = uint16(i)
			sTable[hash4(cv, sTableBits)] = uint16(i)
		}
		d.betterTableShort = &sTable
		d.betterTableLong = &lTable
	})
}
321
// initBest lazily builds the long/short hash tables used by the "best"
// dictionary encoder. Built once via sync.Once; safe for concurrent callers.
func (d *Dict) initBest() {
	d.best.Do(func() {
		const (
			// Long hash matches.
			lTableBits    = 19
			maxLTableSize = 1 << lTableBits

			// Short hash matches.
			sTableBits    = 16
			maxSTableSize = 1 << sTableBits
		)

		var lTable [maxLTableSize]uint32
		var sTable [maxSTableSize]uint32

		// We stop so any entry of length 8 can always be read.
		// Each uint32 slot packs two candidates: the newest index in the low
		// 16 bits and the previous occupant shifted into the high 16 bits.
		for i := 0; i < len(d.dict)-8; i++ {
			cv := load64(d.dict, i)
			hashL := hash8(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL := lTable[hashL]
			candidateS := sTable[hashS]
			lTable[hashL] = uint32(i) | candidateL<<16
			sTable[hashS] = uint32(i) | candidateS<<16
		}
		d.bestTableShort = &sTable
		d.bestTableLong = &lTable
	})
}
diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go
new file mode 100644
index 0000000..0c9088a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode.go
@@ -0,0 +1,393 @@
1// Copyright 2011 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019 Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6package s2
7
8import (
9 "encoding/binary"
10 "math"
11 "math/bits"
12)
13
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and does not make for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func Encode(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if cap(dst) < n {
		dst = make([]byte, n)
	} else {
		// Reuse dst's backing array.
		dst = dst[:n]
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	if len(src) == 0 {
		return dst[:d]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for matching to pay off; store as one literal run.
		d += emitLiteral(dst[d:], src)
		return dst[:d]
	}
	n := encodeBlock(dst[d:], src)
	if n > 0 {
		d += n
		return dst[:d]
	}
	// Not compressible
	d += emitLiteral(dst[d:], src)
	return dst[:d]
}
54
55// EstimateBlockSize will perform a very fast compression
56// without outputting the result and return the compressed output size.
57// The function returns -1 if no improvement could be achieved.
58// Using actual compression will most often produce better compression than the estimate.
59func EstimateBlockSize(src []byte) (d int) {
60 if len(src) <= inputMargin || int64(len(src)) > 0xffffffff {
61 return -1
62 }
63 if len(src) <= 1024 {
64 d = calcBlockSizeSmall(src)
65 } else {
66 d = calcBlockSize(src)
67 }
68
69 if d == 0 {
70 return -1
71 }
72 // Size of the varint encoded block size.
73 d += (bits.Len64(uint64(len(src))) + 7) / 7
74
75 if d >= len(src) {
76 return -1
77 }
78 return d
79}
80
81// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
82// slice of dst if dst was large enough to hold the entire encoded block.
83// Otherwise, a newly allocated slice will be returned.
84//
85// EncodeBetter compresses better than Encode but typically with a
86// 10-40% speed decrease on both compression and decompression.
87//
88// The dst and src must not overlap. It is valid to pass a nil dst.
89//
90// The blocks will require the same amount of memory to decode as encoding,
91// and does not make for concurrent decoding.
92// Also note that blocks do not contain CRC information, so corruption may be undetected.
93//
94// If you need to encode larger amounts of data, consider using
95// the streaming interface which gives all of these features.
96func EncodeBetter(dst, src []byte) []byte {
97 if n := MaxEncodedLen(len(src)); n < 0 {
98 panic(ErrTooLarge)
99 } else if len(dst) < n {
100 dst = make([]byte, n)
101 }
102
103 // The block starts with the varint-encoded length of the decompressed bytes.
104 d := binary.PutUvarint(dst, uint64(len(src)))
105
106 if len(src) == 0 {
107 return dst[:d]
108 }
109 if len(src) < minNonLiteralBlockSize {
110 d += emitLiteral(dst[d:], src)
111 return dst[:d]
112 }
113 n := encodeBlockBetter(dst[d:], src)
114 if n > 0 {
115 d += n
116 return dst[:d]
117 }
118 // Not compressible
119 d += emitLiteral(dst[d:], src)
120 return dst[:d]
121}
122
123// EncodeBest returns the encoded form of src. The returned slice may be a sub-
124// slice of dst if dst was large enough to hold the entire encoded block.
125// Otherwise, a newly allocated slice will be returned.
126//
127// EncodeBest compresses as good as reasonably possible but with a
128// big speed decrease.
129//
130// The dst and src must not overlap. It is valid to pass a nil dst.
131//
132// The blocks will require the same amount of memory to decode as encoding,
133// and does not make for concurrent decoding.
134// Also note that blocks do not contain CRC information, so corruption may be undetected.
135//
136// If you need to encode larger amounts of data, consider using
137// the streaming interface which gives all of these features.
138func EncodeBest(dst, src []byte) []byte {
139 if n := MaxEncodedLen(len(src)); n < 0 {
140 panic(ErrTooLarge)
141 } else if len(dst) < n {
142 dst = make([]byte, n)
143 }
144
145 // The block starts with the varint-encoded length of the decompressed bytes.
146 d := binary.PutUvarint(dst, uint64(len(src)))
147
148 if len(src) == 0 {
149 return dst[:d]
150 }
151 if len(src) < minNonLiteralBlockSize {
152 d += emitLiteral(dst[d:], src)
153 return dst[:d]
154 }
155 n := encodeBlockBest(dst[d:], src, nil)
156 if n > 0 {
157 d += n
158 return dst[:d]
159 }
160 // Not compressible
161 d += emitLiteral(dst[d:], src)
162 return dst[:d]
163}
164
// EncodeSnappy returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The output is Snappy compatible and will likely decompress faster.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and does not make for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func EncodeSnappy(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if cap(dst) < n {
		dst = make([]byte, n)
	} else {
		// Reuse dst's backing array.
		dst = dst[:n]
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	if len(src) == 0 {
		return dst[:d]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for matching to pay off; store as one literal run.
		d += emitLiteral(dst[d:], src)
		return dst[:d]
	}

	n := encodeBlockSnappy(dst[d:], src)
	if n > 0 {
		d += n
		return dst[:d]
	}
	// Not compressible
	d += emitLiteral(dst[d:], src)
	return dst[:d]
}
208
209// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub-
210// slice of dst if dst was large enough to hold the entire encoded block.
211// Otherwise, a newly allocated slice will be returned.
212//
213// The output is Snappy compatible and will likely decompress faster.
214//
215// The dst and src must not overlap. It is valid to pass a nil dst.
216//
217// The blocks will require the same amount of memory to decode as encoding,
218// and does not make for concurrent decoding.
219// Also note that blocks do not contain CRC information, so corruption may be undetected.
220//
221// If you need to encode larger amounts of data, consider using
222// the streaming interface which gives all of these features.
223func EncodeSnappyBetter(dst, src []byte) []byte {
224 if n := MaxEncodedLen(len(src)); n < 0 {
225 panic(ErrTooLarge)
226 } else if cap(dst) < n {
227 dst = make([]byte, n)
228 } else {
229 dst = dst[:n]
230 }
231
232 // The block starts with the varint-encoded length of the decompressed bytes.
233 d := binary.PutUvarint(dst, uint64(len(src)))
234
235 if len(src) == 0 {
236 return dst[:d]
237 }
238 if len(src) < minNonLiteralBlockSize {
239 d += emitLiteral(dst[d:], src)
240 return dst[:d]
241 }
242
243 n := encodeBlockBetterSnappy(dst[d:], src)
244 if n > 0 {
245 d += n
246 return dst[:d]
247 }
248 // Not compressible
249 d += emitLiteral(dst[d:], src)
250 return dst[:d]
251}
252
// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The output is Snappy compatible and will likely decompress faster.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and does not make for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func EncodeSnappyBest(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if cap(dst) < n {
		dst = make([]byte, n)
	} else {
		// Reuse dst's backing array.
		dst = dst[:n]
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	if len(src) == 0 {
		return dst[:d]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for matching to pay off; store as one literal run.
		d += emitLiteral(dst[d:], src)
		return dst[:d]
	}

	n := encodeBlockBestSnappy(dst[d:], src)
	if n > 0 {
		d += n
		return dst[:d]
	}
	// Not compressible
	d += emitLiteral(dst[d:], src)
	return dst[:d]
}
296
// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination.
// If the destination is nil or too small, a new will be allocated.
// The blocks are not validated, so garbage in = garbage out.
// dst may not overlap block data.
// Any data in dst is preserved as is, so it will not be considered a block.
func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) {
	// First pass: sum decoded sizes and compressed payload sizes (sans headers).
	totalSize := uint64(0)
	compSize := 0
	for _, b := range blocks {
		l, hdr, err := decodedLen(b)
		if err != nil {
			return nil, err
		}
		totalSize += uint64(l)
		compSize += len(b) - hdr
	}
	if totalSize == 0 {
		// An empty decoded stream is just a zero-length header byte.
		dst = append(dst, 0)
		return dst, nil
	}
	if totalSize > math.MaxUint32 {
		return nil, ErrTooLarge
	}
	// Build the combined header: varint of the total decoded length.
	var tmp [binary.MaxVarintLen32]byte
	hdrSize := binary.PutUvarint(tmp[:], totalSize)
	wantSize := hdrSize + compSize

	// Grow dst once up front, preserving its existing contents.
	if cap(dst)-len(dst) < wantSize {
		dst = append(make([]byte, 0, wantSize+len(dst)), dst...)
	}
	dst = append(dst, tmp[:hdrSize]...)
	// Second pass: append each block's payload with its own header stripped.
	for _, b := range blocks {
		_, hdr, err := decodedLen(b)
		if err != nil {
			return nil, err
		}
		dst = append(dst, b[hdr:]...)
	}
	return dst, nil
}
337
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 8

// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// will be accepted by the encoder. Smaller inputs are emitted as plain literals.
const minNonLiteralBlockSize = 32

// intReduction is 1 when int is 32 bits and 0 when it is 64 bits; it is used
// to halve the size limits below on 32 bit platforms.
const intReduction = 2 - (1 << (^uint(0) >> 63)) // 1 (32 bits) or 0 (64 bits)

// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size.
// Blocks this big are highly discouraged, though.
// Half the size on 32 bit systems.
// NOTE(review): the subtraction reserves room for the varint length header
// plus a few slack bytes — confirm the trailing 5 against MaxEncodedLen.
const MaxBlockSize = (1<<(32-intReduction) - 1) - binary.MaxVarintLen32 - 5
358
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
// 32 bit platforms will have lower thresholds for rejecting big content.
func MaxEncodedLen(srcLen int) int {
	// Work in uint64 so a negative srcLen becomes a huge value and is
	// rejected by the range checks below.
	n := uint64(srcLen)
	if intReduction == 1 {
		// 32 bits
		if n > math.MaxInt32 {
			// Also includes negative.
			return -1
		}
	} else if n > 0xffffffff {
		// 64 bits
		// Also includes negative.
		return -1
	}
	// Size of the varint encoded block size.
	n = n + uint64((bits.Len64(n)+7)/7)

	// Add maximum size of encoding block as literals.
	n += uint64(literalExtraSize(int64(srcLen)))
	// Re-check the range: the header and literal overhead may have pushed n
	// past the platform limit.
	if intReduction == 1 {
		// 32 bits
		if n > math.MaxInt32 {
			return -1
		}
	} else if n > 0xffffffff {
		// 64 bits
		// Also includes negative.
		return -1
	}
	return int(n)
}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go
new file mode 100644
index 0000000..5e57995
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_all.go
@@ -0,0 +1,1048 @@
1// Copyright 2016 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019 Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6package s2
7
8import (
9 "bytes"
10 "encoding/binary"
11 "fmt"
12 "math/bits"
13)
14
// load32 reads a little-endian uint32 from b starting at index i.
func load32(b []byte, i int) uint32 {
	return binary.LittleEndian.Uint32(b[i : i+4])
}
18
// load64 reads a little-endian uint64 from b starting at index i.
func load64(b []byte, i int) uint64 {
	return binary.LittleEndian.Uint64(b[i : i+8])
}
22
// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
	const prime6bytes = 227718039650203
	// Shift the low 48 bits to the top, multiply by a prime and keep the
	// uppermost h bits of the product.
	return uint32(((u << 16) * prime6bytes) >> ((64 - h) & 63))
}
29
// encodeGo mirrors Encode but always uses the pure-Go block encoder
// (encodeBlockGo). The returned slice may be a sub-slice of dst.
func encodeGo(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if len(dst) < n {
		dst = make([]byte, n)
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	if len(src) == 0 {
		return dst[:d]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for matching to pay off; store as one literal run.
		d += emitLiteral(dst[d:], src)
		return dst[:d]
	}
	n := encodeBlockGo(dst[d:], src)
	if n > 0 {
		d += n
		return dst[:d]
	}
	// Not compressible
	d += emitLiteral(dst[d:], src)
	return dst[:d]
}
56
// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockGo(dst, src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits

		debug = false
	)

	var table [maxTableSize]uint32

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// Bail if we can't compress to at least this.
	// (Requires roughly 3% savings: len/32 plus 5 bytes.)
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1

	for {
		candidate := 0
		for {
			// Next src position to check.
			// The step grows with the distance since the last emit, so
			// incompressible data is skipped through faster.
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						// First differing byte found via trailing zero count.
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				if debug {
					// Validate match.
					if s <= candidate {
						panic("s <= candidate")
					}
					a := src[base:s]
					b := src[base-repeat : base-repeat+(s-base)]
					if !bytes.Equal(a, b) {
						panic("mismatch")
					}
				}
				if nextEmit > 0 {
					// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
					d += emitRepeat(dst[d:], repeat, s-base)
				} else {
					// First match, cannot be repeat.
					d += emitCopy(dst[d:], repeat, s-base)
				}
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards.
		// The top bytes will be rechecked to get the full match.
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopy(dst[d:], repeat, s-base)
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if debug && s == candidate {
				panic("s == candidate")
			}
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
274
// encodeBlockSnappyGo encodes a non-empty src to a guaranteed-large-enough
// dst, emitting only literal and plain copy ops (via emitCopyNoRepeat), i.e.
// without the S2 repeat extension — presumably for Snappy-compatible output
// (NOTE(review): inferred from the name and the use of emitCopyNoRepeat).
// It returns the number of bytes written to dst, or 0 if the input did not
// compress below dstLimit.
func encodeBlockSnappyGo(dst, src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits
	)

	// table maps hash6 of the next 6 bytes -> most recent src position seen.
	var table [maxTableSize]uint32

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1

	for {
		candidate := 0
		for {
			// Next src position to check.
			// The skip distance grows with the span of unmatched input, so
			// incompressible data is scanned progressively faster.
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			// Probe the table at two consecutive positions (s and s+1) and
			// record both, then precompute the hash for s+2.
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						// First differing byte index from the XOR's trailing zeros.
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}

				d += emitCopyNoRepeat(dst[d:], repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopyNoRepeat(dst[d:], repeat, s-base)
			if false {
				// Validate match. (Disabled debug check.)
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
459
// encodeBlockDictGo encodes a non-empty src to a guaranteed-large-enough dst,
// searching both the supplied dictionary and previously-seen src for matches.
// It assumes that the varint-encoded length of the decompressed bytes has
// already been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits
		maxAhead     = 8 // maximum bytes ahead without checking sLimit

		debug = false
	)
	dict.initFast()

	// table maps hash6 of upcoming bytes -> most recent src position seen.
	var table [maxTableSize]uint32

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	// Dict matches are only valid within MaxDictSrcOffset of the block start.
	if sLimit > MaxDictSrcOffset-maxAhead {
		sLimit = MaxDictSrcOffset - maxAhead
	}

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form can start with a dict entry (copy or repeat).
	s := 0

	// Convert dict repeat to offset
	repeat := len(dict.dict) - dict.repeat
	cv := load64(src, 0)

	// While in dict
searchDict:
	for {
		// Next src position to check
		nextS := s + (s-nextEmit)>>6 + 4
		hash0 := hash6(cv, tableBits)
		hash1 := hash6(cv>>8, tableBits)
		if nextS > sLimit {
			if debug {
				fmt.Println("slimit reached", s, nextS)
			}
			break searchDict
		}
		candidateDict := int(dict.fastTable[hash0])
		candidateDict2 := int(dict.fastTable[hash1])
		candidate2 := int(table[hash1])
		candidate := int(table[hash0])
		table[hash0] = uint32(s)
		table[hash1] = uint32(s + 1)
		hash2 := hash6(cv>>16, tableBits)

		// Check repeat at offset checkRep.
		const checkRep = 1

		if repeat > s {
			// Repeat offset still points into the dictionary.
			candidate := len(dict.dict) - repeat + s
			if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) {
				// Extend back
				base := s
				for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])
				if debug && nextEmit != base {
					fmt.Println("emitted ", base-nextEmit, "literals")
				}
				// Extend forward inside the dictionary.
				s += 4
				candidate += 4
				for candidate < len(dict.dict)-8 && s <= len(src)-8 {
					if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				d += emitRepeat(dst[d:], repeat, s-base)
				if debug {
					fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
				}
				nextEmit = s
				if s >= sLimit {
					break searchDict
				}
				cv = load64(src, s)
				continue
			}
		} else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
			base := s + checkRep
			// Extend back
			for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
				i--
				base--
			}
			d += emitLiteral(dst[d:], src[nextEmit:base])
			if debug && nextEmit != base {
				fmt.Println("emitted ", base-nextEmit, "literals")
			}

			// Extend forward
			candidate := s - repeat + 4 + checkRep
			s += 4 + checkRep
			for s <= sLimit {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			if nextEmit > 0 {
				// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
				d += emitRepeat(dst[d:], repeat, s-base)
			} else {
				// First match, cannot be repeat.
				d += emitCopy(dst[d:], repeat, s-base)
			}

			nextEmit = s
			if s >= sLimit {
				break searchDict
			}
			if debug {
				fmt.Println("emitted reg repeat", s-base, "s:", s)
			}
			cv = load64(src, s)
			continue searchDict
		}
		if s == 0 {
			// Position 0 can't form a table match; advance.
			cv = load64(src, nextS)
			s = nextS
			continue searchDict
		}
		// Start with table. These matches will always be closer.
		if uint32(cv) == load32(src, candidate) {
			goto emitMatch
		}
		candidate = int(table[hash2])
		if uint32(cv>>8) == load32(src, candidate2) {
			table[hash2] = uint32(s + 2)
			candidate = candidate2
			s++
			goto emitMatch
		}

		// Check dict. Dicts have longer offsets, so we want longer matches.
		if cv == load64(dict.dict, candidateDict) {
			table[hash2] = uint32(s + 2)
			goto emitDict
		}

		candidateDict = int(dict.fastTable[hash2])
		// Check if upper 7 bytes match
		if candidateDict2 >= 1 {
			if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) {
				table[hash2] = uint32(s + 2)
				candidateDict = candidateDict2
				s++
				goto emitDict
			}
		}

		table[hash2] = uint32(s + 2)
		if uint32(cv>>16) == load32(src, candidate) {
			s += 2
			goto emitMatch
		}
		if candidateDict >= 2 {
			// Check if upper 6 bytes match
			if cv^load64(dict.dict, candidateDict-2) < (1 << 16) {
				s += 2
				goto emitDict
			}
		}

		cv = load64(src, nextS)
		s = nextS
		continue searchDict

	emitDict:
		{
			if debug {
				if load32(dict.dict, candidateDict) != load32(src, s) {
					panic("dict emit mismatch")
				}
			}
			// Extend backwards.
			// The top bytes will be rechecked to get the full match.
			for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] {
				candidateDict--
				s--
			}

			// Bail if we exceed the maximum size.
			if d+(s-nextEmit) > dstLimit {
				return 0
			}

			// A 4-byte match has been found. We'll later see if more than 4 bytes
			// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
			// them as literal bytes.

			d += emitLiteral(dst[d:], src[nextEmit:s])
			if debug && nextEmit != s {
				fmt.Println("emitted ", s-nextEmit, "literals")
			}
			{
				// Invariant: we have a 4-byte match at s, and no need to emit any
				// literal bytes prior to s.
				base := s
				// Offset reaches back through the dictionary.
				repeat = s + (len(dict.dict)) - candidateDict

				// Extend the 4-byte match as long as possible.
				s += 4
				candidateDict += 4
				for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 {
					if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidateDict += 8
				}

				// Matches longer than 64 are split.
				if s <= sLimit || s-base < 8 {
					d += emitCopy(dst[d:], repeat, s-base)
				} else {
					// Split to ensure we don't start a copy within next block
					d += emitCopy(dst[d:], repeat, 4)
					d += emitRepeat(dst[d:], repeat, s-base-4)
				}
				if false {
					// Validate match. (Disabled debug check.)
					if s <= candidate {
						panic("s <= candidate")
					}
					a := src[base:s]
					b := dict.dict[base-repeat : base-repeat+(s-base)]
					if !bytes.Equal(a, b) {
						panic("mismatch")
					}
				}
				if debug {
					fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s)
				}
				nextEmit = s
				if s >= sLimit {
					break searchDict
				}

				if d > dstLimit {
					// Do we have space for more, if not bail.
					return 0
				}

				// Index and continue loop to try new candidate.
				x := load64(src, s-2)
				m2Hash := hash6(x, tableBits)
				currHash := hash6(x>>8, tableBits)
				table[m2Hash] = uint32(s - 2)
				table[currHash] = uint32(s - 1)
				cv = load64(src, s)
			}
			continue
		}
	emitMatch:

		// Extend backwards.
		// The top bytes will be rechecked to get the full match.
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		d += emitLiteral(dst[d:], src[nextEmit:s])
		if debug && nextEmit != s {
			fmt.Println("emitted ", s-nextEmit, "literals")
		}
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopy(dst[d:], repeat, s-base)
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}
			if debug {
				fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
			}
			nextEmit = s
			if s >= sLimit {
				break searchDict
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if debug && s == candidate {
				panic("s == candidate")
			}
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

	// Search without dict:
	if repeat > s {
		repeat = 0
	}

	// No more dict
	sLimit = len(src) - inputMargin
	if s >= sLimit {
		goto emitRemainder
	}
	if debug {
		fmt.Println("non-dict matching at", s, "repeat:", repeat)
	}
	cv = load64(src, s)
	if debug {
		fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
	}
	for {
		candidate := 0
		for {
			// Next src position to check
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])
				if debug && nextEmit != base {
					fmt.Println("emitted ", base-nextEmit, "literals")
				}
				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				if debug {
					// Validate match.
					if s <= candidate {
						panic("s <= candidate")
					}
					a := src[base:s]
					b := src[base-repeat : base-repeat+(s-base)]
					if !bytes.Equal(a, b) {
						panic("mismatch")
					}
				}
				if nextEmit > 0 {
					// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
					d += emitRepeat(dst[d:], repeat, s-base)
				} else {
					// First match, cannot be repeat.
					d += emitCopy(dst[d:], repeat, s-base)
				}
				if debug {
					fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s)
				}
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards.
		// The top bytes will be rechecked to get the full match.
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		d += emitLiteral(dst[d:], src[nextEmit:s])
		if debug && nextEmit != s {
			fmt.Println("emitted ", s-nextEmit, "literals")
		}
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopy(dst[d:], repeat, s-base)
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}
			if debug {
				fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
			}
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if debug && s == candidate {
				panic("s == candidate")
			}
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
		if debug && nextEmit != s {
			fmt.Println("emitted ", len(src)-nextEmit, "literals")
		}
	}
	return d
}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
new file mode 100644
index 0000000..ebc332a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
@@ -0,0 +1,148 @@
1//go:build !appengine && !noasm && gc
2// +build !appengine,!noasm,gc
3
4package s2
5
6const hasAmd64Asm = true
7
8// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
9// assumes that the varint-encoded length of the decompressed bytes has already
10// been written.
11//
12// It also assumes that:
13//
14// len(dst) >= MaxEncodedLen(len(src)) &&
15// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
16func encodeBlock(dst, src []byte) (d int) {
17 const (
18 // Use 12 bit table when less than...
19 limit12B = 16 << 10
20 // Use 10 bit table when less than...
21 limit10B = 4 << 10
22 // Use 8 bit table when less than...
23 limit8B = 512
24 )
25
26 if len(src) >= 4<<20 {
27 return encodeBlockAsm(dst, src)
28 }
29 if len(src) >= limit12B {
30 return encodeBlockAsm4MB(dst, src)
31 }
32 if len(src) >= limit10B {
33 return encodeBlockAsm12B(dst, src)
34 }
35 if len(src) >= limit8B {
36 return encodeBlockAsm10B(dst, src)
37 }
38 if len(src) < minNonLiteralBlockSize {
39 return 0
40 }
41 return encodeBlockAsm8B(dst, src)
42}
43
44// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
45// assumes that the varint-encoded length of the decompressed bytes has already
46// been written.
47//
48// It also assumes that:
49//
50// len(dst) >= MaxEncodedLen(len(src)) &&
51// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
52func encodeBlockBetter(dst, src []byte) (d int) {
53 const (
54 // Use 12 bit table when less than...
55 limit12B = 16 << 10
56 // Use 10 bit table when less than...
57 limit10B = 4 << 10
58 // Use 8 bit table when less than...
59 limit8B = 512
60 )
61
62 if len(src) > 4<<20 {
63 return encodeBetterBlockAsm(dst, src)
64 }
65 if len(src) >= limit12B {
66 return encodeBetterBlockAsm4MB(dst, src)
67 }
68 if len(src) >= limit10B {
69 return encodeBetterBlockAsm12B(dst, src)
70 }
71 if len(src) >= limit8B {
72 return encodeBetterBlockAsm10B(dst, src)
73 }
74 if len(src) < minNonLiteralBlockSize {
75 return 0
76 }
77 return encodeBetterBlockAsm8B(dst, src)
78}
79
80// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
81// assumes that the varint-encoded length of the decompressed bytes has already
82// been written.
83//
84// It also assumes that:
85//
86// len(dst) >= MaxEncodedLen(len(src)) &&
87// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
88func encodeBlockSnappy(dst, src []byte) (d int) {
89 const (
90 // Use 12 bit table when less than...
91 limit12B = 16 << 10
92 // Use 10 bit table when less than...
93 limit10B = 4 << 10
94 // Use 8 bit table when less than...
95 limit8B = 512
96 )
97 if len(src) >= 64<<10 {
98 return encodeSnappyBlockAsm(dst, src)
99 }
100 if len(src) >= limit12B {
101 return encodeSnappyBlockAsm64K(dst, src)
102 }
103 if len(src) >= limit10B {
104 return encodeSnappyBlockAsm12B(dst, src)
105 }
106 if len(src) >= limit8B {
107 return encodeSnappyBlockAsm10B(dst, src)
108 }
109 if len(src) < minNonLiteralBlockSize {
110 return 0
111 }
112 return encodeSnappyBlockAsm8B(dst, src)
113}
114
115// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
116// assumes that the varint-encoded length of the decompressed bytes has already
117// been written.
118//
119// It also assumes that:
120//
121// len(dst) >= MaxEncodedLen(len(src)) &&
122// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
123func encodeBlockBetterSnappy(dst, src []byte) (d int) {
124 const (
125 // Use 12 bit table when less than...
126 limit12B = 16 << 10
127 // Use 10 bit table when less than...
128 limit10B = 4 << 10
129 // Use 8 bit table when less than...
130 limit8B = 512
131 )
132 if len(src) >= 64<<10 {
133 return encodeSnappyBetterBlockAsm(dst, src)
134 }
135 if len(src) >= limit12B {
136 return encodeSnappyBetterBlockAsm64K(dst, src)
137 }
138 if len(src) >= limit10B {
139 return encodeSnappyBetterBlockAsm12B(dst, src)
140 }
141 if len(src) >= limit8B {
142 return encodeSnappyBetterBlockAsm10B(dst, src)
143 }
144 if len(src) < minNonLiteralBlockSize {
145 return 0
146 }
147 return encodeSnappyBetterBlockAsm8B(dst, src)
148}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go
new file mode 100644
index 0000000..47bac74
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_best.go
@@ -0,0 +1,796 @@
1// Copyright 2016 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019 Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6package s2
7
8import (
9 "fmt"
10 "math"
11 "math/bits"
12)
13
14// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It
15// assumes that the varint-encoded length of the decompressed bytes has already
16// been written.
17//
18// It also assumes that:
19//
20// len(dst) >= MaxEncodedLen(len(src)) &&
21// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
22func encodeBlockBest(dst, src []byte, dict *Dict) (d int) {
23 // Initialize the hash tables.
24 const (
25 // Long hash matches.
26 lTableBits = 19
27 maxLTableSize = 1 << lTableBits
28
29 // Short hash matches.
30 sTableBits = 16
31 maxSTableSize = 1 << sTableBits
32
33 inputMargin = 8 + 2
34
35 debug = false
36 )
37
38 // sLimit is when to stop looking for offset/length copies. The inputMargin
39 // lets us use a fast path for emitLiteral in the main loop, while we are
40 // looking for copies.
41 sLimit := len(src) - inputMargin
42 if len(src) < minNonLiteralBlockSize {
43 return 0
44 }
45 sLimitDict := len(src) - inputMargin
46 if sLimitDict > MaxDictSrcOffset-inputMargin {
47 sLimitDict = MaxDictSrcOffset - inputMargin
48 }
49
50 var lTable [maxLTableSize]uint64
51 var sTable [maxSTableSize]uint64
52
53 // Bail if we can't compress to at least this.
54 dstLimit := len(src) - 5
55
56 // nextEmit is where in src the next emitLiteral should start from.
57 nextEmit := 0
58
59 // The encoded form must start with a literal, as there are no previous
60 // bytes to copy, so we start looking for hash matches at s == 1.
61 s := 1
62 repeat := 1
63 if dict != nil {
64 dict.initBest()
65 s = 0
66 repeat = len(dict.dict) - dict.repeat
67 }
68 cv := load64(src, s)
69
70 // We search for a repeat at -1, but don't output repeats when nextEmit == 0
71 const lowbitMask = 0xffffffff
72 getCur := func(x uint64) int {
73 return int(x & lowbitMask)
74 }
75 getPrev := func(x uint64) int {
76 return int(x >> 32)
77 }
78 const maxSkip = 64
79
80 for {
81 type match struct {
82 offset int
83 s int
84 length int
85 score int
86 rep, dict bool
87 }
88 var best match
89 for {
90 // Next src position to check
91 nextS := (s-nextEmit)>>8 + 1
92 if nextS > maxSkip {
93 nextS = s + maxSkip
94 } else {
95 nextS += s
96 }
97 if nextS > sLimit {
98 goto emitRemainder
99 }
100 if dict != nil && s >= MaxDictSrcOffset {
101 dict = nil
102 if repeat > s {
103 repeat = math.MinInt32
104 }
105 }
106 hashL := hash8(cv, lTableBits)
107 hashS := hash4(cv, sTableBits)
108 candidateL := lTable[hashL]
109 candidateS := sTable[hashS]
110
111 score := func(m match) int {
112 // Matches that are longer forward are penalized since we must emit it as a literal.
113 score := m.length - m.s
114 if nextEmit == m.s {
115 // If we do not have to emit literals, we save 1 byte
116 score++
117 }
118 offset := m.s - m.offset
119 if m.rep {
120 return score - emitRepeatSize(offset, m.length)
121 }
122 return score - emitCopySize(offset, m.length)
123 }
124
125 matchAt := func(offset, s int, first uint32, rep bool) match {
126 if best.length != 0 && best.s-best.offset == s-offset {
127 // Don't retest if we have the same offset.
128 return match{offset: offset, s: s}
129 }
130 if load32(src, offset) != first {
131 return match{offset: offset, s: s}
132 }
133 m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
134 s += 4
135 for s < len(src) {
136 if len(src)-s < 8 {
137 if src[s] == src[m.length] {
138 m.length++
139 s++
140 continue
141 }
142 break
143 }
144 if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
145 m.length += bits.TrailingZeros64(diff) >> 3
146 break
147 }
148 s += 8
149 m.length += 8
150 }
151 m.length -= offset
152 m.score = score(m)
153 if m.score <= -m.s {
154 // Eliminate if no savings, we might find a better one.
155 m.length = 0
156 }
157 return m
158 }
159 matchDict := func(candidate, s int, first uint32, rep bool) match {
160 if s >= MaxDictSrcOffset {
161 return match{offset: candidate, s: s}
162 }
163 // Calculate offset as if in continuous array with s
164 offset := -len(dict.dict) + candidate
165 if best.length != 0 && best.s-best.offset == s-offset && !rep {
166 // Don't retest if we have the same offset.
167 return match{offset: offset, s: s}
168 }
169
170 if load32(dict.dict, candidate) != first {
171 return match{offset: offset, s: s}
172 }
173 m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true}
174 s += 4
175 if !rep {
176 for s < sLimitDict && m.length < len(dict.dict) {
177 if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
178 if src[s] == dict.dict[m.length] {
179 m.length++
180 s++
181 continue
182 }
183 break
184 }
185 if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
186 m.length += bits.TrailingZeros64(diff) >> 3
187 break
188 }
189 s += 8
190 m.length += 8
191 }
192 } else {
193 for s < len(src) && m.length < len(dict.dict) {
194 if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
195 if src[s] == dict.dict[m.length] {
196 m.length++
197 s++
198 continue
199 }
200 break
201 }
202 if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
203 m.length += bits.TrailingZeros64(diff) >> 3
204 break
205 }
206 s += 8
207 m.length += 8
208 }
209 }
210 m.length -= candidate
211 m.score = score(m)
212 if m.score <= -m.s {
213 // Eliminate if no savings, we might find a better one.
214 m.length = 0
215 }
216 return m
217 }
218
219 bestOf := func(a, b match) match {
220 if b.length == 0 {
221 return a
222 }
223 if a.length == 0 {
224 return b
225 }
226 as := a.score + b.s
227 bs := b.score + a.s
228 if as >= bs {
229 return a
230 }
231 return b
232 }
233
234 if s > 0 {
235 best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
236 best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
237 best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
238 }
239 if dict != nil {
240 candidateL := dict.bestTableLong[hashL]
241 candidateS := dict.bestTableShort[hashS]
242 best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
243 best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false))
244 best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
245 best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false))
246 }
247 {
248 if (dict == nil || repeat <= s) && repeat > 0 {
249 best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
250 } else if s-repeat < -4 && dict != nil {
251 candidate := len(dict.dict) - (repeat - s)
252 best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
253 candidate++
254 best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true))
255 }
256
257 if best.length > 0 {
258 hashS := hash4(cv>>8, sTableBits)
259 // s+1
260 nextShort := sTable[hashS]
261 s := s + 1
262 cv := load64(src, s)
263 hashL := hash8(cv, lTableBits)
264 nextLong := lTable[hashL]
265 best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
266 best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
267 best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
268 best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
269
270 // Dict at + 1
271 if dict != nil {
272 candidateL := dict.bestTableLong[hashL]
273 candidateS := dict.bestTableShort[hashS]
274
275 best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
276 best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
277 }
278
279 // s+2
280 if true {
281 hashS := hash4(cv>>8, sTableBits)
282
283 nextShort = sTable[hashS]
284 s++
285 cv = load64(src, s)
286 hashL := hash8(cv, lTableBits)
287 nextLong = lTable[hashL]
288
289 if (dict == nil || repeat <= s) && repeat > 0 {
290 // Repeat at + 2
291 best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true))
292 } else if repeat-s > 4 && dict != nil {
293 candidate := len(dict.dict) - (repeat - s)
294 best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
295 }
296 best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
297 best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
298 best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
299 best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
300
301 // Dict at +2
302 // Very small gain
303 if dict != nil {
304 candidateL := dict.bestTableLong[hashL]
305 candidateS := dict.bestTableShort[hashS]
306
307 best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
308 best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
309 }
310 }
311 // Search for a match at best match end, see if that is better.
312 // Allow some bytes at the beginning to mismatch.
313 // Sweet spot is around 1-2 bytes, but depends on input.
314 // The skipped bytes are tested in Extend backwards,
315 // and still picked up as part of the match if they do.
316 const skipBeginning = 2
317 const skipEnd = 1
318 if sAt := best.s + best.length - skipEnd; sAt < sLimit {
319
320 sBack := best.s + skipBeginning - skipEnd
321 backL := best.length - skipBeginning
322 // Load initial values
323 cv = load64(src, sBack)
324
325 // Grab candidates...
326 next := lTable[hash8(load64(src, sAt), lTableBits)]
327
328 if checkAt := getCur(next) - backL; checkAt > 0 {
329 best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
330 }
331 if checkAt := getPrev(next) - backL; checkAt > 0 {
332 best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
333 }
334 // Disabled: Extremely small gain
335 if false {
336 next = sTable[hash4(load64(src, sAt), sTableBits)]
337 if checkAt := getCur(next) - backL; checkAt > 0 {
338 best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
339 }
340 if checkAt := getPrev(next) - backL; checkAt > 0 {
341 best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
342 }
343 }
344 }
345 }
346 }
347
348 // Update table
349 lTable[hashL] = uint64(s) | candidateL<<32
350 sTable[hashS] = uint64(s) | candidateS<<32
351
352 if best.length > 0 {
353 break
354 }
355
356 cv = load64(src, nextS)
357 s = nextS
358 }
359
360 // Extend backwards, not needed for repeats...
361 s = best.s
362 if !best.rep && !best.dict {
363 for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
364 best.offset--
365 best.length++
366 s--
367 }
368 }
369 if false && best.offset >= s {
370 panic(fmt.Errorf("t %d >= s %d", best.offset, s))
371 }
372 // Bail if we exceed the maximum size.
373 if d+(s-nextEmit) > dstLimit {
374 return 0
375 }
376
377 base := s
378 offset := s - best.offset
379 s += best.length
380
381 if offset > 65535 && s-base <= 5 && !best.rep {
382 // Bail if the match is equal or worse to the encoding.
383 s = best.s + 1
384 if s >= sLimit {
385 goto emitRemainder
386 }
387 cv = load64(src, s)
388 continue
389 }
390 if debug && nextEmit != base {
391 fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base)
392 }
393 d += emitLiteral(dst[d:], src[nextEmit:base])
394 if best.rep {
395 if nextEmit > 0 || best.dict {
396 if debug {
397 fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
398 }
399 // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
400 d += emitRepeat(dst[d:], offset, best.length)
401 } else {
402 // First match without dict cannot be a repeat.
403 if debug {
404 fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
405 }
406 d += emitCopy(dst[d:], offset, best.length)
407 }
408 } else {
409 if debug {
410 fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
411 }
412 d += emitCopy(dst[d:], offset, best.length)
413 }
414 repeat = offset
415
416 nextEmit = s
417 if s >= sLimit {
418 goto emitRemainder
419 }
420
421 if d > dstLimit {
422 // Do we have space for more, if not bail.
423 return 0
424 }
425 // Fill tables...
426 for i := best.s + 1; i < s; i++ {
427 cv0 := load64(src, i)
428 long0 := hash8(cv0, lTableBits)
429 short0 := hash4(cv0, sTableBits)
430 lTable[long0] = uint64(i) | lTable[long0]<<32
431 sTable[short0] = uint64(i) | sTable[short0]<<32
432 }
433 cv = load64(src, s)
434 }
435
436emitRemainder:
437 if nextEmit < len(src) {
438 // Bail if we exceed the maximum size.
439 if d+len(src)-nextEmit > dstLimit {
440 return 0
441 }
442 if debug && nextEmit != s {
443 fmt.Println("emitted ", len(src)-nextEmit, "literals")
444 }
445 d += emitLiteral(dst[d:], src[nextEmit:])
446 }
447 return d
448}
449
// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBestSnappy(dst, src []byte) (d int) {
	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 19
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 16
		maxSTableSize = 1 << sTableBits

		inputMargin = 8 + 2
	)

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Each table entry packs TWO candidate positions: the current one in the
	// low 32 bits and the previous occupant in the high 32 bits (see the
	// getCur/getPrev helpers and the "Update table" shifts below).
	var lTable [maxLTableSize]uint64
	var sTable [maxSTableSize]uint64

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1
	const lowbitMask = 0xffffffff
	// getCur extracts the most recent candidate position from a table entry.
	getCur := func(x uint64) int {
		return int(x & lowbitMask)
	}
	// getPrev extracts the older candidate position from a table entry.
	getPrev := func(x uint64) int {
		return int(x >> 32)
	}
	const maxSkip = 64

	for {
		// match describes a candidate copy: src[offset:] matching src[s:]
		// for `length` bytes, with a byte-savings `score`.
		type match struct {
			offset int
			s      int
			length int
			score  int
		}
		var best match
		for {
			// Next src position to check.
			// The step grows with the distance since the last emit, capped at maxSkip.
			nextS := (s-nextEmit)>>8 + 1
			if nextS > maxSkip {
				nextS = s + maxSkip
			} else {
				nextS += s
			}
			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash8(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL := lTable[hashL]
			candidateS := sTable[hashS]

			// score estimates the byte savings of emitting m as a copy.
			score := func(m match) int {
				// Matches that are longer forward are penalized since we must emit it as a literal.
				score := m.length - m.s
				if nextEmit == m.s {
					// If we do not have to emit literals, we save 1 byte
					score++
				}
				offset := m.s - m.offset

				return score - emitCopyNoRepeatSize(offset, m.length)
			}

			// matchAt verifies a candidate (4-byte check, then 8 bytes at a
			// time) and returns a scored match; length 0 means "rejected".
			matchAt := func(offset, s int, first uint32) match {
				if best.length != 0 && best.s-best.offset == s-offset {
					// Don't retest if we have the same offset.
					return match{offset: offset, s: s}
				}
				if load32(src, offset) != first {
					return match{offset: offset, s: s}
				}
				// length temporarily holds the forward position at the
				// candidate side; it is converted below by subtracting offset.
				m := match{offset: offset, s: s, length: 4 + offset}
				s += 4
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
						m.length += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					m.length += 8
				}
				m.length -= offset
				m.score = score(m)
				if m.score <= -m.s {
					// Eliminate if no savings, we might find a better one.
					m.length = 0
				}
				return m
			}

			// bestOf picks the higher-scoring match, compensating for the
			// different starting positions of a and b.
			bestOf := func(a, b match) match {
				if b.length == 0 {
					return a
				}
				if a.length == 0 {
					return b
				}
				as := a.score + b.s
				bs := b.score + a.s
				if as >= bs {
					return a
				}
				return b
			}

			// Try both packed candidates from the long and short tables.
			best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv)))
			best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv)))
			best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv)))

			{
				// Probe the repeat offset at s+1.
				best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
				if best.length > 0 {
					// s+1: also probe table candidates one byte ahead.
					// Note: the inner s/cv shadow the outer loop variables.
					nextShort := sTable[hash4(cv>>8, sTableBits)]
					s := s + 1
					cv := load64(src, s)
					nextLong := lTable[hash8(cv, lTableBits)]
					best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
					best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
					best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
					best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
					// Repeat at + 2
					best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))

					// s+2
					if true {
						nextShort = sTable[hash4(cv>>8, sTableBits)]
						s++
						cv = load64(src, s)
						nextLong = lTable[hash8(cv, lTableBits)]
						best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
						best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
						best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
						best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
					}
					// Search for a match at best match end, see if that is better.
					if sAt := best.s + best.length; sAt < sLimit {
						sBack := best.s
						backL := best.length
						// Load initial values
						cv = load64(src, sBack)
						// Search for mismatch
						next := lTable[hash8(load64(src, sAt), lTableBits)]
						//next := sTable[hash4(load64(src, sAt), sTableBits)]

						if checkAt := getCur(next) - backL; checkAt > 0 {
							best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
						}
						if checkAt := getPrev(next) - backL; checkAt > 0 {
							best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
						}
					}
				}
			}

			// Update table: new position goes into the low bits, the previous
			// "current" candidate is shifted up into the high bits.
			lTable[hashL] = uint64(s) | candidateL<<32
			sTable[hashS] = uint64(s) | candidateS<<32

			if best.length > 0 {
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards, not needed for repeats...
		s = best.s
		if true {
			for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
				best.offset--
				best.length++
				s--
			}
		}
		// Debug-only invariant check, compiled out via the false guard.
		if false && best.offset >= s {
			panic(fmt.Errorf("t %d >= s %d", best.offset, s))
		}
		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := s - best.offset

		s += best.length

		if offset > 65535 && s-base <= 5 {
			// Bail if the match is equal or worse to the encoding.
			s = best.s + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}
		d += emitLiteral(dst[d:], src[nextEmit:base])
		d += emitCopyNoRepeat(dst[d:], offset, best.length)
		repeat = offset

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}
		// Fill tables... index every position covered by the match so later
		// data can reference the interior of this match.
		for i := best.s + 1; i < s; i++ {
			cv0 := load64(src, i)
			long0 := hash8(cv0, lTableBits)
			short0 := hash4(cv0, sTableBits)
			lTable[long0] = uint64(i) | lTable[long0]<<32
			sTable[short0] = uint64(i) | sTable[short0]<<32
		}
		cv = load64(src, s)
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
710
711// emitCopySize returns the size to encode the offset+length
712//
713// It assumes that:
714//
715// 1 <= offset && offset <= math.MaxUint32
716// 4 <= length && length <= 1 << 24
717func emitCopySize(offset, length int) int {
718 if offset >= 65536 {
719 i := 0
720 if length > 64 {
721 length -= 64
722 if length >= 4 {
723 // Emit remaining as repeats
724 return 5 + emitRepeatSize(offset, length)
725 }
726 i = 5
727 }
728 if length == 0 {
729 return i
730 }
731 return i + 5
732 }
733
734 // Offset no more than 2 bytes.
735 if length > 64 {
736 if offset < 2048 {
737 // Emit 8 bytes, then rest as repeats...
738 return 2 + emitRepeatSize(offset, length-8)
739 }
740 // Emit remaining as repeats, at least 4 bytes remain.
741 return 3 + emitRepeatSize(offset, length-60)
742 }
743 if length >= 12 || offset >= 2048 {
744 return 3
745 }
746 // Emit the remaining copy, encoded as 2 bytes.
747 return 2
748}
749
// emitCopyNoRepeatSize returns the size to encode the offset+length
//
// It assumes that:
//
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
func emitCopyNoRepeatSize(offset, length int) int {
	switch {
	case offset >= 65536:
		// 4-byte offset form: 5 bytes per op, each covering up to 64 bytes.
		return 5 + 5*(length/64)
	case length > 64:
		// 2-byte offset form: 3 bytes per extra op, each covering up to 60 bytes.
		return 3 + 3*(length/60)
	case length >= 12 || offset >= 2048:
		return 3
	default:
		// Remaining copy fits the 2-byte encoding.
		return 2
	}
}
772
// emitRepeatSize returns the number of bytes required to encode a repeat.
// Length must be at least 4 and < 1<<24
func emitRepeatSize(offset, length int) int {
	const maxRepeat = (1 << 24) - 1

	// Repeat offset, make length cheaper
	switch {
	case length <= 8, length < 12 && offset < 2048:
		return 2
	case length < (1<<8)+8:
		return 3
	case length < (1<<16)+(1<<8)+4:
		return 4
	}
	// 5-byte form; anything beyond maxRepeat spills into a second repeat op.
	remain := length - ((1 << 16) - 4)
	if remain > maxRepeat {
		return 5 + emitRepeatSize(offset, remain-maxRepeat+4)
	}
	return 5
}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go
new file mode 100644
index 0000000..544cb1e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_better.go
@@ -0,0 +1,1106 @@
1// Copyright 2016 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019 Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6package s2
7
8import (
9 "bytes"
10 "fmt"
11 "math/bits"
12)
13
// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4(u uint64, h uint8) uint32 {
	const prime4bytes = 2654435761
	// Multiplicative hash of the low 32 bits; the &31 keeps the shift legal
	// for any h without a branch.
	hashed := uint32(u) * prime4bytes
	return hashed >> ((32 - h) & 31)
}
20
// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash5(u uint64, h uint8) uint32 {
	const prime5bytes = 889523592379
	// Shift the low 40 bits to the top so the upper bytes cannot influence
	// the product, then keep the top h bits.
	hashed := (u << (64 - 40)) * prime5bytes
	return uint32(hashed >> ((64 - h) & 63))
}
27
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
	const prime7bytes = 58295818150454627
	// Shift the low 56 bits to the top so the high byte is discarded,
	// then keep the top h bits of the product.
	top := u << (64 - 56)
	return uint32((top * prime7bytes) >> ((64 - h) & 63))
}
34
// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
	const prime8bytes = 0xcf1bbcdcb7a56463
	// Full 64-bit multiplicative hash; keep the top h bits.
	hashed := u * prime8bytes
	return uint32(hashed >> ((64 - h) & 63))
}
41
// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterGo(dst, src []byte) (d int) {
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 17
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 14
		maxSTableSize = 1 << sTableBits
	)

	// Each entry holds one candidate position (unlike the "best" encoders,
	// which pack two per entry).
	var lTable [maxLTableSize]uint32
	var sTable [maxSTableSize]uint32

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We initialize repeat to 0, so we never match on first attempt
	repeat := 0

	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check.
			// Step size grows with the distance since the last emit.
			nextS = s + (s-nextEmit)>>7 + 1
			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			lTable[hashL] = uint32(s)
			sTable[hashS] = uint32(s)

			valLong := load64(src, candidateL)
			valShort := load64(src, candidateS)

			// If long matches at least 8 bytes, use that.
			if cv == valLong {
				break
			}
			if cv == valShort {
				candidateL = candidateS
				break
			}

			// Check repeat at offset checkRep.
			const checkRep = 1
			// Minimum length of a repeat. Tested with various values.
			// While 4-5 offers improvements in some, 6 reduces
			// regressions significantly.
			const wantRepeatBytes = 6
			const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
			// NOTE(review): this whole repeat short-circuit is disabled via the
			// `false &&` guard and is currently dead code.
			if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + wantRepeatBytes + checkRep
				s += wantRepeatBytes + checkRep
				for s < len(src) {
					if len(src)-s < 8 {
						// Tail: byte-by-byte comparison to avoid reading past src.
						if src[s] == src[candidate] {
							s++
							candidate++
							continue
						}
						break
					}
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
				d += emitRepeat(dst[d:], repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}
				// Index in-between, walking inwards from both ends.
				index0 := base + 1
				index1 := s - 2

				for index0 < index1 {
					cv0 := load64(src, index0)
					cv1 := load64(src, index1)
					lTable[hash7(cv0, lTableBits)] = uint32(index0)
					sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)

					lTable[hash7(cv1, lTableBits)] = uint32(index1)
					sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
					index0 += 2
					index1 -= 2
				}

				cv = load64(src, s)
				continue
			}

			// Long likely matches 7, so take that.
			if uint32(cv) == uint32(valLong) {
				break
			}

			// Check our short candidate
			if uint32(cv) == uint32(valShort) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint32(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					break
				}
				// Use our short candidate.
				candidateL = candidateS
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := base - candidateL

		// Extend the 4-byte match as long as possible.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				// Tail: byte-by-byte comparison to avoid reading past src.
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}

		// A short match with a >2-byte offset can cost more than it saves,
		// unless it can be emitted as a repeat.
		if offset > 65535 && s-base <= 5 && repeat != offset {
			// Bail if the match is equal or worse to the encoding.
			s = nextS + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}

		d += emitLiteral(dst[d:], src[nextEmit:base])
		if repeat == offset {
			// Same offset as last copy: the cheaper repeat encoding applies.
			d += emitRepeat(dst[d:], offset, s-base)
		} else {
			d += emitCopy(dst[d:], offset, s-base)
			repeat = offset
		}

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}

		// Index short & long
		index0 := base + 1
		index1 := s - 2

		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		lTable[hash7(cv0, lTableBits)] = uint32(index0)
		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)

		// lTable could be postponed, but very minor difference.
		lTable[hash7(cv1, lTableBits)] = uint32(index1)
		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
		index0 += 1
		index1 -= 1
		cv = load64(src, s)

		// Index large values sparsely in between.
		// We do two starting from different offsets for speed.
		index2 := (index0 + index1 + 1) >> 1
		for index2 < index1 {
			lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
			lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
			index0 += 2
			index2 += 2
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
299
// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 16
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 14
		maxSTableSize = 1 << sTableBits
	)

	// Each entry holds one candidate position.
	var lTable [maxLTableSize]uint32
	var sTable [maxSTableSize]uint32

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We initialize repeat to 0, so we never match on first attempt
	repeat := 0
	const maxSkip = 100

	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check.
			// Step size grows with distance since last emit, capped at maxSkip.
			nextS = (s-nextEmit)>>7 + 1
			if nextS > maxSkip {
				nextS = s + maxSkip
			} else {
				nextS += s
			}

			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			lTable[hashL] = uint32(s)
			sTable[hashS] = uint32(s)

			// Prefer the long-hash candidate (4-byte verification).
			if uint32(cv) == load32(src, candidateL) {
				break
			}

			// Check our short candidate
			if uint32(cv) == load32(src, candidateS) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint32(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					break
				}
				// Use our short candidate.
				candidateL = candidateS
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := base - candidateL

		// Extend the 4-byte match as long as possible.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				// Tail: byte-by-byte comparison to avoid reading past src.
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}

		// A short match with a >2-byte offset can cost more than it saves.
		if offset > 65535 && s-base <= 5 && repeat != offset {
			// Bail if the match is equal or worse to the encoding.
			s = nextS + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}

		// Snappy output: no repeat encoding, always a plain copy.
		d += emitLiteral(dst[d:], src[nextEmit:base])
		d += emitCopyNoRepeat(dst[d:], offset, s-base)
		repeat = offset

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}

		// Index short & long
		index0 := base + 1
		index1 := s - 2

		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		lTable[hash7(cv0, lTableBits)] = uint32(index0)
		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)

		lTable[hash7(cv1, lTableBits)] = uint32(index1)
		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
		index0 += 1
		index1 -= 1
		cv = load64(src, s)

		// Index large values sparsely in between.
		// We do two starting from different offsets for speed.
		index2 := (index0 + index1 + 1) >> 1
		for index2 < index1 {
			lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
			lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
			index0 += 2
			index2 += 2
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
485
486// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It
487// assumes that the varint-encoded length of the decompressed bytes has already
488// been written.
489//
490// It also assumes that:
491//
492// len(dst) >= MaxEncodedLen(len(src)) &&
493// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
494func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) {
495 // sLimit is when to stop looking for offset/length copies. The inputMargin
496 // lets us use a fast path for emitLiteral in the main loop, while we are
497 // looking for copies.
498 // Initialize the hash tables.
499 const (
500 // Long hash matches.
501 lTableBits = 17
502 maxLTableSize = 1 << lTableBits
503
504 // Short hash matches.
505 sTableBits = 14
506 maxSTableSize = 1 << sTableBits
507
508 maxAhead = 8 // maximum bytes ahead without checking sLimit
509
510 debug = false
511 )
512
513 sLimit := len(src) - inputMargin
514 if sLimit > MaxDictSrcOffset-maxAhead {
515 sLimit = MaxDictSrcOffset - maxAhead
516 }
517 if len(src) < minNonLiteralBlockSize {
518 return 0
519 }
520
521 dict.initBetter()
522
523 var lTable [maxLTableSize]uint32
524 var sTable [maxSTableSize]uint32
525
526 // Bail if we can't compress to at least this.
527 dstLimit := len(src) - len(src)>>5 - 6
528
529 // nextEmit is where in src the next emitLiteral should start from.
530 nextEmit := 0
531
532 // The encoded form must start with a literal, as there are no previous
533 // bytes to copy, so we start looking for hash matches at s == 1.
534 s := 0
535 cv := load64(src, s)
536
537 // We initialize repeat to 0, so we never match on first attempt
538 repeat := len(dict.dict) - dict.repeat
539
540 // While in dict
541searchDict:
542 for {
543 candidateL := 0
544 nextS := 0
545 for {
546 // Next src position to check
547 nextS = s + (s-nextEmit)>>7 + 1
548 if nextS > sLimit {
549 break searchDict
550 }
551 hashL := hash7(cv, lTableBits)
552 hashS := hash4(cv, sTableBits)
553 candidateL = int(lTable[hashL])
554 candidateS := int(sTable[hashS])
555 dictL := int(dict.betterTableLong[hashL])
556 dictS := int(dict.betterTableShort[hashS])
557 lTable[hashL] = uint32(s)
558 sTable[hashS] = uint32(s)
559
560 valLong := load64(src, candidateL)
561 valShort := load64(src, candidateS)
562
563 // If long matches at least 8 bytes, use that.
564 if s != 0 {
565 if cv == valLong {
566 goto emitMatch
567 }
568 if cv == valShort {
569 candidateL = candidateS
570 goto emitMatch
571 }
572 }
573
574 // Check dict repeat.
575 if repeat >= s+4 {
576 candidate := len(dict.dict) - repeat + s
577 if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) {
578 // Extend back
579 base := s
580 for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
581 i--
582 base--
583 }
584 d += emitLiteral(dst[d:], src[nextEmit:base])
585 if debug && nextEmit != base {
586 fmt.Println("emitted ", base-nextEmit, "literals")
587 }
588 s += 4
589 candidate += 4
590 for candidate < len(dict.dict)-8 && s <= len(src)-8 {
591 if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
592 s += bits.TrailingZeros64(diff) >> 3
593 break
594 }
595 s += 8
596 candidate += 8
597 }
598 d += emitRepeat(dst[d:], repeat, s-base)
599 if debug {
600 fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
601 }
602 nextEmit = s
603 if s >= sLimit {
604 break searchDict
605 }
606 // Index in-between
607 index0 := base + 1
608 index1 := s - 2
609
610 cv = load64(src, s)
611 for index0 < index1 {
612 cv0 := load64(src, index0)
613 cv1 := load64(src, index1)
614 lTable[hash7(cv0, lTableBits)] = uint32(index0)
615 sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
616
617 lTable[hash7(cv1, lTableBits)] = uint32(index1)
618 sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
619 index0 += 2
620 index1 -= 2
621 }
622 continue
623 }
624 }
625 // Don't try to find match at s==0
626 if s == 0 {
627 cv = load64(src, nextS)
628 s = nextS
629 continue
630 }
631
632 // Long likely matches 7, so take that.
633 if uint32(cv) == uint32(valLong) {
634 goto emitMatch
635 }
636
637 // Long dict...
638 if uint32(cv) == load32(dict.dict, dictL) {
639 candidateL = dictL
640 goto emitDict
641 }
642
643 // Check our short candidate
644 if uint32(cv) == uint32(valShort) {
645 // Try a long candidate at s+1
646 hashL = hash7(cv>>8, lTableBits)
647 candidateL = int(lTable[hashL])
648 lTable[hashL] = uint32(s + 1)
649 if uint32(cv>>8) == load32(src, candidateL) {
650 s++
651 goto emitMatch
652 }
653 // Use our short candidate.
654 candidateL = candidateS
655 goto emitMatch
656 }
657 if uint32(cv) == load32(dict.dict, dictS) {
658 // Try a long candidate at s+1
659 hashL = hash7(cv>>8, lTableBits)
660 candidateL = int(lTable[hashL])
661 lTable[hashL] = uint32(s + 1)
662 if uint32(cv>>8) == load32(src, candidateL) {
663 s++
664 goto emitMatch
665 }
666 candidateL = dictS
667 goto emitDict
668 }
669 cv = load64(src, nextS)
670 s = nextS
671 }
672 emitDict:
673 {
674 if debug {
675 if load32(dict.dict, candidateL) != load32(src, s) {
676 panic("dict emit mismatch")
677 }
678 }
679 // Extend backwards.
680 // The top bytes will be rechecked to get the full match.
681 for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] {
682 candidateL--
683 s--
684 }
685
686 // Bail if we exceed the maximum size.
687 if d+(s-nextEmit) > dstLimit {
688 return 0
689 }
690
691 // A 4-byte match has been found. We'll later see if more than 4 bytes
692 // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
693 // them as literal bytes.
694
695 d += emitLiteral(dst[d:], src[nextEmit:s])
696 if debug && nextEmit != s {
697 fmt.Println("emitted ", s-nextEmit, "literals")
698 }
699 {
700 // Invariant: we have a 4-byte match at s, and no need to emit any
701 // literal bytes prior to s.
702 base := s
703 offset := s + (len(dict.dict)) - candidateL
704
705 // Extend the 4-byte match as long as possible.
706 s += 4
707 candidateL += 4
708 for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 {
709 if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 {
710 s += bits.TrailingZeros64(diff) >> 3
711 break
712 }
713 s += 8
714 candidateL += 8
715 }
716
717 if repeat == offset {
718 if debug {
719 fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
720 }
721 d += emitRepeat(dst[d:], offset, s-base)
722 } else {
723 if debug {
724 fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
725 }
726 // Matches longer than 64 are split.
727 if s <= sLimit || s-base < 8 {
728 d += emitCopy(dst[d:], offset, s-base)
729 } else {
730 // Split to ensure we don't start a copy within next block.
731 d += emitCopy(dst[d:], offset, 4)
732 d += emitRepeat(dst[d:], offset, s-base-4)
733 }
734 repeat = offset
735 }
736 if false {
737 // Validate match.
738 if s <= candidateL {
739 panic("s <= candidate")
740 }
741 a := src[base:s]
742 b := dict.dict[base-repeat : base-repeat+(s-base)]
743 if !bytes.Equal(a, b) {
744 panic("mismatch")
745 }
746 }
747
748 nextEmit = s
749 if s >= sLimit {
750 break searchDict
751 }
752
753 if d > dstLimit {
754 // Do we have space for more, if not bail.
755 return 0
756 }
757
758 // Index short & long
759 index0 := base + 1
760 index1 := s - 2
761
762 cv0 := load64(src, index0)
763 cv1 := load64(src, index1)
764 lTable[hash7(cv0, lTableBits)] = uint32(index0)
765 sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
766
767 lTable[hash7(cv1, lTableBits)] = uint32(index1)
768 sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
769 index0 += 1
770 index1 -= 1
771 cv = load64(src, s)
772
773 // index every second long in between.
774 for index0 < index1 {
775 lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
776 lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
777 index0 += 2
778 index1 -= 2
779 }
780 }
781 continue
782 }
783 emitMatch:
784
785 // Extend backwards
786 for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
787 candidateL--
788 s--
789 }
790
791 // Bail if we exceed the maximum size.
792 if d+(s-nextEmit) > dstLimit {
793 return 0
794 }
795
796 base := s
797 offset := base - candidateL
798
799 // Extend the 4-byte match as long as possible.
800 s += 4
801 candidateL += 4
802 for s < len(src) {
803 if len(src)-s < 8 {
804 if src[s] == src[candidateL] {
805 s++
806 candidateL++
807 continue
808 }
809 break
810 }
811 if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
812 s += bits.TrailingZeros64(diff) >> 3
813 break
814 }
815 s += 8
816 candidateL += 8
817 }
818
819 if offset > 65535 && s-base <= 5 && repeat != offset {
820 // Bail if the match is equal or worse to the encoding.
821 s = nextS + 1
822 if s >= sLimit {
823 goto emitRemainder
824 }
825 cv = load64(src, s)
826 continue
827 }
828
829 d += emitLiteral(dst[d:], src[nextEmit:base])
830 if debug && nextEmit != s {
831 fmt.Println("emitted ", s-nextEmit, "literals")
832 }
833 if repeat == offset {
834 if debug {
835 fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s)
836 }
837 d += emitRepeat(dst[d:], offset, s-base)
838 } else {
839 if debug {
840 fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s)
841 }
842 d += emitCopy(dst[d:], offset, s-base)
843 repeat = offset
844 }
845
846 nextEmit = s
847 if s >= sLimit {
848 goto emitRemainder
849 }
850
851 if d > dstLimit {
852 // Do we have space for more, if not bail.
853 return 0
854 }
855
856 // Index short & long
857 index0 := base + 1
858 index1 := s - 2
859
860 cv0 := load64(src, index0)
861 cv1 := load64(src, index1)
862 lTable[hash7(cv0, lTableBits)] = uint32(index0)
863 sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
864
865 lTable[hash7(cv1, lTableBits)] = uint32(index1)
866 sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
867 index0 += 1
868 index1 -= 1
869 cv = load64(src, s)
870
871 // Index large values sparsely in between.
872 // We do two starting from different offsets for speed.
873 index2 := (index0 + index1 + 1) >> 1
874 for index2 < index1 {
875 lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
876 lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
877 index0 += 2
878 index2 += 2
879 }
880 }
881
882 // Search without dict:
883 if repeat > s {
884 repeat = 0
885 }
886
887 // No more dict
888 sLimit = len(src) - inputMargin
889 if s >= sLimit {
890 goto emitRemainder
891 }
892 cv = load64(src, s)
893 if debug {
894 fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
895 }
896 for {
897 candidateL := 0
898 nextS := 0
899 for {
900 // Next src position to check
901 nextS = s + (s-nextEmit)>>7 + 1
902 if nextS > sLimit {
903 goto emitRemainder
904 }
905 hashL := hash7(cv, lTableBits)
906 hashS := hash4(cv, sTableBits)
907 candidateL = int(lTable[hashL])
908 candidateS := int(sTable[hashS])
909 lTable[hashL] = uint32(s)
910 sTable[hashS] = uint32(s)
911
912 valLong := load64(src, candidateL)
913 valShort := load64(src, candidateS)
914
915 // If long matches at least 8 bytes, use that.
916 if cv == valLong {
917 break
918 }
919 if cv == valShort {
920 candidateL = candidateS
921 break
922 }
923
924 // Check repeat at offset checkRep.
925 const checkRep = 1
926 // Minimum length of a repeat. Tested with various values.
927 // While 4-5 offers improvements in some, 6 reduces
928 // regressions significantly.
929 const wantRepeatBytes = 6
930 const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
931 if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
932 base := s + checkRep
933 // Extend back
934 for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
935 i--
936 base--
937 }
938 d += emitLiteral(dst[d:], src[nextEmit:base])
939
940 // Extend forward
941 candidate := s - repeat + wantRepeatBytes + checkRep
942 s += wantRepeatBytes + checkRep
943 for s < len(src) {
944 if len(src)-s < 8 {
945 if src[s] == src[candidate] {
946 s++
947 candidate++
948 continue
949 }
950 break
951 }
952 if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
953 s += bits.TrailingZeros64(diff) >> 3
954 break
955 }
956 s += 8
957 candidate += 8
958 }
959 // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
960 d += emitRepeat(dst[d:], repeat, s-base)
961 nextEmit = s
962 if s >= sLimit {
963 goto emitRemainder
964 }
965 // Index in-between
966 index0 := base + 1
967 index1 := s - 2
968
969 for index0 < index1 {
970 cv0 := load64(src, index0)
971 cv1 := load64(src, index1)
972 lTable[hash7(cv0, lTableBits)] = uint32(index0)
973 sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
974
975 lTable[hash7(cv1, lTableBits)] = uint32(index1)
976 sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
977 index0 += 2
978 index1 -= 2
979 }
980
981 cv = load64(src, s)
982 continue
983 }
984
985 // Long likely matches 7, so take that.
986 if uint32(cv) == uint32(valLong) {
987 break
988 }
989
990 // Check our short candidate
991 if uint32(cv) == uint32(valShort) {
992 // Try a long candidate at s+1
993 hashL = hash7(cv>>8, lTableBits)
994 candidateL = int(lTable[hashL])
995 lTable[hashL] = uint32(s + 1)
996 if uint32(cv>>8) == load32(src, candidateL) {
997 s++
998 break
999 }
1000 // Use our short candidate.
1001 candidateL = candidateS
1002 break
1003 }
1004
1005 cv = load64(src, nextS)
1006 s = nextS
1007 }
1008
1009 // Extend backwards
1010 for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
1011 candidateL--
1012 s--
1013 }
1014
1015 // Bail if we exceed the maximum size.
1016 if d+(s-nextEmit) > dstLimit {
1017 return 0
1018 }
1019
1020 base := s
1021 offset := base - candidateL
1022
1023 // Extend the 4-byte match as long as possible.
1024 s += 4
1025 candidateL += 4
1026 for s < len(src) {
1027 if len(src)-s < 8 {
1028 if src[s] == src[candidateL] {
1029 s++
1030 candidateL++
1031 continue
1032 }
1033 break
1034 }
1035 if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
1036 s += bits.TrailingZeros64(diff) >> 3
1037 break
1038 }
1039 s += 8
1040 candidateL += 8
1041 }
1042
1043 if offset > 65535 && s-base <= 5 && repeat != offset {
1044 // Bail if the match is equal or worse to the encoding.
1045 s = nextS + 1
1046 if s >= sLimit {
1047 goto emitRemainder
1048 }
1049 cv = load64(src, s)
1050 continue
1051 }
1052
1053 d += emitLiteral(dst[d:], src[nextEmit:base])
1054 if repeat == offset {
1055 d += emitRepeat(dst[d:], offset, s-base)
1056 } else {
1057 d += emitCopy(dst[d:], offset, s-base)
1058 repeat = offset
1059 }
1060
1061 nextEmit = s
1062 if s >= sLimit {
1063 goto emitRemainder
1064 }
1065
1066 if d > dstLimit {
1067 // Do we have space for more, if not bail.
1068 return 0
1069 }
1070
1071 // Index short & long
1072 index0 := base + 1
1073 index1 := s - 2
1074
1075 cv0 := load64(src, index0)
1076 cv1 := load64(src, index1)
1077 lTable[hash7(cv0, lTableBits)] = uint32(index0)
1078 sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
1079
1080 lTable[hash7(cv1, lTableBits)] = uint32(index1)
1081 sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
1082 index0 += 1
1083 index1 -= 1
1084 cv = load64(src, s)
1085
1086 // Index large values sparsely in between.
1087 // We do two starting from different offsets for speed.
1088 index2 := (index0 + index1 + 1) >> 1
1089 for index2 < index1 {
1090 lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
1091 lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
1092 index0 += 2
1093 index2 += 2
1094 }
1095 }
1096
1097emitRemainder:
1098 if nextEmit < len(src) {
1099 // Bail if we exceed the maximum size.
1100 if d+len(src)-nextEmit > dstLimit {
1101 return 0
1102 }
1103 d += emitLiteral(dst[d:], src[nextEmit:])
1104 }
1105 return d
1106}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
new file mode 100644
index 0000000..6b393c3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -0,0 +1,729 @@
1//go:build !amd64 || appengine || !gc || noasm
2// +build !amd64 appengine !gc noasm
3
4package s2
5
6import (
7 "bytes"
8 "math/bits"
9)
10
// hasAmd64Asm reports whether amd64 assembler routines are available.
// This file is the pure-Go fallback selected by the build tags above, so
// it is always false here.
const hasAmd64Asm = false
12
13// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
14// assumes that the varint-encoded length of the decompressed bytes has already
15// been written.
16//
17// It also assumes that:
18//
19// len(dst) >= MaxEncodedLen(len(src))
20func encodeBlock(dst, src []byte) (d int) {
21 if len(src) < minNonLiteralBlockSize {
22 return 0
23 }
24 return encodeBlockGo(dst, src)
25}
26
27// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
28// assumes that the varint-encoded length of the decompressed bytes has already
29// been written.
30//
31// It also assumes that:
32//
33// len(dst) >= MaxEncodedLen(len(src))
34func encodeBlockBetter(dst, src []byte) (d int) {
35 return encodeBlockBetterGo(dst, src)
36}
37
// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src))
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
	return encodeBlockBetterSnappyGo(dst, src)
}
48
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src))
func encodeBlockSnappy(dst, src []byte) (d int) {
	if len(src) < minNonLiteralBlockSize {
		// Too short to hold a non-literal chunk; 0 tells the caller
		// nothing was encoded.
		return 0
	}
	return encodeBlockSnappyGo(dst, src)
}
62
63// emitLiteral writes a literal chunk and returns the number of bytes written.
64//
65// It assumes that:
66//
67// dst is long enough to hold the encoded bytes
68// 0 <= len(lit) && len(lit) <= math.MaxUint32
69func emitLiteral(dst, lit []byte) int {
70 if len(lit) == 0 {
71 return 0
72 }
73 const num = 63<<2 | tagLiteral
74 i, n := 0, uint(len(lit)-1)
75 switch {
76 case n < 60:
77 dst[0] = uint8(n)<<2 | tagLiteral
78 i = 1
79 case n < 1<<8:
80 dst[1] = uint8(n)
81 dst[0] = 60<<2 | tagLiteral
82 i = 2
83 case n < 1<<16:
84 dst[2] = uint8(n >> 8)
85 dst[1] = uint8(n)
86 dst[0] = 61<<2 | tagLiteral
87 i = 3
88 case n < 1<<24:
89 dst[3] = uint8(n >> 16)
90 dst[2] = uint8(n >> 8)
91 dst[1] = uint8(n)
92 dst[0] = 62<<2 | tagLiteral
93 i = 4
94 default:
95 dst[4] = uint8(n >> 24)
96 dst[3] = uint8(n >> 16)
97 dst[2] = uint8(n >> 8)
98 dst[1] = uint8(n)
99 dst[0] = 63<<2 | tagLiteral
100 i = 5
101 }
102 return i + copy(dst[i:], lit)
103}
104
// emitRepeat writes a repeat chunk and returns the number of bytes written.
// Length must be at least 4 and < 1<<24
//
// A repeat chunk is a tagCopy1 whose offset byte is written as 0 in most
// forms (presumably signalling "reuse the previous offset" to the decoder;
// the wire semantics are defined by the S2 format). The stored length is
// biased differently per form — the subtractions below must match the
// decoder exactly.
func emitRepeat(dst []byte, offset, length int) int {
	// Repeat offset, make length cheaper
	length -= 4
	if length <= 4 {
		// 2-byte form: biased length (0-4) packed into the tag byte.
		dst[0] = uint8(length)<<2 | tagCopy1
		dst[1] = 0
		return 2
	}
	if length < 8 && offset < 2048 {
		// Encode WITH offset
		dst[1] = uint8(offset)
		dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
		return 2
	}
	if length < (1<<8)+4 {
		// 3-byte form: note the additional -4 bias on the stored byte.
		length -= 4
		dst[2] = uint8(length)
		dst[1] = 0
		dst[0] = 5<<2 | tagCopy1
		return 3
	}
	if length < (1<<16)+(1<<8) {
		// 4-byte form: 16-bit length, biased by 1<<8.
		length -= 1 << 8
		dst[3] = uint8(length >> 8)
		dst[2] = uint8(length >> 0)
		dst[1] = 0
		dst[0] = 6<<2 | tagCopy1
		return 4
	}
	const maxRepeat = (1 << 24) - 1
	// 5-byte form: 24-bit length, biased by 1<<16. Anything that does not
	// fit into one chunk is emitted as a follow-up repeat chunk.
	length -= 1 << 16
	left := 0
	if length > maxRepeat {
		left = length - maxRepeat + 4
		length = maxRepeat - 4
	}
	dst[4] = uint8(length >> 16)
	dst[3] = uint8(length >> 8)
	dst[2] = uint8(length >> 0)
	dst[1] = 0
	dst[0] = 7<<2 | tagCopy1
	if left > 0 {
		return 5 + emitRepeat(dst[5:], offset, left)
	}
	return 5
}
153
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// Three encodings are chosen by offset/length:
//   - tagCopy1 (2 bytes): offset < 2048 and 4 <= length < 12
//   - tagCopy2 (3 bytes): offset < 65536
//   - tagCopy4 (5 bytes): offset >= 65536
//
// Matches longer than 64 bytes are split into one copy chunk followed by
// repeat chunks (see emitRepeat).
//
// It assumes that:
//
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
func emitCopy(dst []byte, offset, length int) int {
	if offset >= 65536 {
		i := 0
		if length > 64 {
			// Emit a length 64 copy, encoded as 5 bytes.
			dst[4] = uint8(offset >> 24)
			dst[3] = uint8(offset >> 16)
			dst[2] = uint8(offset >> 8)
			dst[1] = uint8(offset)
			dst[0] = 63<<2 | tagCopy4
			length -= 64
			if length >= 4 {
				// Emit remaining as repeats
				return 5 + emitRepeat(dst[5:], offset, length)
			}
			// 1-3 bytes remain: too short for a repeat chunk, so fall
			// through and emit them as a second copy chunk.
			i = 5
		}
		if length == 0 {
			return i
		}
		// Emit a copy, offset encoded as 4 bytes.
		dst[i+0] = uint8(length-1)<<2 | tagCopy4
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		dst[i+3] = uint8(offset >> 16)
		dst[i+4] = uint8(offset >> 24)
		return i + 5
	}

	// Offset no more than 2 bytes.
	if length > 64 {
		off := 3
		if offset < 2048 {
			// emit 8 bytes as tagCopy1, rest as repeats.
			dst[1] = uint8(offset)
			dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
			length -= 8
			off = 2
		} else {
			// Emit a length 60 copy, encoded as 3 bytes.
			// Emit remaining as repeat value (minimum 4 bytes).
			dst[2] = uint8(offset >> 8)
			dst[1] = uint8(offset)
			dst[0] = 59<<2 | tagCopy2
			length -= 60
		}
		// Emit remaining as repeats, at least 4 bytes remain.
		return off + emitRepeat(dst[off:], offset, length)
	}
	if length >= 12 || offset >= 2048 {
		// Emit the remaining copy, encoded as 3 bytes.
		dst[2] = uint8(offset >> 8)
		dst[1] = uint8(offset)
		dst[0] = uint8(length-1)<<2 | tagCopy2
		return 3
	}
	// Emit the remaining copy, encoded as 2 bytes.
	dst[1] = uint8(offset)
	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	return 2
}
222
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
// It is identical to emitCopy except that long matches are continued with
// further copy chunks instead of repeat chunks (it recurses into itself,
// never calling emitRepeat).
//
// It assumes that:
//
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
func emitCopyNoRepeat(dst []byte, offset, length int) int {
	if offset >= 65536 {
		i := 0
		if length > 64 {
			// Emit a length 64 copy, encoded as 5 bytes.
			dst[4] = uint8(offset >> 24)
			dst[3] = uint8(offset >> 16)
			dst[2] = uint8(offset >> 8)
			dst[1] = uint8(offset)
			dst[0] = 63<<2 | tagCopy4
			length -= 64
			if length >= 4 {
				// Emit remaining as additional copy chunks (no repeats
				// in this variant).
				return 5 + emitCopyNoRepeat(dst[5:], offset, length)
			}
			i = 5
		}
		if length == 0 {
			return i
		}
		// Emit a copy, offset encoded as 4 bytes.
		dst[i+0] = uint8(length-1)<<2 | tagCopy4
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		dst[i+3] = uint8(offset >> 16)
		dst[i+4] = uint8(offset >> 24)
		return i + 5
	}

	// Offset no more than 2 bytes.
	if length > 64 {
		// Emit a length 60 copy, encoded as 3 bytes.
		// Emit remaining as further copy chunks (minimum 4 bytes).
		dst[2] = uint8(offset >> 8)
		dst[1] = uint8(offset)
		dst[0] = 59<<2 | tagCopy2
		length -= 60
		// Emit remaining as copies, at least 4 bytes remain.
		return 3 + emitCopyNoRepeat(dst[3:], offset, length)
	}
	if length >= 12 || offset >= 2048 {
		// Emit the remaining copy, encoded as 3 bytes.
		dst[2] = uint8(offset >> 8)
		dst[1] = uint8(offset)
		dst[0] = uint8(length-1)<<2 | tagCopy2
		return 3
	}
	// Emit the remaining copy, encoded as 2 bytes.
	dst[1] = uint8(offset)
	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	return 2
}
282
283// matchLen returns how many bytes match in a and b
284//
285// It assumes that:
286//
287// len(a) <= len(b)
288func matchLen(a []byte, b []byte) int {
289 b = b[:len(a)]
290 var checked int
291 if len(a) > 4 {
292 // Try 4 bytes first
293 if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
294 return bits.TrailingZeros32(diff) >> 3
295 }
296 // Switch to 8 byte matching.
297 checked = 4
298 a = a[4:]
299 b = b[4:]
300 for len(a) >= 8 {
301 b = b[:len(a)]
302 if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
303 return checked + (bits.TrailingZeros64(diff) >> 3)
304 }
305 checked += 8
306 a = a[8:]
307 b = b[8:]
308 }
309 }
310 b = b[:len(a)]
311 for i := range a {
312 if a[i] != b[i] {
313 return int(i) + checked
314 }
315 }
316 return len(a) + checked
317}
318
// calcBlockSize returns the number of bytes src would occupy when encoded
// by the block encoder, without writing any output: it runs a match search
// over src and sums emitLiteralSize/emitCopyNoRepeatSize results.
// NOTE(review): the search structure appears to parallel the encoders in
// this file — keep them in sync when changing either.
//
// input must be > inputMargin
func calcBlockSize(src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 13
		maxTableSize = 1 << tableBits
	)

	var table [maxTableSize]uint32

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1

	for {
		candidate := 0
		for {
			// Next src position to check
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			// Hash the 6-byte windows at s, s+1 and s+2.
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteralSize(src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}

				d += emitCopyNoRepeatSize(repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		d += emitLiteralSize(src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopyNoRepeatSize(repeat, s-base)
			if false {
				// Validate match. (Disabled debug check.)
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteralSize(src[nextEmit:])
	}
	return d
}
504
// calcBlockSizeSmall is calcBlockSize with a smaller (1<<9 entry) hash
// table, for short inputs. The two functions are otherwise identical;
// apply any change to both.
//
// length must be > inputMargin.
func calcBlockSizeSmall(src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 9
		maxTableSize = 1 << tableBits
	)

	var table [maxTableSize]uint32

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1

	for {
		candidate := 0
		for {
			// Next src position to check
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			// Hash the 6-byte windows at s, s+1 and s+2.
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteralSize(src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}

				d += emitCopyNoRepeatSize(repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		d += emitLiteralSize(src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopyNoRepeatSize(repeat, s-base)
			if false {
				// Validate match. (Disabled debug check.)
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteralSize(src[nextEmit:])
	}
	return d
}
690
// emitLiteralSize returns the number of bytes the literal chunk for lit
// would occupy: the literal bytes plus a 1-5 byte header whose size grows
// with len(lit).
//
// It assumes that:
//
//	0 <= len(lit) && len(lit) <= math.MaxUint32
func emitLiteralSize(lit []byte) int {
	n := len(lit)
	if n == 0 {
		return 0
	}
	// One header byte for the tag, plus one per extra length byte needed.
	for extra, limit := range [...]int{60, 1 << 8, 1 << 16, 1 << 24} {
		if n <= limit {
			return n + extra + 1
		}
	}
	return n + 5
}
714
// cvtLZ4BlockAsm is a stub matching the amd64 assembler routine's
// signature; this file is the non-assembly build (see the build tags at
// the top), so calling it is a bug.
func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
	panic("cvtLZ4BlockAsm should be unreachable")
}
718
// cvtLZ4BlockSnappyAsm is a stub matching the amd64 assembler routine's
// signature; this file is the non-assembly build (see the build tags at
// the top), so calling it is a bug.
func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
	panic("cvtLZ4BlockSnappyAsm should be unreachable")
}
722
// cvtLZ4sBlockAsm is a stub matching the amd64 assembler routine's
// signature; this file is the non-assembly build (see the build tags at
// the top), so calling it is a bug.
func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
	panic("cvtLZ4sBlockAsm should be unreachable")
}
726
// cvtLZ4sBlockSnappyAsm is a stub matching the amd64 assembler routine's
// signature; this file is the non-assembly build (see the build tags at
// the top), so calling it is a bug.
func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
	panic("cvtLZ4sBlockSnappyAsm should be unreachable")
}
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
new file mode 100644
index 0000000..297e415
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -0,0 +1,228 @@
1// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
2
3//go:build !appengine && !noasm && gc && !noasm
4
5package s2
6
7func _dummy_()
8
9// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
10// Maximum input 4294967295 bytes.
11// It assumes that the varint-encoded length of the decompressed bytes has already been written.
12//
13//go:noescape
14func encodeBlockAsm(dst []byte, src []byte) int
15
16// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
17// Maximum input 4194304 bytes.
18// It assumes that the varint-encoded length of the decompressed bytes has already been written.
19//
20//go:noescape
21func encodeBlockAsm4MB(dst []byte, src []byte) int
22
23// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
24// Maximum input 16383 bytes.
25// It assumes that the varint-encoded length of the decompressed bytes has already been written.
26//
27//go:noescape
28func encodeBlockAsm12B(dst []byte, src []byte) int
29
30// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
31// Maximum input 4095 bytes.
32// It assumes that the varint-encoded length of the decompressed bytes has already been written.
33//
34//go:noescape
35func encodeBlockAsm10B(dst []byte, src []byte) int
36
37// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
38// Maximum input 511 bytes.
39// It assumes that the varint-encoded length of the decompressed bytes has already been written.
40//
41//go:noescape
42func encodeBlockAsm8B(dst []byte, src []byte) int
43
44// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
45// Maximum input 4294967295 bytes.
46// It assumes that the varint-encoded length of the decompressed bytes has already been written.
47//
48//go:noescape
49func encodeBetterBlockAsm(dst []byte, src []byte) int
50
51// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
52// Maximum input 4194304 bytes.
53// It assumes that the varint-encoded length of the decompressed bytes has already been written.
54//
55//go:noescape
56func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
57
58// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
59// Maximum input 16383 bytes.
60// It assumes that the varint-encoded length of the decompressed bytes has already been written.
61//
62//go:noescape
63func encodeBetterBlockAsm12B(dst []byte, src []byte) int
64
65// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
66// Maximum input 4095 bytes.
67// It assumes that the varint-encoded length of the decompressed bytes has already been written.
68//
69//go:noescape
70func encodeBetterBlockAsm10B(dst []byte, src []byte) int
71
72// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
73// Maximum input 511 bytes.
74// It assumes that the varint-encoded length of the decompressed bytes has already been written.
75//
76//go:noescape
77func encodeBetterBlockAsm8B(dst []byte, src []byte) int
78
79// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
80// Maximum input 4294967295 bytes.
81// It assumes that the varint-encoded length of the decompressed bytes has already been written.
82//
83//go:noescape
84func encodeSnappyBlockAsm(dst []byte, src []byte) int
85
86// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
87// Maximum input 65535 bytes.
88// It assumes that the varint-encoded length of the decompressed bytes has already been written.
89//
90//go:noescape
91func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
92
93// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
94// Maximum input 16383 bytes.
95// It assumes that the varint-encoded length of the decompressed bytes has already been written.
96//
97//go:noescape
98func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
99
100// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
101// Maximum input 4095 bytes.
102// It assumes that the varint-encoded length of the decompressed bytes has already been written.
103//
104//go:noescape
105func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
106
107// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
108// Maximum input 511 bytes.
109// It assumes that the varint-encoded length of the decompressed bytes has already been written.
110//
111//go:noescape
112func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
113
114// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
115// Maximum input 4294967295 bytes.
116// It assumes that the varint-encoded length of the decompressed bytes has already been written.
117//
118//go:noescape
119func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
120
121// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
122// Maximum input 65535 bytes.
123// It assumes that the varint-encoded length of the decompressed bytes has already been written.
124//
125//go:noescape
126func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
127
128// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
129// Maximum input 16383 bytes.
130// It assumes that the varint-encoded length of the decompressed bytes has already been written.
131//
132//go:noescape
133func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
134
135// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
136// Maximum input 4095 bytes.
137// It assumes that the varint-encoded length of the decompressed bytes has already been written.
138//
139//go:noescape
140func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
141
142// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
143// Maximum input 511 bytes.
144// It assumes that the varint-encoded length of the decompressed bytes has already been written.
145//
146//go:noescape
147func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
148
// calcBlockSize returns the encoded size of a non-empty src without
// producing output — note the signature takes no dst.
// Maximum input 4294967295 bytes.
//
// NOTE(review): the previous comment ("encodes ... to a
// guaranteed-large-enough dst", varint already written) was copied from
// the encode stubs and did not match this size-only signature.
//
//go:noescape
func calcBlockSize(src []byte) int
155
// calcBlockSizeSmall returns the encoded size of a non-empty src without
// producing output — note the signature takes no dst.
// Maximum input 1024 bytes.
//
// NOTE(review): the previous comment ("encodes ... to a
// guaranteed-large-enough dst", varint already written) was copied from
// the encode stubs and did not match this size-only signature.
//
//go:noescape
func calcBlockSizeSmall(src []byte) int
162
163// emitLiteral writes a literal chunk and returns the number of bytes written.
164//
165// It assumes that:
166//
167// dst is long enough to hold the encoded bytes with margin of 0 bytes
168// 0 <= len(lit) && len(lit) <= math.MaxUint32
169//
170//go:noescape
171func emitLiteral(dst []byte, lit []byte) int
172
173// emitRepeat writes a repeat chunk and returns the number of bytes written.
174// Length must be at least 4 and < 1<<32
175//
176//go:noescape
177func emitRepeat(dst []byte, offset int, length int) int
178
179// emitCopy writes a copy chunk and returns the number of bytes written.
180//
181// It assumes that:
182//
183// dst is long enough to hold the encoded bytes
184// 1 <= offset && offset <= math.MaxUint32
185// 4 <= length && length <= 1 << 24
186//
187//go:noescape
188func emitCopy(dst []byte, offset int, length int) int
189
190// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
191//
192// It assumes that:
193//
194// dst is long enough to hold the encoded bytes
195// 1 <= offset && offset <= math.MaxUint32
196// 4 <= length && length <= 1 << 24
197//
198//go:noescape
199func emitCopyNoRepeat(dst []byte, offset int, length int) int
200
201// matchLen returns how many bytes match in a and b
202//
203// It assumes that:
204//
205// len(a) <= len(b)
206//
207//go:noescape
208func matchLen(a []byte, b []byte) int
209
// cvtLZ4BlockAsm converts an LZ4 block to S2.
// (Comment adjusted to start with the declared name, per Go doc
// convention; it previously read "cvtLZ4Block".)
//
//go:noescape
func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
214
// cvtLZ4sBlockAsm converts an LZ4s block to S2.
// (Comment adjusted to start with the declared name, per Go doc
// convention; it previously read "cvtLZ4sBlock".)
//
//go:noescape
func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
219
// cvtLZ4BlockSnappyAsm converts an LZ4 block to Snappy.
// (Comment adjusted to start with the declared name, per Go doc
// convention; it previously read "cvtLZ4Block", identical to the S2
// variant's comment.)
//
//go:noescape
func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
224
// cvtLZ4sBlockSnappyAsm converts an LZ4s block to Snappy.
// (Comment adjusted to start with the declared name, per Go doc
// convention; it previously read "cvtLZ4sBlock", identical to the S2
// variant's comment.)
//
//go:noescape
func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
new file mode 100644
index 0000000..5f110d1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -0,0 +1,21169 @@
1// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
2
3//go:build !appengine && !noasm && gc && !noasm
4
5#include "textflag.h"
6
// func _dummy_()
// _dummy_ does nothing at runtime (it is a bare RET). It exists to host
// the assembler macro setup below: when the toolchain defines GOAMD64_v4,
// GOAMD64_v3 is defined as well, so every #ifdef GOAMD64_v3 section in
// this file (e.g. the TZCNTQ fast paths) is also assembled for v4 builds.
TEXT ·_dummy_(SB), $0
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
	RET
15
16// func encodeBlockAsm(dst []byte, src []byte) int
17// Requires: BMI, SSE2
18TEXT ·encodeBlockAsm(SB), $65560-56
19 MOVQ dst_base+0(FP), AX
20 MOVQ $0x00000200, CX
21 LEAQ 24(SP), DX
22 PXOR X0, X0
23
24zero_loop_encodeBlockAsm:
25 MOVOU X0, (DX)
26 MOVOU X0, 16(DX)
27 MOVOU X0, 32(DX)
28 MOVOU X0, 48(DX)
29 MOVOU X0, 64(DX)
30 MOVOU X0, 80(DX)
31 MOVOU X0, 96(DX)
32 MOVOU X0, 112(DX)
33 ADDQ $0x80, DX
34 DECQ CX
35 JNZ zero_loop_encodeBlockAsm
36 MOVL $0x00000000, 12(SP)
37 MOVQ src_len+32(FP), CX
38 LEAQ -9(CX), DX
39 LEAQ -8(CX), BX
40 MOVL BX, 8(SP)
41 SHRQ $0x05, CX
42 SUBL CX, DX
43 LEAQ (AX)(DX*1), DX
44 MOVQ DX, (SP)
45 MOVL $0x00000001, CX
46 MOVL CX, 16(SP)
47 MOVQ src_base+24(FP), DX
48
49search_loop_encodeBlockAsm:
50 MOVL CX, BX
51 SUBL 12(SP), BX
52 SHRL $0x06, BX
53 LEAL 4(CX)(BX*1), BX
54 CMPL BX, 8(SP)
55 JAE emit_remainder_encodeBlockAsm
56 MOVQ (DX)(CX*1), SI
57 MOVL BX, 20(SP)
58 MOVQ $0x0000cf1bbcdcbf9b, R8
59 MOVQ SI, R9
60 MOVQ SI, R10
61 SHRQ $0x08, R10
62 SHLQ $0x10, R9
63 IMULQ R8, R9
64 SHRQ $0x32, R9
65 SHLQ $0x10, R10
66 IMULQ R8, R10
67 SHRQ $0x32, R10
68 MOVL 24(SP)(R9*4), BX
69 MOVL 24(SP)(R10*4), DI
70 MOVL CX, 24(SP)(R9*4)
71 LEAL 1(CX), R9
72 MOVL R9, 24(SP)(R10*4)
73 MOVQ SI, R9
74 SHRQ $0x10, R9
75 SHLQ $0x10, R9
76 IMULQ R8, R9
77 SHRQ $0x32, R9
78 MOVL CX, R8
79 SUBL 16(SP), R8
80 MOVL 1(DX)(R8*1), R10
81 MOVQ SI, R8
82 SHRQ $0x08, R8
83 CMPL R8, R10
84 JNE no_repeat_found_encodeBlockAsm
85 LEAL 1(CX), SI
86 MOVL 12(SP), DI
87 MOVL SI, BX
88 SUBL 16(SP), BX
89 JZ repeat_extend_back_end_encodeBlockAsm
90
91repeat_extend_back_loop_encodeBlockAsm:
92 CMPL SI, DI
93 JBE repeat_extend_back_end_encodeBlockAsm
94 MOVB -1(DX)(BX*1), R8
95 MOVB -1(DX)(SI*1), R9
96 CMPB R8, R9
97 JNE repeat_extend_back_end_encodeBlockAsm
98 LEAL -1(SI), SI
99 DECL BX
100 JNZ repeat_extend_back_loop_encodeBlockAsm
101
102repeat_extend_back_end_encodeBlockAsm:
103 MOVL 12(SP), BX
104 CMPL BX, SI
105 JEQ emit_literal_done_repeat_emit_encodeBlockAsm
106 MOVL SI, R8
107 MOVL SI, 12(SP)
108 LEAQ (DX)(BX*1), R9
109 SUBL BX, R8
110 LEAL -1(R8), BX
111 CMPL BX, $0x3c
112 JB one_byte_repeat_emit_encodeBlockAsm
113 CMPL BX, $0x00000100
114 JB two_bytes_repeat_emit_encodeBlockAsm
115 CMPL BX, $0x00010000
116 JB three_bytes_repeat_emit_encodeBlockAsm
117 CMPL BX, $0x01000000
118 JB four_bytes_repeat_emit_encodeBlockAsm
119 MOVB $0xfc, (AX)
120 MOVL BX, 1(AX)
121 ADDQ $0x05, AX
122 JMP memmove_long_repeat_emit_encodeBlockAsm
123
124four_bytes_repeat_emit_encodeBlockAsm:
125 MOVL BX, R10
126 SHRL $0x10, R10
127 MOVB $0xf8, (AX)
128 MOVW BX, 1(AX)
129 MOVB R10, 3(AX)
130 ADDQ $0x04, AX
131 JMP memmove_long_repeat_emit_encodeBlockAsm
132
133three_bytes_repeat_emit_encodeBlockAsm:
134 MOVB $0xf4, (AX)
135 MOVW BX, 1(AX)
136 ADDQ $0x03, AX
137 JMP memmove_long_repeat_emit_encodeBlockAsm
138
139two_bytes_repeat_emit_encodeBlockAsm:
140 MOVB $0xf0, (AX)
141 MOVB BL, 1(AX)
142 ADDQ $0x02, AX
143 CMPL BX, $0x40
144 JB memmove_repeat_emit_encodeBlockAsm
145 JMP memmove_long_repeat_emit_encodeBlockAsm
146
147one_byte_repeat_emit_encodeBlockAsm:
148 SHLB $0x02, BL
149 MOVB BL, (AX)
150 ADDQ $0x01, AX
151
152memmove_repeat_emit_encodeBlockAsm:
153 LEAQ (AX)(R8*1), BX
154
155 // genMemMoveShort
156 CMPQ R8, $0x08
157 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8
158 CMPQ R8, $0x10
159 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16
160 CMPQ R8, $0x20
161 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32
162 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64
163
164emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8:
165 MOVQ (R9), R10
166 MOVQ R10, (AX)
167 JMP memmove_end_copy_repeat_emit_encodeBlockAsm
168
169emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16:
170 MOVQ (R9), R10
171 MOVQ -8(R9)(R8*1), R9
172 MOVQ R10, (AX)
173 MOVQ R9, -8(AX)(R8*1)
174 JMP memmove_end_copy_repeat_emit_encodeBlockAsm
175
176emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32:
177 MOVOU (R9), X0
178 MOVOU -16(R9)(R8*1), X1
179 MOVOU X0, (AX)
180 MOVOU X1, -16(AX)(R8*1)
181 JMP memmove_end_copy_repeat_emit_encodeBlockAsm
182
183emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64:
184 MOVOU (R9), X0
185 MOVOU 16(R9), X1
186 MOVOU -32(R9)(R8*1), X2
187 MOVOU -16(R9)(R8*1), X3
188 MOVOU X0, (AX)
189 MOVOU X1, 16(AX)
190 MOVOU X2, -32(AX)(R8*1)
191 MOVOU X3, -16(AX)(R8*1)
192
193memmove_end_copy_repeat_emit_encodeBlockAsm:
194 MOVQ BX, AX
195 JMP emit_literal_done_repeat_emit_encodeBlockAsm
196
197memmove_long_repeat_emit_encodeBlockAsm:
198 LEAQ (AX)(R8*1), BX
199
200 // genMemMoveLong
201 MOVOU (R9), X0
202 MOVOU 16(R9), X1
203 MOVOU -32(R9)(R8*1), X2
204 MOVOU -16(R9)(R8*1), X3
205 MOVQ R8, R11
206 SHRQ $0x05, R11
207 MOVQ AX, R10
208 ANDL $0x0000001f, R10
209 MOVQ $0x00000040, R12
210 SUBQ R10, R12
211 DECQ R11
212 JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
213 LEAQ -32(R9)(R12*1), R10
214 LEAQ -32(AX)(R12*1), R13
215
216emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back:
217 MOVOU (R10), X4
218 MOVOU 16(R10), X5
219 MOVOA X4, (R13)
220 MOVOA X5, 16(R13)
221 ADDQ $0x20, R13
222 ADDQ $0x20, R10
223 ADDQ $0x20, R12
224 DECQ R11
225 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back
226
227emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32:
228 MOVOU -32(R9)(R12*1), X4
229 MOVOU -16(R9)(R12*1), X5
230 MOVOA X4, -32(AX)(R12*1)
231 MOVOA X5, -16(AX)(R12*1)
232 ADDQ $0x20, R12
233 CMPQ R8, R12
234 JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
235 MOVOU X0, (AX)
236 MOVOU X1, 16(AX)
237 MOVOU X2, -32(AX)(R8*1)
238 MOVOU X3, -16(AX)(R8*1)
239 MOVQ BX, AX
240
241emit_literal_done_repeat_emit_encodeBlockAsm:
242 ADDL $0x05, CX
243 MOVL CX, BX
244 SUBL 16(SP), BX
245 MOVQ src_len+32(FP), R8
246 SUBL CX, R8
247 LEAQ (DX)(CX*1), R9
248 LEAQ (DX)(BX*1), BX
249
250 // matchLen
251 XORL R11, R11
252
253matchlen_loopback_16_repeat_extend_encodeBlockAsm:
254 CMPL R8, $0x10
255 JB matchlen_match8_repeat_extend_encodeBlockAsm
256 MOVQ (R9)(R11*1), R10
257 MOVQ 8(R9)(R11*1), R12
258 XORQ (BX)(R11*1), R10
259 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm
260 XORQ 8(BX)(R11*1), R12
261 JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm
262 LEAL -16(R8), R8
263 LEAL 16(R11), R11
264 JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm
265
266matchlen_bsf_16repeat_extend_encodeBlockAsm:
267#ifdef GOAMD64_v3
268 TZCNTQ R12, R12
269
270#else
271 BSFQ R12, R12
272
273#endif
274 SARQ $0x03, R12
275 LEAL 8(R11)(R12*1), R11
276 JMP repeat_extend_forward_end_encodeBlockAsm
277
278matchlen_match8_repeat_extend_encodeBlockAsm:
279 CMPL R8, $0x08
280 JB matchlen_match4_repeat_extend_encodeBlockAsm
281 MOVQ (R9)(R11*1), R10
282 XORQ (BX)(R11*1), R10
283 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm
284 LEAL -8(R8), R8
285 LEAL 8(R11), R11
286 JMP matchlen_match4_repeat_extend_encodeBlockAsm
287
288matchlen_bsf_8_repeat_extend_encodeBlockAsm:
289#ifdef GOAMD64_v3
290 TZCNTQ R10, R10
291
292#else
293 BSFQ R10, R10
294
295#endif
296 SARQ $0x03, R10
297 LEAL (R11)(R10*1), R11
298 JMP repeat_extend_forward_end_encodeBlockAsm
299
300matchlen_match4_repeat_extend_encodeBlockAsm:
301 CMPL R8, $0x04
302 JB matchlen_match2_repeat_extend_encodeBlockAsm
303 MOVL (R9)(R11*1), R10
304 CMPL (BX)(R11*1), R10
305 JNE matchlen_match2_repeat_extend_encodeBlockAsm
306 LEAL -4(R8), R8
307 LEAL 4(R11), R11
308
309matchlen_match2_repeat_extend_encodeBlockAsm:
310 CMPL R8, $0x01
311 JE matchlen_match1_repeat_extend_encodeBlockAsm
312 JB repeat_extend_forward_end_encodeBlockAsm
313 MOVW (R9)(R11*1), R10
314 CMPW (BX)(R11*1), R10
315 JNE matchlen_match1_repeat_extend_encodeBlockAsm
316 LEAL 2(R11), R11
317 SUBL $0x02, R8
318 JZ repeat_extend_forward_end_encodeBlockAsm
319
320matchlen_match1_repeat_extend_encodeBlockAsm:
321 MOVB (R9)(R11*1), R10
322 CMPB (BX)(R11*1), R10
323 JNE repeat_extend_forward_end_encodeBlockAsm
324 LEAL 1(R11), R11
325
326repeat_extend_forward_end_encodeBlockAsm:
327 ADDL R11, CX
328 MOVL CX, BX
329 SUBL SI, BX
330 MOVL 16(SP), SI
331 TESTL DI, DI
332 JZ repeat_as_copy_encodeBlockAsm
333
334 // emitRepeat
335emit_repeat_again_match_repeat_encodeBlockAsm:
336 MOVL BX, DI
337 LEAL -4(BX), BX
338 CMPL DI, $0x08
339 JBE repeat_two_match_repeat_encodeBlockAsm
340 CMPL DI, $0x0c
341 JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm
342 CMPL SI, $0x00000800
343 JB repeat_two_offset_match_repeat_encodeBlockAsm
344
345cant_repeat_two_offset_match_repeat_encodeBlockAsm:
346 CMPL BX, $0x00000104
347 JB repeat_three_match_repeat_encodeBlockAsm
348 CMPL BX, $0x00010100
349 JB repeat_four_match_repeat_encodeBlockAsm
350 CMPL BX, $0x0100ffff
351 JB repeat_five_match_repeat_encodeBlockAsm
352 LEAL -16842747(BX), BX
353 MOVL $0xfffb001d, (AX)
354 MOVB $0xff, 4(AX)
355 ADDQ $0x05, AX
356 JMP emit_repeat_again_match_repeat_encodeBlockAsm
357
358repeat_five_match_repeat_encodeBlockAsm:
359 LEAL -65536(BX), BX
360 MOVL BX, SI
361 MOVW $0x001d, (AX)
362 MOVW BX, 2(AX)
363 SARL $0x10, SI
364 MOVB SI, 4(AX)
365 ADDQ $0x05, AX
366 JMP repeat_end_emit_encodeBlockAsm
367
368repeat_four_match_repeat_encodeBlockAsm:
369 LEAL -256(BX), BX
370 MOVW $0x0019, (AX)
371 MOVW BX, 2(AX)
372 ADDQ $0x04, AX
373 JMP repeat_end_emit_encodeBlockAsm
374
375repeat_three_match_repeat_encodeBlockAsm:
376 LEAL -4(BX), BX
377 MOVW $0x0015, (AX)
378 MOVB BL, 2(AX)
379 ADDQ $0x03, AX
380 JMP repeat_end_emit_encodeBlockAsm
381
382repeat_two_match_repeat_encodeBlockAsm:
383 SHLL $0x02, BX
384 ORL $0x01, BX
385 MOVW BX, (AX)
386 ADDQ $0x02, AX
387 JMP repeat_end_emit_encodeBlockAsm
388
389repeat_two_offset_match_repeat_encodeBlockAsm:
390 XORQ DI, DI
391 LEAL 1(DI)(BX*4), BX
392 MOVB SI, 1(AX)
393 SARL $0x08, SI
394 SHLL $0x05, SI
395 ORL SI, BX
396 MOVB BL, (AX)
397 ADDQ $0x02, AX
398 JMP repeat_end_emit_encodeBlockAsm
399
400repeat_as_copy_encodeBlockAsm:
401 // emitCopy
402 CMPL SI, $0x00010000
403 JB two_byte_offset_repeat_as_copy_encodeBlockAsm
404 CMPL BX, $0x40
405 JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm
406 MOVB $0xff, (AX)
407 MOVL SI, 1(AX)
408 LEAL -64(BX), BX
409 ADDQ $0x05, AX
410 CMPL BX, $0x04
411 JB four_bytes_remain_repeat_as_copy_encodeBlockAsm
412
413 // emitRepeat
414emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy:
415 MOVL BX, DI
416 LEAL -4(BX), BX
417 CMPL DI, $0x08
418 JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy
419 CMPL DI, $0x0c
420 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
421 CMPL SI, $0x00000800
422 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
423
424cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
425 CMPL BX, $0x00000104
426 JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy
427 CMPL BX, $0x00010100
428 JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy
429 CMPL BX, $0x0100ffff
430 JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy
431 LEAL -16842747(BX), BX
432 MOVL $0xfffb001d, (AX)
433 MOVB $0xff, 4(AX)
434 ADDQ $0x05, AX
435 JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy
436
437repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy:
438 LEAL -65536(BX), BX
439 MOVL BX, SI
440 MOVW $0x001d, (AX)
441 MOVW BX, 2(AX)
442 SARL $0x10, SI
443 MOVB SI, 4(AX)
444 ADDQ $0x05, AX
445 JMP repeat_end_emit_encodeBlockAsm
446
447repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy:
448 LEAL -256(BX), BX
449 MOVW $0x0019, (AX)
450 MOVW BX, 2(AX)
451 ADDQ $0x04, AX
452 JMP repeat_end_emit_encodeBlockAsm
453
454repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy:
455 LEAL -4(BX), BX
456 MOVW $0x0015, (AX)
457 MOVB BL, 2(AX)
458 ADDQ $0x03, AX
459 JMP repeat_end_emit_encodeBlockAsm
460
461repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy:
462 SHLL $0x02, BX
463 ORL $0x01, BX
464 MOVW BX, (AX)
465 ADDQ $0x02, AX
466 JMP repeat_end_emit_encodeBlockAsm
467
468repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
469 XORQ DI, DI
470 LEAL 1(DI)(BX*4), BX
471 MOVB SI, 1(AX)
472 SARL $0x08, SI
473 SHLL $0x05, SI
474 ORL SI, BX
475 MOVB BL, (AX)
476 ADDQ $0x02, AX
477 JMP repeat_end_emit_encodeBlockAsm
478
479four_bytes_remain_repeat_as_copy_encodeBlockAsm:
480 TESTL BX, BX
481 JZ repeat_end_emit_encodeBlockAsm
482 XORL DI, DI
483 LEAL -1(DI)(BX*4), BX
484 MOVB BL, (AX)
485 MOVL SI, 1(AX)
486 ADDQ $0x05, AX
487 JMP repeat_end_emit_encodeBlockAsm
488
489two_byte_offset_repeat_as_copy_encodeBlockAsm:
490 CMPL BX, $0x40
491 JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm
492 CMPL SI, $0x00000800
493 JAE long_offset_short_repeat_as_copy_encodeBlockAsm
494 MOVL $0x00000001, DI
495 LEAL 16(DI), DI
496 MOVB SI, 1(AX)
497 MOVL SI, R8
498 SHRL $0x08, R8
499 SHLL $0x05, R8
500 ORL R8, DI
501 MOVB DI, (AX)
502 ADDQ $0x02, AX
503 SUBL $0x08, BX
504
505 // emitRepeat
506 LEAL -4(BX), BX
507 JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
508
509emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
510 MOVL BX, DI
511 LEAL -4(BX), BX
512 CMPL DI, $0x08
513 JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
514 CMPL DI, $0x0c
515 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
516 CMPL SI, $0x00000800
517 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
518
519cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
520 CMPL BX, $0x00000104
521 JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
522 CMPL BX, $0x00010100
523 JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
524 CMPL BX, $0x0100ffff
525 JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
526 LEAL -16842747(BX), BX
527 MOVL $0xfffb001d, (AX)
528 MOVB $0xff, 4(AX)
529 ADDQ $0x05, AX
530 JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
531
532repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
533 LEAL -65536(BX), BX
534 MOVL BX, SI
535 MOVW $0x001d, (AX)
536 MOVW BX, 2(AX)
537 SARL $0x10, SI
538 MOVB SI, 4(AX)
539 ADDQ $0x05, AX
540 JMP repeat_end_emit_encodeBlockAsm
541
542repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
543 LEAL -256(BX), BX
544 MOVW $0x0019, (AX)
545 MOVW BX, 2(AX)
546 ADDQ $0x04, AX
547 JMP repeat_end_emit_encodeBlockAsm
548
549repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
550 LEAL -4(BX), BX
551 MOVW $0x0015, (AX)
552 MOVB BL, 2(AX)
553 ADDQ $0x03, AX
554 JMP repeat_end_emit_encodeBlockAsm
555
556repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
557 SHLL $0x02, BX
558 ORL $0x01, BX
559 MOVW BX, (AX)
560 ADDQ $0x02, AX
561 JMP repeat_end_emit_encodeBlockAsm
562
563repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
564 XORQ DI, DI
565 LEAL 1(DI)(BX*4), BX
566 MOVB SI, 1(AX)
567 SARL $0x08, SI
568 SHLL $0x05, SI
569 ORL SI, BX
570 MOVB BL, (AX)
571 ADDQ $0x02, AX
572 JMP repeat_end_emit_encodeBlockAsm
573
574long_offset_short_repeat_as_copy_encodeBlockAsm:
575 MOVB $0xee, (AX)
576 MOVW SI, 1(AX)
577 LEAL -60(BX), BX
578 ADDQ $0x03, AX
579
580 // emitRepeat
581emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short:
582 MOVL BX, DI
583 LEAL -4(BX), BX
584 CMPL DI, $0x08
585 JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short
586 CMPL DI, $0x0c
587 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
588 CMPL SI, $0x00000800
589 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
590
591cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
592 CMPL BX, $0x00000104
593 JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short
594 CMPL BX, $0x00010100
595 JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short
596 CMPL BX, $0x0100ffff
597 JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short
598 LEAL -16842747(BX), BX
599 MOVL $0xfffb001d, (AX)
600 MOVB $0xff, 4(AX)
601 ADDQ $0x05, AX
602 JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short
603
604repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short:
605 LEAL -65536(BX), BX
606 MOVL BX, SI
607 MOVW $0x001d, (AX)
608 MOVW BX, 2(AX)
609 SARL $0x10, SI
610 MOVB SI, 4(AX)
611 ADDQ $0x05, AX
612 JMP repeat_end_emit_encodeBlockAsm
613
614repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short:
615 LEAL -256(BX), BX
616 MOVW $0x0019, (AX)
617 MOVW BX, 2(AX)
618 ADDQ $0x04, AX
619 JMP repeat_end_emit_encodeBlockAsm
620
621repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short:
622 LEAL -4(BX), BX
623 MOVW $0x0015, (AX)
624 MOVB BL, 2(AX)
625 ADDQ $0x03, AX
626 JMP repeat_end_emit_encodeBlockAsm
627
628repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short:
629 SHLL $0x02, BX
630 ORL $0x01, BX
631 MOVW BX, (AX)
632 ADDQ $0x02, AX
633 JMP repeat_end_emit_encodeBlockAsm
634
635repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
636 XORQ DI, DI
637 LEAL 1(DI)(BX*4), BX
638 MOVB SI, 1(AX)
639 SARL $0x08, SI
640 SHLL $0x05, SI
641 ORL SI, BX
642 MOVB BL, (AX)
643 ADDQ $0x02, AX
644 JMP repeat_end_emit_encodeBlockAsm
645
646two_byte_offset_short_repeat_as_copy_encodeBlockAsm:
647 MOVL BX, DI
648 SHLL $0x02, DI
649 CMPL BX, $0x0c
650 JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
651 CMPL SI, $0x00000800
652 JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
653 LEAL -15(DI), DI
654 MOVB SI, 1(AX)
655 SHRL $0x08, SI
656 SHLL $0x05, SI
657 ORL SI, DI
658 MOVB DI, (AX)
659 ADDQ $0x02, AX
660 JMP repeat_end_emit_encodeBlockAsm
661
662emit_copy_three_repeat_as_copy_encodeBlockAsm:
663 LEAL -2(DI), DI
664 MOVB DI, (AX)
665 MOVW SI, 1(AX)
666 ADDQ $0x03, AX
667
668repeat_end_emit_encodeBlockAsm:
669 MOVL CX, 12(SP)
670 JMP search_loop_encodeBlockAsm
671
672no_repeat_found_encodeBlockAsm:
673 CMPL (DX)(BX*1), SI
674 JEQ candidate_match_encodeBlockAsm
675 SHRQ $0x08, SI
676 MOVL 24(SP)(R9*4), BX
677 LEAL 2(CX), R8
678 CMPL (DX)(DI*1), SI
679 JEQ candidate2_match_encodeBlockAsm
680 MOVL R8, 24(SP)(R9*4)
681 SHRQ $0x08, SI
682 CMPL (DX)(BX*1), SI
683 JEQ candidate3_match_encodeBlockAsm
684 MOVL 20(SP), CX
685 JMP search_loop_encodeBlockAsm
686
687candidate3_match_encodeBlockAsm:
688 ADDL $0x02, CX
689 JMP candidate_match_encodeBlockAsm
690
691candidate2_match_encodeBlockAsm:
692 MOVL R8, 24(SP)(R9*4)
693 INCL CX
694 MOVL DI, BX
695
696candidate_match_encodeBlockAsm:
697 MOVL 12(SP), SI
698 TESTL BX, BX
699 JZ match_extend_back_end_encodeBlockAsm
700
701match_extend_back_loop_encodeBlockAsm:
702 CMPL CX, SI
703 JBE match_extend_back_end_encodeBlockAsm
704 MOVB -1(DX)(BX*1), DI
705 MOVB -1(DX)(CX*1), R8
706 CMPB DI, R8
707 JNE match_extend_back_end_encodeBlockAsm
708 LEAL -1(CX), CX
709 DECL BX
710 JZ match_extend_back_end_encodeBlockAsm
711 JMP match_extend_back_loop_encodeBlockAsm
712
713match_extend_back_end_encodeBlockAsm:
714 MOVL CX, SI
715 SUBL 12(SP), SI
716 LEAQ 5(AX)(SI*1), SI
717 CMPQ SI, (SP)
718 JB match_dst_size_check_encodeBlockAsm
719 MOVQ $0x00000000, ret+48(FP)
720 RET
721
722match_dst_size_check_encodeBlockAsm:
723 MOVL CX, SI
724 MOVL 12(SP), DI
725 CMPL DI, SI
726 JEQ emit_literal_done_match_emit_encodeBlockAsm
727 MOVL SI, R8
728 MOVL SI, 12(SP)
729 LEAQ (DX)(DI*1), SI
730 SUBL DI, R8
731 LEAL -1(R8), DI
732 CMPL DI, $0x3c
733 JB one_byte_match_emit_encodeBlockAsm
734 CMPL DI, $0x00000100
735 JB two_bytes_match_emit_encodeBlockAsm
736 CMPL DI, $0x00010000
737 JB three_bytes_match_emit_encodeBlockAsm
738 CMPL DI, $0x01000000
739 JB four_bytes_match_emit_encodeBlockAsm
740 MOVB $0xfc, (AX)
741 MOVL DI, 1(AX)
742 ADDQ $0x05, AX
743 JMP memmove_long_match_emit_encodeBlockAsm
744
745four_bytes_match_emit_encodeBlockAsm:
746 MOVL DI, R9
747 SHRL $0x10, R9
748 MOVB $0xf8, (AX)
749 MOVW DI, 1(AX)
750 MOVB R9, 3(AX)
751 ADDQ $0x04, AX
752 JMP memmove_long_match_emit_encodeBlockAsm
753
754three_bytes_match_emit_encodeBlockAsm:
755 MOVB $0xf4, (AX)
756 MOVW DI, 1(AX)
757 ADDQ $0x03, AX
758 JMP memmove_long_match_emit_encodeBlockAsm
759
760two_bytes_match_emit_encodeBlockAsm:
761 MOVB $0xf0, (AX)
762 MOVB DI, 1(AX)
763 ADDQ $0x02, AX
764 CMPL DI, $0x40
765 JB memmove_match_emit_encodeBlockAsm
766 JMP memmove_long_match_emit_encodeBlockAsm
767
768one_byte_match_emit_encodeBlockAsm:
769 SHLB $0x02, DI
770 MOVB DI, (AX)
771 ADDQ $0x01, AX
772
773memmove_match_emit_encodeBlockAsm:
774 LEAQ (AX)(R8*1), DI
775
776 // genMemMoveShort
777 CMPQ R8, $0x08
778 JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8
779 CMPQ R8, $0x10
780 JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16
781 CMPQ R8, $0x20
782 JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32
783 JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64
784
785emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8:
786 MOVQ (SI), R9
787 MOVQ R9, (AX)
788 JMP memmove_end_copy_match_emit_encodeBlockAsm
789
790emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16:
791 MOVQ (SI), R9
792 MOVQ -8(SI)(R8*1), SI
793 MOVQ R9, (AX)
794 MOVQ SI, -8(AX)(R8*1)
795 JMP memmove_end_copy_match_emit_encodeBlockAsm
796
797emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32:
798 MOVOU (SI), X0
799 MOVOU -16(SI)(R8*1), X1
800 MOVOU X0, (AX)
801 MOVOU X1, -16(AX)(R8*1)
802 JMP memmove_end_copy_match_emit_encodeBlockAsm
803
804emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64:
805 MOVOU (SI), X0
806 MOVOU 16(SI), X1
807 MOVOU -32(SI)(R8*1), X2
808 MOVOU -16(SI)(R8*1), X3
809 MOVOU X0, (AX)
810 MOVOU X1, 16(AX)
811 MOVOU X2, -32(AX)(R8*1)
812 MOVOU X3, -16(AX)(R8*1)
813
814memmove_end_copy_match_emit_encodeBlockAsm:
815 MOVQ DI, AX
816 JMP emit_literal_done_match_emit_encodeBlockAsm
817
818memmove_long_match_emit_encodeBlockAsm:
819 LEAQ (AX)(R8*1), DI
820
821 // genMemMoveLong
822 MOVOU (SI), X0
823 MOVOU 16(SI), X1
824 MOVOU -32(SI)(R8*1), X2
825 MOVOU -16(SI)(R8*1), X3
826 MOVQ R8, R10
827 SHRQ $0x05, R10
828 MOVQ AX, R9
829 ANDL $0x0000001f, R9
830 MOVQ $0x00000040, R11
831 SUBQ R9, R11
832 DECQ R10
833 JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
834 LEAQ -32(SI)(R11*1), R9
835 LEAQ -32(AX)(R11*1), R12
836
837emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back:
838 MOVOU (R9), X4
839 MOVOU 16(R9), X5
840 MOVOA X4, (R12)
841 MOVOA X5, 16(R12)
842 ADDQ $0x20, R12
843 ADDQ $0x20, R9
844 ADDQ $0x20, R11
845 DECQ R10
846 JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back
847
848emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32:
849 MOVOU -32(SI)(R11*1), X4
850 MOVOU -16(SI)(R11*1), X5
851 MOVOA X4, -32(AX)(R11*1)
852 MOVOA X5, -16(AX)(R11*1)
853 ADDQ $0x20, R11
854 CMPQ R8, R11
855 JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
856 MOVOU X0, (AX)
857 MOVOU X1, 16(AX)
858 MOVOU X2, -32(AX)(R8*1)
859 MOVOU X3, -16(AX)(R8*1)
860 MOVQ DI, AX
861
862emit_literal_done_match_emit_encodeBlockAsm:
863match_nolit_loop_encodeBlockAsm:
864 MOVL CX, SI
865 SUBL BX, SI
866 MOVL SI, 16(SP)
867 ADDL $0x04, CX
868 ADDL $0x04, BX
869 MOVQ src_len+32(FP), SI
870 SUBL CX, SI
871 LEAQ (DX)(CX*1), DI
872 LEAQ (DX)(BX*1), BX
873
874 // matchLen
875 XORL R9, R9
876
877matchlen_loopback_16_match_nolit_encodeBlockAsm:
878 CMPL SI, $0x10
879 JB matchlen_match8_match_nolit_encodeBlockAsm
880 MOVQ (DI)(R9*1), R8
881 MOVQ 8(DI)(R9*1), R10
882 XORQ (BX)(R9*1), R8
883 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm
884 XORQ 8(BX)(R9*1), R10
885 JNZ matchlen_bsf_16match_nolit_encodeBlockAsm
886 LEAL -16(SI), SI
887 LEAL 16(R9), R9
888 JMP matchlen_loopback_16_match_nolit_encodeBlockAsm
889
890matchlen_bsf_16match_nolit_encodeBlockAsm:
891#ifdef GOAMD64_v3
892 TZCNTQ R10, R10
893
894#else
895 BSFQ R10, R10
896
897#endif
898 SARQ $0x03, R10
899 LEAL 8(R9)(R10*1), R9
900 JMP match_nolit_end_encodeBlockAsm
901
902matchlen_match8_match_nolit_encodeBlockAsm:
903 CMPL SI, $0x08
904 JB matchlen_match4_match_nolit_encodeBlockAsm
905 MOVQ (DI)(R9*1), R8
906 XORQ (BX)(R9*1), R8
907 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm
908 LEAL -8(SI), SI
909 LEAL 8(R9), R9
910 JMP matchlen_match4_match_nolit_encodeBlockAsm
911
912matchlen_bsf_8_match_nolit_encodeBlockAsm:
913#ifdef GOAMD64_v3
914 TZCNTQ R8, R8
915
916#else
917 BSFQ R8, R8
918
919#endif
920 SARQ $0x03, R8
921 LEAL (R9)(R8*1), R9
922 JMP match_nolit_end_encodeBlockAsm
923
924matchlen_match4_match_nolit_encodeBlockAsm:
925 CMPL SI, $0x04
926 JB matchlen_match2_match_nolit_encodeBlockAsm
927 MOVL (DI)(R9*1), R8
928 CMPL (BX)(R9*1), R8
929 JNE matchlen_match2_match_nolit_encodeBlockAsm
930 LEAL -4(SI), SI
931 LEAL 4(R9), R9
932
933matchlen_match2_match_nolit_encodeBlockAsm:
934 CMPL SI, $0x01
935 JE matchlen_match1_match_nolit_encodeBlockAsm
936 JB match_nolit_end_encodeBlockAsm
937 MOVW (DI)(R9*1), R8
938 CMPW (BX)(R9*1), R8
939 JNE matchlen_match1_match_nolit_encodeBlockAsm
940 LEAL 2(R9), R9
941 SUBL $0x02, SI
942 JZ match_nolit_end_encodeBlockAsm
943
944matchlen_match1_match_nolit_encodeBlockAsm:
945 MOVB (DI)(R9*1), R8
946 CMPB (BX)(R9*1), R8
947 JNE match_nolit_end_encodeBlockAsm
948 LEAL 1(R9), R9
949
950match_nolit_end_encodeBlockAsm:
951 ADDL R9, CX
952 MOVL 16(SP), BX
953 ADDL $0x04, R9
954 MOVL CX, 12(SP)
955
956 // emitCopy
957 CMPL BX, $0x00010000
958 JB two_byte_offset_match_nolit_encodeBlockAsm
959 CMPL R9, $0x40
960 JBE four_bytes_remain_match_nolit_encodeBlockAsm
961 MOVB $0xff, (AX)
962 MOVL BX, 1(AX)
963 LEAL -64(R9), R9
964 ADDQ $0x05, AX
965 CMPL R9, $0x04
966 JB four_bytes_remain_match_nolit_encodeBlockAsm
967
968 // emitRepeat
969emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy:
970 MOVL R9, SI
971 LEAL -4(R9), R9
972 CMPL SI, $0x08
973 JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy
974 CMPL SI, $0x0c
975 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
976 CMPL BX, $0x00000800
977 JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
978
979cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
980 CMPL R9, $0x00000104
981 JB repeat_three_match_nolit_encodeBlockAsm_emit_copy
982 CMPL R9, $0x00010100
983 JB repeat_four_match_nolit_encodeBlockAsm_emit_copy
984 CMPL R9, $0x0100ffff
985 JB repeat_five_match_nolit_encodeBlockAsm_emit_copy
986 LEAL -16842747(R9), R9
987 MOVL $0xfffb001d, (AX)
988 MOVB $0xff, 4(AX)
989 ADDQ $0x05, AX
990 JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy
991
992repeat_five_match_nolit_encodeBlockAsm_emit_copy:
993 LEAL -65536(R9), R9
994 MOVL R9, BX
995 MOVW $0x001d, (AX)
996 MOVW R9, 2(AX)
997 SARL $0x10, BX
998 MOVB BL, 4(AX)
999 ADDQ $0x05, AX
1000 JMP match_nolit_emitcopy_end_encodeBlockAsm
1001
1002repeat_four_match_nolit_encodeBlockAsm_emit_copy:
1003 LEAL -256(R9), R9
1004 MOVW $0x0019, (AX)
1005 MOVW R9, 2(AX)
1006 ADDQ $0x04, AX
1007 JMP match_nolit_emitcopy_end_encodeBlockAsm
1008
1009repeat_three_match_nolit_encodeBlockAsm_emit_copy:
1010 LEAL -4(R9), R9
1011 MOVW $0x0015, (AX)
1012 MOVB R9, 2(AX)
1013 ADDQ $0x03, AX
1014 JMP match_nolit_emitcopy_end_encodeBlockAsm
1015
1016repeat_two_match_nolit_encodeBlockAsm_emit_copy:
1017 SHLL $0x02, R9
1018 ORL $0x01, R9
1019 MOVW R9, (AX)
1020 ADDQ $0x02, AX
1021 JMP match_nolit_emitcopy_end_encodeBlockAsm
1022
1023repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
1024 XORQ SI, SI
1025 LEAL 1(SI)(R9*4), R9
1026 MOVB BL, 1(AX)
1027 SARL $0x08, BX
1028 SHLL $0x05, BX
1029 ORL BX, R9
1030 MOVB R9, (AX)
1031 ADDQ $0x02, AX
1032 JMP match_nolit_emitcopy_end_encodeBlockAsm
1033
1034four_bytes_remain_match_nolit_encodeBlockAsm:
1035 TESTL R9, R9
1036 JZ match_nolit_emitcopy_end_encodeBlockAsm
1037 XORL SI, SI
1038 LEAL -1(SI)(R9*4), R9
1039 MOVB R9, (AX)
1040 MOVL BX, 1(AX)
1041 ADDQ $0x05, AX
1042 JMP match_nolit_emitcopy_end_encodeBlockAsm
1043
1044two_byte_offset_match_nolit_encodeBlockAsm:
1045 CMPL R9, $0x40
1046 JBE two_byte_offset_short_match_nolit_encodeBlockAsm
1047 CMPL BX, $0x00000800
1048 JAE long_offset_short_match_nolit_encodeBlockAsm
1049 MOVL $0x00000001, SI
1050 LEAL 16(SI), SI
1051 MOVB BL, 1(AX)
1052 MOVL BX, DI
1053 SHRL $0x08, DI
1054 SHLL $0x05, DI
1055 ORL DI, SI
1056 MOVB SI, (AX)
1057 ADDQ $0x02, AX
1058 SUBL $0x08, R9
1059
1060 // emitRepeat
1061 LEAL -4(R9), R9
1062 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
1063
1064emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b:
1065 MOVL R9, SI
1066 LEAL -4(R9), R9
1067 CMPL SI, $0x08
1068 JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b
1069 CMPL SI, $0x0c
1070 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
1071 CMPL BX, $0x00000800
1072 JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
1073
1074cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
1075 CMPL R9, $0x00000104
1076 JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b
1077 CMPL R9, $0x00010100
1078 JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b
1079 CMPL R9, $0x0100ffff
1080 JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b
1081 LEAL -16842747(R9), R9
1082 MOVL $0xfffb001d, (AX)
1083 MOVB $0xff, 4(AX)
1084 ADDQ $0x05, AX
1085 JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b
1086
1087repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b:
1088 LEAL -65536(R9), R9
1089 MOVL R9, BX
1090 MOVW $0x001d, (AX)
1091 MOVW R9, 2(AX)
1092 SARL $0x10, BX
1093 MOVB BL, 4(AX)
1094 ADDQ $0x05, AX
1095 JMP match_nolit_emitcopy_end_encodeBlockAsm
1096
1097repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b:
1098 LEAL -256(R9), R9
1099 MOVW $0x0019, (AX)
1100 MOVW R9, 2(AX)
1101 ADDQ $0x04, AX
1102 JMP match_nolit_emitcopy_end_encodeBlockAsm
1103
1104repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b:
1105 LEAL -4(R9), R9
1106 MOVW $0x0015, (AX)
1107 MOVB R9, 2(AX)
1108 ADDQ $0x03, AX
1109 JMP match_nolit_emitcopy_end_encodeBlockAsm
1110
1111repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b:
1112 SHLL $0x02, R9
1113 ORL $0x01, R9
1114 MOVW R9, (AX)
1115 ADDQ $0x02, AX
1116 JMP match_nolit_emitcopy_end_encodeBlockAsm
1117
1118repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
1119 XORQ SI, SI
1120 LEAL 1(SI)(R9*4), R9
1121 MOVB BL, 1(AX)
1122 SARL $0x08, BX
1123 SHLL $0x05, BX
1124 ORL BX, R9
1125 MOVB R9, (AX)
1126 ADDQ $0x02, AX
1127 JMP match_nolit_emitcopy_end_encodeBlockAsm
1128
1129long_offset_short_match_nolit_encodeBlockAsm:
1130 MOVB $0xee, (AX)
1131 MOVW BX, 1(AX)
1132 LEAL -60(R9), R9
1133 ADDQ $0x03, AX
1134
1135 // emitRepeat
1136emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short:
1137 MOVL R9, SI
1138 LEAL -4(R9), R9
1139 CMPL SI, $0x08
1140 JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short
1141 CMPL SI, $0x0c
1142 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
1143 CMPL BX, $0x00000800
1144 JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
1145
1146cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
1147 CMPL R9, $0x00000104
1148 JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short
1149 CMPL R9, $0x00010100
1150 JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short
1151 CMPL R9, $0x0100ffff
1152 JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short
1153 LEAL -16842747(R9), R9
1154 MOVL $0xfffb001d, (AX)
1155 MOVB $0xff, 4(AX)
1156 ADDQ $0x05, AX
1157 JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short
1158
1159repeat_five_match_nolit_encodeBlockAsm_emit_copy_short:
1160 LEAL -65536(R9), R9
1161 MOVL R9, BX
1162 MOVW $0x001d, (AX)
1163 MOVW R9, 2(AX)
1164 SARL $0x10, BX
1165 MOVB BL, 4(AX)
1166 ADDQ $0x05, AX
1167 JMP match_nolit_emitcopy_end_encodeBlockAsm
1168
1169repeat_four_match_nolit_encodeBlockAsm_emit_copy_short:
1170 LEAL -256(R9), R9
1171 MOVW $0x0019, (AX)
1172 MOVW R9, 2(AX)
1173 ADDQ $0x04, AX
1174 JMP match_nolit_emitcopy_end_encodeBlockAsm
1175
1176repeat_three_match_nolit_encodeBlockAsm_emit_copy_short:
1177 LEAL -4(R9), R9
1178 MOVW $0x0015, (AX)
1179 MOVB R9, 2(AX)
1180 ADDQ $0x03, AX
1181 JMP match_nolit_emitcopy_end_encodeBlockAsm
1182
1183repeat_two_match_nolit_encodeBlockAsm_emit_copy_short:
1184 SHLL $0x02, R9
1185 ORL $0x01, R9
1186 MOVW R9, (AX)
1187 ADDQ $0x02, AX
1188 JMP match_nolit_emitcopy_end_encodeBlockAsm
1189
1190repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
1191 XORQ SI, SI
1192 LEAL 1(SI)(R9*4), R9
1193 MOVB BL, 1(AX)
1194 SARL $0x08, BX
1195 SHLL $0x05, BX
1196 ORL BX, R9
1197 MOVB R9, (AX)
1198 ADDQ $0x02, AX
1199 JMP match_nolit_emitcopy_end_encodeBlockAsm
1200
1201two_byte_offset_short_match_nolit_encodeBlockAsm:
1202 MOVL R9, SI
1203 SHLL $0x02, SI
1204 CMPL R9, $0x0c
1205 JAE emit_copy_three_match_nolit_encodeBlockAsm
1206 CMPL BX, $0x00000800
1207 JAE emit_copy_three_match_nolit_encodeBlockAsm
1208 LEAL -15(SI), SI
1209 MOVB BL, 1(AX)
1210 SHRL $0x08, BX
1211 SHLL $0x05, BX
1212 ORL BX, SI
1213 MOVB SI, (AX)
1214 ADDQ $0x02, AX
1215 JMP match_nolit_emitcopy_end_encodeBlockAsm
1216
1217emit_copy_three_match_nolit_encodeBlockAsm:
1218 LEAL -2(SI), SI
1219 MOVB SI, (AX)
1220 MOVW BX, 1(AX)
1221 ADDQ $0x03, AX
1222
1223match_nolit_emitcopy_end_encodeBlockAsm:
1224 CMPL CX, 8(SP)
1225 JAE emit_remainder_encodeBlockAsm
1226 MOVQ -2(DX)(CX*1), SI
1227 CMPQ AX, (SP)
1228 JB match_nolit_dst_ok_encodeBlockAsm
1229 MOVQ $0x00000000, ret+48(FP)
1230 RET
1231
1232match_nolit_dst_ok_encodeBlockAsm:
1233 MOVQ $0x0000cf1bbcdcbf9b, R8
1234 MOVQ SI, DI
1235 SHRQ $0x10, SI
1236 MOVQ SI, BX
1237 SHLQ $0x10, DI
1238 IMULQ R8, DI
1239 SHRQ $0x32, DI
1240 SHLQ $0x10, BX
1241 IMULQ R8, BX
1242 SHRQ $0x32, BX
1243 LEAL -2(CX), R8
1244 LEAQ 24(SP)(BX*4), R9
1245 MOVL (R9), BX
1246 MOVL R8, 24(SP)(DI*4)
1247 MOVL CX, (R9)
1248 CMPL (DX)(BX*1), SI
1249 JEQ match_nolit_loop_encodeBlockAsm
1250 INCL CX
1251 JMP search_loop_encodeBlockAsm
1252
1253emit_remainder_encodeBlockAsm:
1254 MOVQ src_len+32(FP), CX
1255 SUBL 12(SP), CX
1256 LEAQ 5(AX)(CX*1), CX
1257 CMPQ CX, (SP)
1258 JB emit_remainder_ok_encodeBlockAsm
1259 MOVQ $0x00000000, ret+48(FP)
1260 RET
1261
1262emit_remainder_ok_encodeBlockAsm:
1263 MOVQ src_len+32(FP), CX
1264 MOVL 12(SP), BX
1265 CMPL BX, CX
1266 JEQ emit_literal_done_emit_remainder_encodeBlockAsm
1267 MOVL CX, SI
1268 MOVL CX, 12(SP)
1269 LEAQ (DX)(BX*1), CX
1270 SUBL BX, SI
1271 LEAL -1(SI), DX
1272 CMPL DX, $0x3c
1273 JB one_byte_emit_remainder_encodeBlockAsm
1274 CMPL DX, $0x00000100
1275 JB two_bytes_emit_remainder_encodeBlockAsm
1276 CMPL DX, $0x00010000
1277 JB three_bytes_emit_remainder_encodeBlockAsm
1278 CMPL DX, $0x01000000
1279 JB four_bytes_emit_remainder_encodeBlockAsm
1280 MOVB $0xfc, (AX)
1281 MOVL DX, 1(AX)
1282 ADDQ $0x05, AX
1283 JMP memmove_long_emit_remainder_encodeBlockAsm
1284
1285four_bytes_emit_remainder_encodeBlockAsm:
1286 MOVL DX, BX
1287 SHRL $0x10, BX
1288 MOVB $0xf8, (AX)
1289 MOVW DX, 1(AX)
1290 MOVB BL, 3(AX)
1291 ADDQ $0x04, AX
1292 JMP memmove_long_emit_remainder_encodeBlockAsm
1293
1294three_bytes_emit_remainder_encodeBlockAsm:
1295 MOVB $0xf4, (AX)
1296 MOVW DX, 1(AX)
1297 ADDQ $0x03, AX
1298 JMP memmove_long_emit_remainder_encodeBlockAsm
1299
1300two_bytes_emit_remainder_encodeBlockAsm:
1301 MOVB $0xf0, (AX)
1302 MOVB DL, 1(AX)
1303 ADDQ $0x02, AX
1304 CMPL DX, $0x40
1305 JB memmove_emit_remainder_encodeBlockAsm
1306 JMP memmove_long_emit_remainder_encodeBlockAsm
1307
1308one_byte_emit_remainder_encodeBlockAsm:
1309 SHLB $0x02, DL
1310 MOVB DL, (AX)
1311 ADDQ $0x01, AX
1312
1313memmove_emit_remainder_encodeBlockAsm:
1314 LEAQ (AX)(SI*1), DX
1315 MOVL SI, BX
1316
1317 // genMemMoveShort
1318 CMPQ BX, $0x03
1319 JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2
1320 JE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3
1321 CMPQ BX, $0x08
1322 JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7
1323 CMPQ BX, $0x10
1324 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16
1325 CMPQ BX, $0x20
1326 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32
1327 JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64
1328
1329emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2:
1330 MOVB (CX), SI
1331 MOVB -1(CX)(BX*1), CL
1332 MOVB SI, (AX)
1333 MOVB CL, -1(AX)(BX*1)
1334 JMP memmove_end_copy_emit_remainder_encodeBlockAsm
1335
1336emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3:
1337 MOVW (CX), SI
1338 MOVB 2(CX), CL
1339 MOVW SI, (AX)
1340 MOVB CL, 2(AX)
1341 JMP memmove_end_copy_emit_remainder_encodeBlockAsm
1342
1343emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7:
1344 MOVL (CX), SI
1345 MOVL -4(CX)(BX*1), CX
1346 MOVL SI, (AX)
1347 MOVL CX, -4(AX)(BX*1)
1348 JMP memmove_end_copy_emit_remainder_encodeBlockAsm
1349
1350emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16:
1351 MOVQ (CX), SI
1352 MOVQ -8(CX)(BX*1), CX
1353 MOVQ SI, (AX)
1354 MOVQ CX, -8(AX)(BX*1)
1355 JMP memmove_end_copy_emit_remainder_encodeBlockAsm
1356
1357emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32:
1358 MOVOU (CX), X0
1359 MOVOU -16(CX)(BX*1), X1
1360 MOVOU X0, (AX)
1361 MOVOU X1, -16(AX)(BX*1)
1362 JMP memmove_end_copy_emit_remainder_encodeBlockAsm
1363
1364emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64:
1365 MOVOU (CX), X0
1366 MOVOU 16(CX), X1
1367 MOVOU -32(CX)(BX*1), X2
1368 MOVOU -16(CX)(BX*1), X3
1369 MOVOU X0, (AX)
1370 MOVOU X1, 16(AX)
1371 MOVOU X2, -32(AX)(BX*1)
1372 MOVOU X3, -16(AX)(BX*1)
1373
1374memmove_end_copy_emit_remainder_encodeBlockAsm:
1375 MOVQ DX, AX
1376 JMP emit_literal_done_emit_remainder_encodeBlockAsm
1377
1378memmove_long_emit_remainder_encodeBlockAsm:
1379 LEAQ (AX)(SI*1), DX
1380 MOVL SI, BX
1381
1382 // genMemMoveLong
1383 MOVOU (CX), X0
1384 MOVOU 16(CX), X1
1385 MOVOU -32(CX)(BX*1), X2
1386 MOVOU -16(CX)(BX*1), X3
1387 MOVQ BX, DI
1388 SHRQ $0x05, DI
1389 MOVQ AX, SI
1390 ANDL $0x0000001f, SI
1391 MOVQ $0x00000040, R8
1392 SUBQ SI, R8
1393 DECQ DI
1394 JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
1395 LEAQ -32(CX)(R8*1), SI
1396 LEAQ -32(AX)(R8*1), R9
1397
1398emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back:
1399 MOVOU (SI), X4
1400 MOVOU 16(SI), X5
1401 MOVOA X4, (R9)
1402 MOVOA X5, 16(R9)
1403 ADDQ $0x20, R9
1404 ADDQ $0x20, SI
1405 ADDQ $0x20, R8
1406 DECQ DI
1407 JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back
1408
1409emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32:
1410 MOVOU -32(CX)(R8*1), X4
1411 MOVOU -16(CX)(R8*1), X5
1412 MOVOA X4, -32(AX)(R8*1)
1413 MOVOA X5, -16(AX)(R8*1)
1414 ADDQ $0x20, R8
1415 CMPQ BX, R8
1416 JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
1417 MOVOU X0, (AX)
1418 MOVOU X1, 16(AX)
1419 MOVOU X2, -32(AX)(BX*1)
1420 MOVOU X3, -16(AX)(BX*1)
1421 MOVQ DX, AX
1422
1423emit_literal_done_emit_remainder_encodeBlockAsm:
1424 MOVQ dst_base+0(FP), CX
1425 SUBQ CX, AX
1426 MOVQ AX, ret+48(FP)
1427 RET
1428
1429// func encodeBlockAsm4MB(dst []byte, src []byte) int
1430// Requires: BMI, SSE2
1431TEXT ·encodeBlockAsm4MB(SB), $65560-56
1432 MOVQ dst_base+0(FP), AX
1433 MOVQ $0x00000200, CX
1434 LEAQ 24(SP), DX
1435 PXOR X0, X0
1436
1437zero_loop_encodeBlockAsm4MB:
1438 MOVOU X0, (DX)
1439 MOVOU X0, 16(DX)
1440 MOVOU X0, 32(DX)
1441 MOVOU X0, 48(DX)
1442 MOVOU X0, 64(DX)
1443 MOVOU X0, 80(DX)
1444 MOVOU X0, 96(DX)
1445 MOVOU X0, 112(DX)
1446 ADDQ $0x80, DX
1447 DECQ CX
1448 JNZ zero_loop_encodeBlockAsm4MB
1449 MOVL $0x00000000, 12(SP)
1450 MOVQ src_len+32(FP), CX
1451 LEAQ -9(CX), DX
1452 LEAQ -8(CX), BX
1453 MOVL BX, 8(SP)
1454 SHRQ $0x05, CX
1455 SUBL CX, DX
1456 LEAQ (AX)(DX*1), DX
1457 MOVQ DX, (SP)
1458 MOVL $0x00000001, CX
1459 MOVL CX, 16(SP)
1460 MOVQ src_base+24(FP), DX
1461
1462search_loop_encodeBlockAsm4MB:
1463 MOVL CX, BX
1464 SUBL 12(SP), BX
1465 SHRL $0x06, BX
1466 LEAL 4(CX)(BX*1), BX
1467 CMPL BX, 8(SP)
1468 JAE emit_remainder_encodeBlockAsm4MB
1469 MOVQ (DX)(CX*1), SI
1470 MOVL BX, 20(SP)
1471 MOVQ $0x0000cf1bbcdcbf9b, R8
1472 MOVQ SI, R9
1473 MOVQ SI, R10
1474 SHRQ $0x08, R10
1475 SHLQ $0x10, R9
1476 IMULQ R8, R9
1477 SHRQ $0x32, R9
1478 SHLQ $0x10, R10
1479 IMULQ R8, R10
1480 SHRQ $0x32, R10
1481 MOVL 24(SP)(R9*4), BX
1482 MOVL 24(SP)(R10*4), DI
1483 MOVL CX, 24(SP)(R9*4)
1484 LEAL 1(CX), R9
1485 MOVL R9, 24(SP)(R10*4)
1486 MOVQ SI, R9
1487 SHRQ $0x10, R9
1488 SHLQ $0x10, R9
1489 IMULQ R8, R9
1490 SHRQ $0x32, R9
1491 MOVL CX, R8
1492 SUBL 16(SP), R8
1493 MOVL 1(DX)(R8*1), R10
1494 MOVQ SI, R8
1495 SHRQ $0x08, R8
1496 CMPL R8, R10
1497 JNE no_repeat_found_encodeBlockAsm4MB
1498 LEAL 1(CX), SI
1499 MOVL 12(SP), DI
1500 MOVL SI, BX
1501 SUBL 16(SP), BX
1502 JZ repeat_extend_back_end_encodeBlockAsm4MB
1503
1504repeat_extend_back_loop_encodeBlockAsm4MB:
1505 CMPL SI, DI
1506 JBE repeat_extend_back_end_encodeBlockAsm4MB
1507 MOVB -1(DX)(BX*1), R8
1508 MOVB -1(DX)(SI*1), R9
1509 CMPB R8, R9
1510 JNE repeat_extend_back_end_encodeBlockAsm4MB
1511 LEAL -1(SI), SI
1512 DECL BX
1513 JNZ repeat_extend_back_loop_encodeBlockAsm4MB
1514
1515repeat_extend_back_end_encodeBlockAsm4MB:
1516 MOVL 12(SP), BX
1517 CMPL BX, SI
1518 JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB
1519 MOVL SI, R8
1520 MOVL SI, 12(SP)
1521 LEAQ (DX)(BX*1), R9
1522 SUBL BX, R8
1523 LEAL -1(R8), BX
1524 CMPL BX, $0x3c
1525 JB one_byte_repeat_emit_encodeBlockAsm4MB
1526 CMPL BX, $0x00000100
1527 JB two_bytes_repeat_emit_encodeBlockAsm4MB
1528 CMPL BX, $0x00010000
1529 JB three_bytes_repeat_emit_encodeBlockAsm4MB
1530 MOVL BX, R10
1531 SHRL $0x10, R10
1532 MOVB $0xf8, (AX)
1533 MOVW BX, 1(AX)
1534 MOVB R10, 3(AX)
1535 ADDQ $0x04, AX
1536 JMP memmove_long_repeat_emit_encodeBlockAsm4MB
1537
1538three_bytes_repeat_emit_encodeBlockAsm4MB:
1539 MOVB $0xf4, (AX)
1540 MOVW BX, 1(AX)
1541 ADDQ $0x03, AX
1542 JMP memmove_long_repeat_emit_encodeBlockAsm4MB
1543
1544two_bytes_repeat_emit_encodeBlockAsm4MB:
1545 MOVB $0xf0, (AX)
1546 MOVB BL, 1(AX)
1547 ADDQ $0x02, AX
1548 CMPL BX, $0x40
1549 JB memmove_repeat_emit_encodeBlockAsm4MB
1550 JMP memmove_long_repeat_emit_encodeBlockAsm4MB
1551
1552one_byte_repeat_emit_encodeBlockAsm4MB:
1553 SHLB $0x02, BL
1554 MOVB BL, (AX)
1555 ADDQ $0x01, AX
1556
1557memmove_repeat_emit_encodeBlockAsm4MB:
1558 LEAQ (AX)(R8*1), BX
1559
1560 // genMemMoveShort
1561 CMPQ R8, $0x08
1562 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8
1563 CMPQ R8, $0x10
1564 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16
1565 CMPQ R8, $0x20
1566 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32
1567 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64
1568
1569emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8:
1570 MOVQ (R9), R10
1571 MOVQ R10, (AX)
1572 JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
1573
1574emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16:
1575 MOVQ (R9), R10
1576 MOVQ -8(R9)(R8*1), R9
1577 MOVQ R10, (AX)
1578 MOVQ R9, -8(AX)(R8*1)
1579 JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
1580
1581emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32:
1582 MOVOU (R9), X0
1583 MOVOU -16(R9)(R8*1), X1
1584 MOVOU X0, (AX)
1585 MOVOU X1, -16(AX)(R8*1)
1586 JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
1587
1588emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64:
1589 MOVOU (R9), X0
1590 MOVOU 16(R9), X1
1591 MOVOU -32(R9)(R8*1), X2
1592 MOVOU -16(R9)(R8*1), X3
1593 MOVOU X0, (AX)
1594 MOVOU X1, 16(AX)
1595 MOVOU X2, -32(AX)(R8*1)
1596 MOVOU X3, -16(AX)(R8*1)
1597
1598memmove_end_copy_repeat_emit_encodeBlockAsm4MB:
1599 MOVQ BX, AX
1600 JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB
1601
1602memmove_long_repeat_emit_encodeBlockAsm4MB:
1603 LEAQ (AX)(R8*1), BX
1604
1605 // genMemMoveLong
1606 MOVOU (R9), X0
1607 MOVOU 16(R9), X1
1608 MOVOU -32(R9)(R8*1), X2
1609 MOVOU -16(R9)(R8*1), X3
1610 MOVQ R8, R11
1611 SHRQ $0x05, R11
1612 MOVQ AX, R10
1613 ANDL $0x0000001f, R10
1614 MOVQ $0x00000040, R12
1615 SUBQ R10, R12
1616 DECQ R11
1617 JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
1618 LEAQ -32(R9)(R12*1), R10
1619 LEAQ -32(AX)(R12*1), R13
1620
1621emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back:
1622 MOVOU (R10), X4
1623 MOVOU 16(R10), X5
1624 MOVOA X4, (R13)
1625 MOVOA X5, 16(R13)
1626 ADDQ $0x20, R13
1627 ADDQ $0x20, R10
1628 ADDQ $0x20, R12
1629 DECQ R11
1630 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back
1631
1632emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
1633 MOVOU -32(R9)(R12*1), X4
1634 MOVOU -16(R9)(R12*1), X5
1635 MOVOA X4, -32(AX)(R12*1)
1636 MOVOA X5, -16(AX)(R12*1)
1637 ADDQ $0x20, R12
1638 CMPQ R8, R12
1639 JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
1640 MOVOU X0, (AX)
1641 MOVOU X1, 16(AX)
1642 MOVOU X2, -32(AX)(R8*1)
1643 MOVOU X3, -16(AX)(R8*1)
1644 MOVQ BX, AX
1645
1646emit_literal_done_repeat_emit_encodeBlockAsm4MB:
1647 ADDL $0x05, CX
1648 MOVL CX, BX
1649 SUBL 16(SP), BX
1650 MOVQ src_len+32(FP), R8
1651 SUBL CX, R8
1652 LEAQ (DX)(CX*1), R9
1653 LEAQ (DX)(BX*1), BX
1654
1655 // matchLen
1656 XORL R11, R11
1657
1658matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB:
1659 CMPL R8, $0x10
1660 JB matchlen_match8_repeat_extend_encodeBlockAsm4MB
1661 MOVQ (R9)(R11*1), R10
1662 MOVQ 8(R9)(R11*1), R12
1663 XORQ (BX)(R11*1), R10
1664 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB
1665 XORQ 8(BX)(R11*1), R12
1666 JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm4MB
1667 LEAL -16(R8), R8
1668 LEAL 16(R11), R11
1669 JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB
1670
1671matchlen_bsf_16repeat_extend_encodeBlockAsm4MB:
1672#ifdef GOAMD64_v3
1673 TZCNTQ R12, R12
1674
1675#else
1676 BSFQ R12, R12
1677
1678#endif
1679 SARQ $0x03, R12
1680 LEAL 8(R11)(R12*1), R11
1681 JMP repeat_extend_forward_end_encodeBlockAsm4MB
1682
1683matchlen_match8_repeat_extend_encodeBlockAsm4MB:
1684 CMPL R8, $0x08
1685 JB matchlen_match4_repeat_extend_encodeBlockAsm4MB
1686 MOVQ (R9)(R11*1), R10
1687 XORQ (BX)(R11*1), R10
1688 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB
1689 LEAL -8(R8), R8
1690 LEAL 8(R11), R11
1691 JMP matchlen_match4_repeat_extend_encodeBlockAsm4MB
1692
1693matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB:
1694#ifdef GOAMD64_v3
1695 TZCNTQ R10, R10
1696
1697#else
1698 BSFQ R10, R10
1699
1700#endif
1701 SARQ $0x03, R10
1702 LEAL (R11)(R10*1), R11
1703 JMP repeat_extend_forward_end_encodeBlockAsm4MB
1704
1705matchlen_match4_repeat_extend_encodeBlockAsm4MB:
1706 CMPL R8, $0x04
1707 JB matchlen_match2_repeat_extend_encodeBlockAsm4MB
1708 MOVL (R9)(R11*1), R10
1709 CMPL (BX)(R11*1), R10
1710 JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB
1711 LEAL -4(R8), R8
1712 LEAL 4(R11), R11
1713
1714matchlen_match2_repeat_extend_encodeBlockAsm4MB:
1715 CMPL R8, $0x01
1716 JE matchlen_match1_repeat_extend_encodeBlockAsm4MB
1717 JB repeat_extend_forward_end_encodeBlockAsm4MB
1718 MOVW (R9)(R11*1), R10
1719 CMPW (BX)(R11*1), R10
1720 JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB
1721 LEAL 2(R11), R11
1722 SUBL $0x02, R8
1723 JZ repeat_extend_forward_end_encodeBlockAsm4MB
1724
1725matchlen_match1_repeat_extend_encodeBlockAsm4MB:
1726 MOVB (R9)(R11*1), R10
1727 CMPB (BX)(R11*1), R10
1728 JNE repeat_extend_forward_end_encodeBlockAsm4MB
1729 LEAL 1(R11), R11
1730
1731repeat_extend_forward_end_encodeBlockAsm4MB:
1732 ADDL R11, CX
1733 MOVL CX, BX
1734 SUBL SI, BX
1735 MOVL 16(SP), SI
1736 TESTL DI, DI
1737 JZ repeat_as_copy_encodeBlockAsm4MB
1738
1739 // emitRepeat
1740 MOVL BX, DI
1741 LEAL -4(BX), BX
1742 CMPL DI, $0x08
1743 JBE repeat_two_match_repeat_encodeBlockAsm4MB
1744 CMPL DI, $0x0c
1745 JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB
1746 CMPL SI, $0x00000800
1747 JB repeat_two_offset_match_repeat_encodeBlockAsm4MB
1748
1749cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB:
1750 CMPL BX, $0x00000104
1751 JB repeat_three_match_repeat_encodeBlockAsm4MB
1752 CMPL BX, $0x00010100
1753 JB repeat_four_match_repeat_encodeBlockAsm4MB
1754 LEAL -65536(BX), BX
1755 MOVL BX, SI
1756 MOVW $0x001d, (AX)
1757 MOVW BX, 2(AX)
1758 SARL $0x10, SI
1759 MOVB SI, 4(AX)
1760 ADDQ $0x05, AX
1761 JMP repeat_end_emit_encodeBlockAsm4MB
1762
1763repeat_four_match_repeat_encodeBlockAsm4MB:
1764 LEAL -256(BX), BX
1765 MOVW $0x0019, (AX)
1766 MOVW BX, 2(AX)
1767 ADDQ $0x04, AX
1768 JMP repeat_end_emit_encodeBlockAsm4MB
1769
1770repeat_three_match_repeat_encodeBlockAsm4MB:
1771 LEAL -4(BX), BX
1772 MOVW $0x0015, (AX)
1773 MOVB BL, 2(AX)
1774 ADDQ $0x03, AX
1775 JMP repeat_end_emit_encodeBlockAsm4MB
1776
1777repeat_two_match_repeat_encodeBlockAsm4MB:
1778 SHLL $0x02, BX
1779 ORL $0x01, BX
1780 MOVW BX, (AX)
1781 ADDQ $0x02, AX
1782 JMP repeat_end_emit_encodeBlockAsm4MB
1783
1784repeat_two_offset_match_repeat_encodeBlockAsm4MB:
1785 XORQ DI, DI
1786 LEAL 1(DI)(BX*4), BX
1787 MOVB SI, 1(AX)
1788 SARL $0x08, SI
1789 SHLL $0x05, SI
1790 ORL SI, BX
1791 MOVB BL, (AX)
1792 ADDQ $0x02, AX
1793 JMP repeat_end_emit_encodeBlockAsm4MB
1794
1795repeat_as_copy_encodeBlockAsm4MB:
1796 // emitCopy
1797 CMPL SI, $0x00010000
1798 JB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB
1799 CMPL BX, $0x40
1800 JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
1801 MOVB $0xff, (AX)
1802 MOVL SI, 1(AX)
1803 LEAL -64(BX), BX
1804 ADDQ $0x05, AX
1805 CMPL BX, $0x04
1806 JB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
1807
1808 // emitRepeat
1809 MOVL BX, DI
1810 LEAL -4(BX), BX
1811 CMPL DI, $0x08
1812 JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy
1813 CMPL DI, $0x0c
1814 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
1815 CMPL SI, $0x00000800
1816 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
1817
1818cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
1819 CMPL BX, $0x00000104
1820 JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy
1821 CMPL BX, $0x00010100
1822 JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy
1823 LEAL -65536(BX), BX
1824 MOVL BX, SI
1825 MOVW $0x001d, (AX)
1826 MOVW BX, 2(AX)
1827 SARL $0x10, SI
1828 MOVB SI, 4(AX)
1829 ADDQ $0x05, AX
1830 JMP repeat_end_emit_encodeBlockAsm4MB
1831
1832repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
1833 LEAL -256(BX), BX
1834 MOVW $0x0019, (AX)
1835 MOVW BX, 2(AX)
1836 ADDQ $0x04, AX
1837 JMP repeat_end_emit_encodeBlockAsm4MB
1838
1839repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
1840 LEAL -4(BX), BX
1841 MOVW $0x0015, (AX)
1842 MOVB BL, 2(AX)
1843 ADDQ $0x03, AX
1844 JMP repeat_end_emit_encodeBlockAsm4MB
1845
1846repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
1847 SHLL $0x02, BX
1848 ORL $0x01, BX
1849 MOVW BX, (AX)
1850 ADDQ $0x02, AX
1851 JMP repeat_end_emit_encodeBlockAsm4MB
1852
1853repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
1854 XORQ DI, DI
1855 LEAL 1(DI)(BX*4), BX
1856 MOVB SI, 1(AX)
1857 SARL $0x08, SI
1858 SHLL $0x05, SI
1859 ORL SI, BX
1860 MOVB BL, (AX)
1861 ADDQ $0x02, AX
1862 JMP repeat_end_emit_encodeBlockAsm4MB
1863
1864four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB:
1865 TESTL BX, BX
1866 JZ repeat_end_emit_encodeBlockAsm4MB
1867 XORL DI, DI
1868 LEAL -1(DI)(BX*4), BX
1869 MOVB BL, (AX)
1870 MOVL SI, 1(AX)
1871 ADDQ $0x05, AX
1872 JMP repeat_end_emit_encodeBlockAsm4MB
1873
1874two_byte_offset_repeat_as_copy_encodeBlockAsm4MB:
1875 CMPL BX, $0x40
1876 JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB
1877 CMPL SI, $0x00000800
1878 JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB
1879 MOVL $0x00000001, DI
1880 LEAL 16(DI), DI
1881 MOVB SI, 1(AX)
1882 SHRL $0x08, SI
1883 SHLL $0x05, SI
1884 ORL SI, DI
1885 MOVB DI, (AX)
1886 ADDQ $0x02, AX
1887 SUBL $0x08, BX
1888
1889 // emitRepeat
1890 LEAL -4(BX), BX
1891 JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
1892 MOVL BX, DI
1893 LEAL -4(BX), BX
1894 CMPL DI, $0x08
1895 JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
1896 CMPL DI, $0x0c
1897 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
1898 CMPL SI, $0x00000800
1899 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
1900
1901cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
1902 CMPL BX, $0x00000104
1903 JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
1904 CMPL BX, $0x00010100
1905 JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
1906 LEAL -65536(BX), BX
1907 MOVL BX, SI
1908 MOVW $0x001d, (AX)
1909 MOVW BX, 2(AX)
1910 SARL $0x10, SI
1911 MOVB SI, 4(AX)
1912 ADDQ $0x05, AX
1913 JMP repeat_end_emit_encodeBlockAsm4MB
1914
1915repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
1916 LEAL -256(BX), BX
1917 MOVW $0x0019, (AX)
1918 MOVW BX, 2(AX)
1919 ADDQ $0x04, AX
1920 JMP repeat_end_emit_encodeBlockAsm4MB
1921
1922repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
1923 LEAL -4(BX), BX
1924 MOVW $0x0015, (AX)
1925 MOVB BL, 2(AX)
1926 ADDQ $0x03, AX
1927 JMP repeat_end_emit_encodeBlockAsm4MB
1928
1929repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
1930 SHLL $0x02, BX
1931 ORL $0x01, BX
1932 MOVW BX, (AX)
1933 ADDQ $0x02, AX
1934 JMP repeat_end_emit_encodeBlockAsm4MB
1935
1936repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
1937 XORQ DI, DI
1938 LEAL 1(DI)(BX*4), BX
1939 MOVB SI, 1(AX)
1940 SARL $0x08, SI
1941 SHLL $0x05, SI
1942 ORL SI, BX
1943 MOVB BL, (AX)
1944 ADDQ $0x02, AX
1945 JMP repeat_end_emit_encodeBlockAsm4MB
1946
1947long_offset_short_repeat_as_copy_encodeBlockAsm4MB:
1948 MOVB $0xee, (AX)
1949 MOVW SI, 1(AX)
1950 LEAL -60(BX), BX
1951 ADDQ $0x03, AX
1952
1953 // emitRepeat
1954 MOVL BX, DI
1955 LEAL -4(BX), BX
1956 CMPL DI, $0x08
1957 JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
1958 CMPL DI, $0x0c
1959 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
1960 CMPL SI, $0x00000800
1961 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
1962
1963cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
1964 CMPL BX, $0x00000104
1965 JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
1966 CMPL BX, $0x00010100
1967 JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
1968 LEAL -65536(BX), BX
1969 MOVL BX, SI
1970 MOVW $0x001d, (AX)
1971 MOVW BX, 2(AX)
1972 SARL $0x10, SI
1973 MOVB SI, 4(AX)
1974 ADDQ $0x05, AX
1975 JMP repeat_end_emit_encodeBlockAsm4MB
1976
1977repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
1978 LEAL -256(BX), BX
1979 MOVW $0x0019, (AX)
1980 MOVW BX, 2(AX)
1981 ADDQ $0x04, AX
1982 JMP repeat_end_emit_encodeBlockAsm4MB
1983
1984repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
1985 LEAL -4(BX), BX
1986 MOVW $0x0015, (AX)
1987 MOVB BL, 2(AX)
1988 ADDQ $0x03, AX
1989 JMP repeat_end_emit_encodeBlockAsm4MB
1990
1991repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
1992 SHLL $0x02, BX
1993 ORL $0x01, BX
1994 MOVW BX, (AX)
1995 ADDQ $0x02, AX
1996 JMP repeat_end_emit_encodeBlockAsm4MB
1997
1998repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
1999 XORQ DI, DI
2000 LEAL 1(DI)(BX*4), BX
2001 MOVB SI, 1(AX)
2002 SARL $0x08, SI
2003 SHLL $0x05, SI
2004 ORL SI, BX
2005 MOVB BL, (AX)
2006 ADDQ $0x02, AX
2007 JMP repeat_end_emit_encodeBlockAsm4MB
2008
2009two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB:
2010 MOVL BX, DI
2011 SHLL $0x02, DI
2012 CMPL BX, $0x0c
2013 JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
2014 CMPL SI, $0x00000800
2015 JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
2016 LEAL -15(DI), DI
2017 MOVB SI, 1(AX)
2018 SHRL $0x08, SI
2019 SHLL $0x05, SI
2020 ORL SI, DI
2021 MOVB DI, (AX)
2022 ADDQ $0x02, AX
2023 JMP repeat_end_emit_encodeBlockAsm4MB
2024
2025emit_copy_three_repeat_as_copy_encodeBlockAsm4MB:
2026 LEAL -2(DI), DI
2027 MOVB DI, (AX)
2028 MOVW SI, 1(AX)
2029 ADDQ $0x03, AX
2030
2031repeat_end_emit_encodeBlockAsm4MB:
2032 MOVL CX, 12(SP)
2033 JMP search_loop_encodeBlockAsm4MB
2034
2035no_repeat_found_encodeBlockAsm4MB:
2036 CMPL (DX)(BX*1), SI
2037 JEQ candidate_match_encodeBlockAsm4MB
2038 SHRQ $0x08, SI
2039 MOVL 24(SP)(R9*4), BX
2040 LEAL 2(CX), R8
2041 CMPL (DX)(DI*1), SI
2042 JEQ candidate2_match_encodeBlockAsm4MB
2043 MOVL R8, 24(SP)(R9*4)
2044 SHRQ $0x08, SI
2045 CMPL (DX)(BX*1), SI
2046 JEQ candidate3_match_encodeBlockAsm4MB
2047 MOVL 20(SP), CX
2048 JMP search_loop_encodeBlockAsm4MB
2049
2050candidate3_match_encodeBlockAsm4MB:
2051 ADDL $0x02, CX
2052 JMP candidate_match_encodeBlockAsm4MB
2053
2054candidate2_match_encodeBlockAsm4MB:
2055 MOVL R8, 24(SP)(R9*4)
2056 INCL CX
2057 MOVL DI, BX
2058
2059candidate_match_encodeBlockAsm4MB:
2060 MOVL 12(SP), SI
2061 TESTL BX, BX
2062 JZ match_extend_back_end_encodeBlockAsm4MB
2063
2064match_extend_back_loop_encodeBlockAsm4MB:
2065 CMPL CX, SI
2066 JBE match_extend_back_end_encodeBlockAsm4MB
2067 MOVB -1(DX)(BX*1), DI
2068 MOVB -1(DX)(CX*1), R8
2069 CMPB DI, R8
2070 JNE match_extend_back_end_encodeBlockAsm4MB
2071 LEAL -1(CX), CX
2072 DECL BX
2073 JZ match_extend_back_end_encodeBlockAsm4MB
2074 JMP match_extend_back_loop_encodeBlockAsm4MB
2075
2076match_extend_back_end_encodeBlockAsm4MB:
2077 MOVL CX, SI
2078 SUBL 12(SP), SI
2079 LEAQ 4(AX)(SI*1), SI
2080 CMPQ SI, (SP)
2081 JB match_dst_size_check_encodeBlockAsm4MB
2082 MOVQ $0x00000000, ret+48(FP)
2083 RET
2084
2085match_dst_size_check_encodeBlockAsm4MB:
2086 MOVL CX, SI
2087 MOVL 12(SP), DI
2088 CMPL DI, SI
2089 JEQ emit_literal_done_match_emit_encodeBlockAsm4MB
2090 MOVL SI, R8
2091 MOVL SI, 12(SP)
2092 LEAQ (DX)(DI*1), SI
2093 SUBL DI, R8
2094 LEAL -1(R8), DI
2095 CMPL DI, $0x3c
2096 JB one_byte_match_emit_encodeBlockAsm4MB
2097 CMPL DI, $0x00000100
2098 JB two_bytes_match_emit_encodeBlockAsm4MB
2099 CMPL DI, $0x00010000
2100 JB three_bytes_match_emit_encodeBlockAsm4MB
2101 MOVL DI, R9
2102 SHRL $0x10, R9
2103 MOVB $0xf8, (AX)
2104 MOVW DI, 1(AX)
2105 MOVB R9, 3(AX)
2106 ADDQ $0x04, AX
2107 JMP memmove_long_match_emit_encodeBlockAsm4MB
2108
2109three_bytes_match_emit_encodeBlockAsm4MB:
2110 MOVB $0xf4, (AX)
2111 MOVW DI, 1(AX)
2112 ADDQ $0x03, AX
2113 JMP memmove_long_match_emit_encodeBlockAsm4MB
2114
2115two_bytes_match_emit_encodeBlockAsm4MB:
2116 MOVB $0xf0, (AX)
2117 MOVB DI, 1(AX)
2118 ADDQ $0x02, AX
2119 CMPL DI, $0x40
2120 JB memmove_match_emit_encodeBlockAsm4MB
2121 JMP memmove_long_match_emit_encodeBlockAsm4MB
2122
2123one_byte_match_emit_encodeBlockAsm4MB:
2124 SHLB $0x02, DI
2125 MOVB DI, (AX)
2126 ADDQ $0x01, AX
2127
2128memmove_match_emit_encodeBlockAsm4MB:
2129 LEAQ (AX)(R8*1), DI
2130
2131 // genMemMoveShort
2132 CMPQ R8, $0x08
2133 JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8
2134 CMPQ R8, $0x10
2135 JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16
2136 CMPQ R8, $0x20
2137 JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32
2138 JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64
2139
2140emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8:
2141 MOVQ (SI), R9
2142 MOVQ R9, (AX)
2143 JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
2144
2145emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16:
2146 MOVQ (SI), R9
2147 MOVQ -8(SI)(R8*1), SI
2148 MOVQ R9, (AX)
2149 MOVQ SI, -8(AX)(R8*1)
2150 JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
2151
2152emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32:
2153 MOVOU (SI), X0
2154 MOVOU -16(SI)(R8*1), X1
2155 MOVOU X0, (AX)
2156 MOVOU X1, -16(AX)(R8*1)
2157 JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
2158
2159emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64:
2160 MOVOU (SI), X0
2161 MOVOU 16(SI), X1
2162 MOVOU -32(SI)(R8*1), X2
2163 MOVOU -16(SI)(R8*1), X3
2164 MOVOU X0, (AX)
2165 MOVOU X1, 16(AX)
2166 MOVOU X2, -32(AX)(R8*1)
2167 MOVOU X3, -16(AX)(R8*1)
2168
2169memmove_end_copy_match_emit_encodeBlockAsm4MB:
2170 MOVQ DI, AX
2171 JMP emit_literal_done_match_emit_encodeBlockAsm4MB
2172
2173memmove_long_match_emit_encodeBlockAsm4MB:
2174 LEAQ (AX)(R8*1), DI
2175
2176 // genMemMoveLong
2177 MOVOU (SI), X0
2178 MOVOU 16(SI), X1
2179 MOVOU -32(SI)(R8*1), X2
2180 MOVOU -16(SI)(R8*1), X3
2181 MOVQ R8, R10
2182 SHRQ $0x05, R10
2183 MOVQ AX, R9
2184 ANDL $0x0000001f, R9
2185 MOVQ $0x00000040, R11
2186 SUBQ R9, R11
2187 DECQ R10
2188 JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
2189 LEAQ -32(SI)(R11*1), R9
2190 LEAQ -32(AX)(R11*1), R12
2191
2192emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back:
2193 MOVOU (R9), X4
2194 MOVOU 16(R9), X5
2195 MOVOA X4, (R12)
2196 MOVOA X5, 16(R12)
2197 ADDQ $0x20, R12
2198 ADDQ $0x20, R9
2199 ADDQ $0x20, R11
2200 DECQ R10
2201 JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back
2202
2203emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
2204 MOVOU -32(SI)(R11*1), X4
2205 MOVOU -16(SI)(R11*1), X5
2206 MOVOA X4, -32(AX)(R11*1)
2207 MOVOA X5, -16(AX)(R11*1)
2208 ADDQ $0x20, R11
2209 CMPQ R8, R11
2210 JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
2211 MOVOU X0, (AX)
2212 MOVOU X1, 16(AX)
2213 MOVOU X2, -32(AX)(R8*1)
2214 MOVOU X3, -16(AX)(R8*1)
2215 MOVQ DI, AX
2216
2217emit_literal_done_match_emit_encodeBlockAsm4MB:
2218match_nolit_loop_encodeBlockAsm4MB:
2219 MOVL CX, SI
2220 SUBL BX, SI
2221 MOVL SI, 16(SP)
2222 ADDL $0x04, CX
2223 ADDL $0x04, BX
2224 MOVQ src_len+32(FP), SI
2225 SUBL CX, SI
2226 LEAQ (DX)(CX*1), DI
2227 LEAQ (DX)(BX*1), BX
2228
2229 // matchLen
2230 XORL R9, R9
2231
2232matchlen_loopback_16_match_nolit_encodeBlockAsm4MB:
2233 CMPL SI, $0x10
2234 JB matchlen_match8_match_nolit_encodeBlockAsm4MB
2235 MOVQ (DI)(R9*1), R8
2236 MOVQ 8(DI)(R9*1), R10
2237 XORQ (BX)(R9*1), R8
2238 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB
2239 XORQ 8(BX)(R9*1), R10
2240 JNZ matchlen_bsf_16match_nolit_encodeBlockAsm4MB
2241 LEAL -16(SI), SI
2242 LEAL 16(R9), R9
2243 JMP matchlen_loopback_16_match_nolit_encodeBlockAsm4MB
2244
2245matchlen_bsf_16match_nolit_encodeBlockAsm4MB:
2246#ifdef GOAMD64_v3
2247 TZCNTQ R10, R10
2248
2249#else
2250 BSFQ R10, R10
2251
2252#endif
2253 SARQ $0x03, R10
2254 LEAL 8(R9)(R10*1), R9
2255 JMP match_nolit_end_encodeBlockAsm4MB
2256
2257matchlen_match8_match_nolit_encodeBlockAsm4MB:
2258 CMPL SI, $0x08
2259 JB matchlen_match4_match_nolit_encodeBlockAsm4MB
2260 MOVQ (DI)(R9*1), R8
2261 XORQ (BX)(R9*1), R8
2262 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB
2263 LEAL -8(SI), SI
2264 LEAL 8(R9), R9
2265 JMP matchlen_match4_match_nolit_encodeBlockAsm4MB
2266
2267matchlen_bsf_8_match_nolit_encodeBlockAsm4MB:
2268#ifdef GOAMD64_v3
2269 TZCNTQ R8, R8
2270
2271#else
2272 BSFQ R8, R8
2273
2274#endif
2275 SARQ $0x03, R8
2276 LEAL (R9)(R8*1), R9
2277 JMP match_nolit_end_encodeBlockAsm4MB
2278
2279matchlen_match4_match_nolit_encodeBlockAsm4MB:
2280 CMPL SI, $0x04
2281 JB matchlen_match2_match_nolit_encodeBlockAsm4MB
2282 MOVL (DI)(R9*1), R8
2283 CMPL (BX)(R9*1), R8
2284 JNE matchlen_match2_match_nolit_encodeBlockAsm4MB
2285 LEAL -4(SI), SI
2286 LEAL 4(R9), R9
2287
2288matchlen_match2_match_nolit_encodeBlockAsm4MB:
2289 CMPL SI, $0x01
2290 JE matchlen_match1_match_nolit_encodeBlockAsm4MB
2291 JB match_nolit_end_encodeBlockAsm4MB
2292 MOVW (DI)(R9*1), R8
2293 CMPW (BX)(R9*1), R8
2294 JNE matchlen_match1_match_nolit_encodeBlockAsm4MB
2295 LEAL 2(R9), R9
2296 SUBL $0x02, SI
2297 JZ match_nolit_end_encodeBlockAsm4MB
2298
2299matchlen_match1_match_nolit_encodeBlockAsm4MB:
2300 MOVB (DI)(R9*1), R8
2301 CMPB (BX)(R9*1), R8
2302 JNE match_nolit_end_encodeBlockAsm4MB
2303 LEAL 1(R9), R9
2304
2305match_nolit_end_encodeBlockAsm4MB:
2306 ADDL R9, CX
2307 MOVL 16(SP), BX
2308 ADDL $0x04, R9
2309 MOVL CX, 12(SP)
2310
2311 // emitCopy
2312 CMPL BX, $0x00010000
2313 JB two_byte_offset_match_nolit_encodeBlockAsm4MB
2314 CMPL R9, $0x40
2315 JBE four_bytes_remain_match_nolit_encodeBlockAsm4MB
2316 MOVB $0xff, (AX)
2317 MOVL BX, 1(AX)
2318 LEAL -64(R9), R9
2319 ADDQ $0x05, AX
2320 CMPL R9, $0x04
2321 JB four_bytes_remain_match_nolit_encodeBlockAsm4MB
2322
2323 // emitRepeat
2324 MOVL R9, SI
2325 LEAL -4(R9), R9
2326 CMPL SI, $0x08
2327 JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy
2328 CMPL SI, $0x0c
2329 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
2330 CMPL BX, $0x00000800
2331 JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
2332
2333cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
2334 CMPL R9, $0x00000104
2335 JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy
2336 CMPL R9, $0x00010100
2337 JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy
2338 LEAL -65536(R9), R9
2339 MOVL R9, BX
2340 MOVW $0x001d, (AX)
2341 MOVW R9, 2(AX)
2342 SARL $0x10, BX
2343 MOVB BL, 4(AX)
2344 ADDQ $0x05, AX
2345 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2346
2347repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy:
2348 LEAL -256(R9), R9
2349 MOVW $0x0019, (AX)
2350 MOVW R9, 2(AX)
2351 ADDQ $0x04, AX
2352 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2353
2354repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy:
2355 LEAL -4(R9), R9
2356 MOVW $0x0015, (AX)
2357 MOVB R9, 2(AX)
2358 ADDQ $0x03, AX
2359 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2360
2361repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy:
2362 SHLL $0x02, R9
2363 ORL $0x01, R9
2364 MOVW R9, (AX)
2365 ADDQ $0x02, AX
2366 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2367
2368repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
2369 XORQ SI, SI
2370 LEAL 1(SI)(R9*4), R9
2371 MOVB BL, 1(AX)
2372 SARL $0x08, BX
2373 SHLL $0x05, BX
2374 ORL BX, R9
2375 MOVB R9, (AX)
2376 ADDQ $0x02, AX
2377 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2378
2379four_bytes_remain_match_nolit_encodeBlockAsm4MB:
2380 TESTL R9, R9
2381 JZ match_nolit_emitcopy_end_encodeBlockAsm4MB
2382 XORL SI, SI
2383 LEAL -1(SI)(R9*4), R9
2384 MOVB R9, (AX)
2385 MOVL BX, 1(AX)
2386 ADDQ $0x05, AX
2387 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2388
2389two_byte_offset_match_nolit_encodeBlockAsm4MB:
2390 CMPL R9, $0x40
2391 JBE two_byte_offset_short_match_nolit_encodeBlockAsm4MB
2392 CMPL BX, $0x00000800
2393 JAE long_offset_short_match_nolit_encodeBlockAsm4MB
2394 MOVL $0x00000001, SI
2395 LEAL 16(SI), SI
2396 MOVB BL, 1(AX)
2397 SHRL $0x08, BX
2398 SHLL $0x05, BX
2399 ORL BX, SI
2400 MOVB SI, (AX)
2401 ADDQ $0x02, AX
2402 SUBL $0x08, R9
2403
2404 // emitRepeat
2405 LEAL -4(R9), R9
2406 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
2407 MOVL R9, SI
2408 LEAL -4(R9), R9
2409 CMPL SI, $0x08
2410 JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
2411 CMPL SI, $0x0c
2412 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
2413 CMPL BX, $0x00000800
2414 JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
2415
2416cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
2417 CMPL R9, $0x00000104
2418 JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
2419 CMPL R9, $0x00010100
2420 JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
2421 LEAL -65536(R9), R9
2422 MOVL R9, BX
2423 MOVW $0x001d, (AX)
2424 MOVW R9, 2(AX)
2425 SARL $0x10, BX
2426 MOVB BL, 4(AX)
2427 ADDQ $0x05, AX
2428 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2429
2430repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
2431 LEAL -256(R9), R9
2432 MOVW $0x0019, (AX)
2433 MOVW R9, 2(AX)
2434 ADDQ $0x04, AX
2435 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2436
2437repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
2438 LEAL -4(R9), R9
2439 MOVW $0x0015, (AX)
2440 MOVB R9, 2(AX)
2441 ADDQ $0x03, AX
2442 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2443
2444repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
2445 SHLL $0x02, R9
2446 ORL $0x01, R9
2447 MOVW R9, (AX)
2448 ADDQ $0x02, AX
2449 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2450
2451repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
2452 XORQ SI, SI
2453 LEAL 1(SI)(R9*4), R9
2454 MOVB BL, 1(AX)
2455 SARL $0x08, BX
2456 SHLL $0x05, BX
2457 ORL BX, R9
2458 MOVB R9, (AX)
2459 ADDQ $0x02, AX
2460 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2461
2462long_offset_short_match_nolit_encodeBlockAsm4MB:
2463 MOVB $0xee, (AX)
2464 MOVW BX, 1(AX)
2465 LEAL -60(R9), R9
2466 ADDQ $0x03, AX
2467
2468 // emitRepeat
2469 MOVL R9, SI
2470 LEAL -4(R9), R9
2471 CMPL SI, $0x08
2472 JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short
2473 CMPL SI, $0x0c
2474 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
2475 CMPL BX, $0x00000800
2476 JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
2477
2478cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
2479 CMPL R9, $0x00000104
2480 JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short
2481 CMPL R9, $0x00010100
2482 JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short
2483 LEAL -65536(R9), R9
2484 MOVL R9, BX
2485 MOVW $0x001d, (AX)
2486 MOVW R9, 2(AX)
2487 SARL $0x10, BX
2488 MOVB BL, 4(AX)
2489 ADDQ $0x05, AX
2490 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2491
2492repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short:
2493 LEAL -256(R9), R9
2494 MOVW $0x0019, (AX)
2495 MOVW R9, 2(AX)
2496 ADDQ $0x04, AX
2497 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2498
2499repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short:
2500 LEAL -4(R9), R9
2501 MOVW $0x0015, (AX)
2502 MOVB R9, 2(AX)
2503 ADDQ $0x03, AX
2504 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2505
2506repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short:
2507 SHLL $0x02, R9
2508 ORL $0x01, R9
2509 MOVW R9, (AX)
2510 ADDQ $0x02, AX
2511 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2512
2513repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
2514 XORQ SI, SI
2515 LEAL 1(SI)(R9*4), R9
2516 MOVB BL, 1(AX)
2517 SARL $0x08, BX
2518 SHLL $0x05, BX
2519 ORL BX, R9
2520 MOVB R9, (AX)
2521 ADDQ $0x02, AX
2522 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2523
2524two_byte_offset_short_match_nolit_encodeBlockAsm4MB:
2525 MOVL R9, SI
2526 SHLL $0x02, SI
2527 CMPL R9, $0x0c
2528 JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
2529 CMPL BX, $0x00000800
2530 JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
2531 LEAL -15(SI), SI
2532 MOVB BL, 1(AX)
2533 SHRL $0x08, BX
2534 SHLL $0x05, BX
2535 ORL BX, SI
2536 MOVB SI, (AX)
2537 ADDQ $0x02, AX
2538 JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
2539
2540emit_copy_three_match_nolit_encodeBlockAsm4MB:
2541 LEAL -2(SI), SI
2542 MOVB SI, (AX)
2543 MOVW BX, 1(AX)
2544 ADDQ $0x03, AX
2545
2546match_nolit_emitcopy_end_encodeBlockAsm4MB:
2547 CMPL CX, 8(SP)
2548 JAE emit_remainder_encodeBlockAsm4MB
2549 MOVQ -2(DX)(CX*1), SI
2550 CMPQ AX, (SP)
2551 JB match_nolit_dst_ok_encodeBlockAsm4MB
2552 MOVQ $0x00000000, ret+48(FP)
2553 RET
2554
2555match_nolit_dst_ok_encodeBlockAsm4MB:
2556 MOVQ $0x0000cf1bbcdcbf9b, R8
2557 MOVQ SI, DI
2558 SHRQ $0x10, SI
2559 MOVQ SI, BX
2560 SHLQ $0x10, DI
2561 IMULQ R8, DI
2562 SHRQ $0x32, DI
2563 SHLQ $0x10, BX
2564 IMULQ R8, BX
2565 SHRQ $0x32, BX
2566 LEAL -2(CX), R8
2567 LEAQ 24(SP)(BX*4), R9
2568 MOVL (R9), BX
2569 MOVL R8, 24(SP)(DI*4)
2570 MOVL CX, (R9)
2571 CMPL (DX)(BX*1), SI
2572 JEQ match_nolit_loop_encodeBlockAsm4MB
2573 INCL CX
2574 JMP search_loop_encodeBlockAsm4MB
2575
2576emit_remainder_encodeBlockAsm4MB:
2577 MOVQ src_len+32(FP), CX
2578 SUBL 12(SP), CX
2579 LEAQ 4(AX)(CX*1), CX
2580 CMPQ CX, (SP)
2581 JB emit_remainder_ok_encodeBlockAsm4MB
2582 MOVQ $0x00000000, ret+48(FP)
2583 RET
2584
2585emit_remainder_ok_encodeBlockAsm4MB:
2586 MOVQ src_len+32(FP), CX
2587 MOVL 12(SP), BX
2588 CMPL BX, CX
2589 JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB
2590 MOVL CX, SI
2591 MOVL CX, 12(SP)
2592 LEAQ (DX)(BX*1), CX
2593 SUBL BX, SI
2594 LEAL -1(SI), DX
2595 CMPL DX, $0x3c
2596 JB one_byte_emit_remainder_encodeBlockAsm4MB
2597 CMPL DX, $0x00000100
2598 JB two_bytes_emit_remainder_encodeBlockAsm4MB
2599 CMPL DX, $0x00010000
2600 JB three_bytes_emit_remainder_encodeBlockAsm4MB
2601 MOVL DX, BX
2602 SHRL $0x10, BX
2603 MOVB $0xf8, (AX)
2604 MOVW DX, 1(AX)
2605 MOVB BL, 3(AX)
2606 ADDQ $0x04, AX
2607 JMP memmove_long_emit_remainder_encodeBlockAsm4MB
2608
2609three_bytes_emit_remainder_encodeBlockAsm4MB:
2610 MOVB $0xf4, (AX)
2611 MOVW DX, 1(AX)
2612 ADDQ $0x03, AX
2613 JMP memmove_long_emit_remainder_encodeBlockAsm4MB
2614
2615two_bytes_emit_remainder_encodeBlockAsm4MB:
2616 MOVB $0xf0, (AX)
2617 MOVB DL, 1(AX)
2618 ADDQ $0x02, AX
2619 CMPL DX, $0x40
2620 JB memmove_emit_remainder_encodeBlockAsm4MB
2621 JMP memmove_long_emit_remainder_encodeBlockAsm4MB
2622
2623one_byte_emit_remainder_encodeBlockAsm4MB:
2624 SHLB $0x02, DL
2625 MOVB DL, (AX)
2626 ADDQ $0x01, AX
2627
2628memmove_emit_remainder_encodeBlockAsm4MB:
2629 LEAQ (AX)(SI*1), DX
2630 MOVL SI, BX
2631
2632 // genMemMoveShort
2633 CMPQ BX, $0x03
2634 JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2
2635 JE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3
2636 CMPQ BX, $0x08
2637 JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7
2638 CMPQ BX, $0x10
2639 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16
2640 CMPQ BX, $0x20
2641 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32
2642 JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64
2643
2644emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2:
2645 MOVB (CX), SI
2646 MOVB -1(CX)(BX*1), CL
2647 MOVB SI, (AX)
2648 MOVB CL, -1(AX)(BX*1)
2649 JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
2650
2651emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3:
2652 MOVW (CX), SI
2653 MOVB 2(CX), CL
2654 MOVW SI, (AX)
2655 MOVB CL, 2(AX)
2656 JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
2657
2658emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7:
2659 MOVL (CX), SI
2660 MOVL -4(CX)(BX*1), CX
2661 MOVL SI, (AX)
2662 MOVL CX, -4(AX)(BX*1)
2663 JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
2664
2665emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16:
2666 MOVQ (CX), SI
2667 MOVQ -8(CX)(BX*1), CX
2668 MOVQ SI, (AX)
2669 MOVQ CX, -8(AX)(BX*1)
2670 JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
2671
2672emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32:
2673 MOVOU (CX), X0
2674 MOVOU -16(CX)(BX*1), X1
2675 MOVOU X0, (AX)
2676 MOVOU X1, -16(AX)(BX*1)
2677 JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
2678
2679emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64:
2680 MOVOU (CX), X0
2681 MOVOU 16(CX), X1
2682 MOVOU -32(CX)(BX*1), X2
2683 MOVOU -16(CX)(BX*1), X3
2684 MOVOU X0, (AX)
2685 MOVOU X1, 16(AX)
2686 MOVOU X2, -32(AX)(BX*1)
2687 MOVOU X3, -16(AX)(BX*1)
2688
2689memmove_end_copy_emit_remainder_encodeBlockAsm4MB:
2690 MOVQ DX, AX
2691 JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB
2692
2693memmove_long_emit_remainder_encodeBlockAsm4MB:
2694 LEAQ (AX)(SI*1), DX
2695 MOVL SI, BX
2696
2697 // genMemMoveLong
2698 MOVOU (CX), X0
2699 MOVOU 16(CX), X1
2700 MOVOU -32(CX)(BX*1), X2
2701 MOVOU -16(CX)(BX*1), X3
2702 MOVQ BX, DI
2703 SHRQ $0x05, DI
2704 MOVQ AX, SI
2705 ANDL $0x0000001f, SI
2706 MOVQ $0x00000040, R8
2707 SUBQ SI, R8
2708 DECQ DI
2709 JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
2710 LEAQ -32(CX)(R8*1), SI
2711 LEAQ -32(AX)(R8*1), R9
2712
2713emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back:
2714 MOVOU (SI), X4
2715 MOVOU 16(SI), X5
2716 MOVOA X4, (R9)
2717 MOVOA X5, 16(R9)
2718 ADDQ $0x20, R9
2719 ADDQ $0x20, SI
2720 ADDQ $0x20, R8
2721 DECQ DI
2722 JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back
2723
2724emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32:
2725 MOVOU -32(CX)(R8*1), X4
2726 MOVOU -16(CX)(R8*1), X5
2727 MOVOA X4, -32(AX)(R8*1)
2728 MOVOA X5, -16(AX)(R8*1)
2729 ADDQ $0x20, R8
2730 CMPQ BX, R8
2731 JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
2732 MOVOU X0, (AX)
2733 MOVOU X1, 16(AX)
2734 MOVOU X2, -32(AX)(BX*1)
2735 MOVOU X3, -16(AX)(BX*1)
2736 MOVQ DX, AX
2737
2738emit_literal_done_emit_remainder_encodeBlockAsm4MB:
2739 MOVQ dst_base+0(FP), CX
2740 SUBQ CX, AX
2741 MOVQ AX, ret+48(FP)
2742 RET
2743
2744// func encodeBlockAsm12B(dst []byte, src []byte) int
2745// Requires: BMI, SSE2
2746TEXT ·encodeBlockAsm12B(SB), $16408-56
2747 MOVQ dst_base+0(FP), AX
2748 MOVQ $0x00000080, CX
2749 LEAQ 24(SP), DX
2750 PXOR X0, X0
2751
2752zero_loop_encodeBlockAsm12B:
2753 MOVOU X0, (DX)
2754 MOVOU X0, 16(DX)
2755 MOVOU X0, 32(DX)
2756 MOVOU X0, 48(DX)
2757 MOVOU X0, 64(DX)
2758 MOVOU X0, 80(DX)
2759 MOVOU X0, 96(DX)
2760 MOVOU X0, 112(DX)
2761 ADDQ $0x80, DX
2762 DECQ CX
2763 JNZ zero_loop_encodeBlockAsm12B
2764 MOVL $0x00000000, 12(SP)
2765 MOVQ src_len+32(FP), CX
2766 LEAQ -9(CX), DX
2767 LEAQ -8(CX), BX
2768 MOVL BX, 8(SP)
2769 SHRQ $0x05, CX
2770 SUBL CX, DX
2771 LEAQ (AX)(DX*1), DX
2772 MOVQ DX, (SP)
2773 MOVL $0x00000001, CX
2774 MOVL CX, 16(SP)
2775 MOVQ src_base+24(FP), DX
2776
2777search_loop_encodeBlockAsm12B:
2778 MOVL CX, BX
2779 SUBL 12(SP), BX
2780 SHRL $0x05, BX
2781 LEAL 4(CX)(BX*1), BX
2782 CMPL BX, 8(SP)
2783 JAE emit_remainder_encodeBlockAsm12B
2784 MOVQ (DX)(CX*1), SI
2785 MOVL BX, 20(SP)
2786 MOVQ $0x000000cf1bbcdcbb, R8
2787 MOVQ SI, R9
2788 MOVQ SI, R10
2789 SHRQ $0x08, R10
2790 SHLQ $0x18, R9
2791 IMULQ R8, R9
2792 SHRQ $0x34, R9
2793 SHLQ $0x18, R10
2794 IMULQ R8, R10
2795 SHRQ $0x34, R10
2796 MOVL 24(SP)(R9*4), BX
2797 MOVL 24(SP)(R10*4), DI
2798 MOVL CX, 24(SP)(R9*4)
2799 LEAL 1(CX), R9
2800 MOVL R9, 24(SP)(R10*4)
2801 MOVQ SI, R9
2802 SHRQ $0x10, R9
2803 SHLQ $0x18, R9
2804 IMULQ R8, R9
2805 SHRQ $0x34, R9
2806 MOVL CX, R8
2807 SUBL 16(SP), R8
2808 MOVL 1(DX)(R8*1), R10
2809 MOVQ SI, R8
2810 SHRQ $0x08, R8
2811 CMPL R8, R10
2812 JNE no_repeat_found_encodeBlockAsm12B
2813 LEAL 1(CX), SI
2814 MOVL 12(SP), DI
2815 MOVL SI, BX
2816 SUBL 16(SP), BX
2817 JZ repeat_extend_back_end_encodeBlockAsm12B
2818
2819repeat_extend_back_loop_encodeBlockAsm12B:
2820 CMPL SI, DI
2821 JBE repeat_extend_back_end_encodeBlockAsm12B
2822 MOVB -1(DX)(BX*1), R8
2823 MOVB -1(DX)(SI*1), R9
2824 CMPB R8, R9
2825 JNE repeat_extend_back_end_encodeBlockAsm12B
2826 LEAL -1(SI), SI
2827 DECL BX
2828 JNZ repeat_extend_back_loop_encodeBlockAsm12B
2829
2830repeat_extend_back_end_encodeBlockAsm12B:
2831 MOVL 12(SP), BX
2832 CMPL BX, SI
2833 JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B
2834 MOVL SI, R8
2835 MOVL SI, 12(SP)
2836 LEAQ (DX)(BX*1), R9
2837 SUBL BX, R8
2838 LEAL -1(R8), BX
2839 CMPL BX, $0x3c
2840 JB one_byte_repeat_emit_encodeBlockAsm12B
2841 CMPL BX, $0x00000100
2842 JB two_bytes_repeat_emit_encodeBlockAsm12B
2843 JB three_bytes_repeat_emit_encodeBlockAsm12B
2844
2845three_bytes_repeat_emit_encodeBlockAsm12B:
2846 MOVB $0xf4, (AX)
2847 MOVW BX, 1(AX)
2848 ADDQ $0x03, AX
2849 JMP memmove_long_repeat_emit_encodeBlockAsm12B
2850
2851two_bytes_repeat_emit_encodeBlockAsm12B:
2852 MOVB $0xf0, (AX)
2853 MOVB BL, 1(AX)
2854 ADDQ $0x02, AX
2855 CMPL BX, $0x40
2856 JB memmove_repeat_emit_encodeBlockAsm12B
2857 JMP memmove_long_repeat_emit_encodeBlockAsm12B
2858
2859one_byte_repeat_emit_encodeBlockAsm12B:
2860 SHLB $0x02, BL
2861 MOVB BL, (AX)
2862 ADDQ $0x01, AX
2863
2864memmove_repeat_emit_encodeBlockAsm12B:
2865 LEAQ (AX)(R8*1), BX
2866
2867 // genMemMoveShort
2868 CMPQ R8, $0x08
2869 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8
2870 CMPQ R8, $0x10
2871 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16
2872 CMPQ R8, $0x20
2873 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32
2874 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64
2875
2876emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8:
2877 MOVQ (R9), R10
2878 MOVQ R10, (AX)
2879 JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
2880
2881emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16:
2882 MOVQ (R9), R10
2883 MOVQ -8(R9)(R8*1), R9
2884 MOVQ R10, (AX)
2885 MOVQ R9, -8(AX)(R8*1)
2886 JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
2887
2888emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32:
2889 MOVOU (R9), X0
2890 MOVOU -16(R9)(R8*1), X1
2891 MOVOU X0, (AX)
2892 MOVOU X1, -16(AX)(R8*1)
2893 JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
2894
2895emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64:
2896 MOVOU (R9), X0
2897 MOVOU 16(R9), X1
2898 MOVOU -32(R9)(R8*1), X2
2899 MOVOU -16(R9)(R8*1), X3
2900 MOVOU X0, (AX)
2901 MOVOU X1, 16(AX)
2902 MOVOU X2, -32(AX)(R8*1)
2903 MOVOU X3, -16(AX)(R8*1)
2904
2905memmove_end_copy_repeat_emit_encodeBlockAsm12B:
2906 MOVQ BX, AX
2907 JMP emit_literal_done_repeat_emit_encodeBlockAsm12B
2908
2909memmove_long_repeat_emit_encodeBlockAsm12B:
2910 LEAQ (AX)(R8*1), BX
2911
2912 // genMemMoveLong
2913 MOVOU (R9), X0
2914 MOVOU 16(R9), X1
2915 MOVOU -32(R9)(R8*1), X2
2916 MOVOU -16(R9)(R8*1), X3
2917 MOVQ R8, R11
2918 SHRQ $0x05, R11
2919 MOVQ AX, R10
2920 ANDL $0x0000001f, R10
2921 MOVQ $0x00000040, R12
2922 SUBQ R10, R12
2923 DECQ R11
2924 JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
2925 LEAQ -32(R9)(R12*1), R10
2926 LEAQ -32(AX)(R12*1), R13
2927
2928emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back:
2929 MOVOU (R10), X4
2930 MOVOU 16(R10), X5
2931 MOVOA X4, (R13)
2932 MOVOA X5, 16(R13)
2933 ADDQ $0x20, R13
2934 ADDQ $0x20, R10
2935 ADDQ $0x20, R12
2936 DECQ R11
2937 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back
2938
2939emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
2940 MOVOU -32(R9)(R12*1), X4
2941 MOVOU -16(R9)(R12*1), X5
2942 MOVOA X4, -32(AX)(R12*1)
2943 MOVOA X5, -16(AX)(R12*1)
2944 ADDQ $0x20, R12
2945 CMPQ R8, R12
2946 JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
2947 MOVOU X0, (AX)
2948 MOVOU X1, 16(AX)
2949 MOVOU X2, -32(AX)(R8*1)
2950 MOVOU X3, -16(AX)(R8*1)
2951 MOVQ BX, AX
2952
2953emit_literal_done_repeat_emit_encodeBlockAsm12B:
2954 ADDL $0x05, CX
2955 MOVL CX, BX
2956 SUBL 16(SP), BX
2957 MOVQ src_len+32(FP), R8
2958 SUBL CX, R8
2959 LEAQ (DX)(CX*1), R9
2960 LEAQ (DX)(BX*1), BX
2961
2962 // matchLen
2963 XORL R11, R11
2964
2965matchlen_loopback_16_repeat_extend_encodeBlockAsm12B:
2966 CMPL R8, $0x10
2967 JB matchlen_match8_repeat_extend_encodeBlockAsm12B
2968 MOVQ (R9)(R11*1), R10
2969 MOVQ 8(R9)(R11*1), R12
2970 XORQ (BX)(R11*1), R10
2971 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B
2972 XORQ 8(BX)(R11*1), R12
2973 JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm12B
2974 LEAL -16(R8), R8
2975 LEAL 16(R11), R11
2976 JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm12B
2977
2978matchlen_bsf_16repeat_extend_encodeBlockAsm12B:
2979#ifdef GOAMD64_v3
2980 TZCNTQ R12, R12
2981
2982#else
2983 BSFQ R12, R12
2984
2985#endif
2986 SARQ $0x03, R12
2987 LEAL 8(R11)(R12*1), R11
2988 JMP repeat_extend_forward_end_encodeBlockAsm12B
2989
2990matchlen_match8_repeat_extend_encodeBlockAsm12B:
2991 CMPL R8, $0x08
2992 JB matchlen_match4_repeat_extend_encodeBlockAsm12B
2993 MOVQ (R9)(R11*1), R10
2994 XORQ (BX)(R11*1), R10
2995 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B
2996 LEAL -8(R8), R8
2997 LEAL 8(R11), R11
2998 JMP matchlen_match4_repeat_extend_encodeBlockAsm12B
2999
3000matchlen_bsf_8_repeat_extend_encodeBlockAsm12B:
3001#ifdef GOAMD64_v3
3002 TZCNTQ R10, R10
3003
3004#else
3005 BSFQ R10, R10
3006
3007#endif
3008 SARQ $0x03, R10
3009 LEAL (R11)(R10*1), R11
3010 JMP repeat_extend_forward_end_encodeBlockAsm12B
3011
3012matchlen_match4_repeat_extend_encodeBlockAsm12B:
3013 CMPL R8, $0x04
3014 JB matchlen_match2_repeat_extend_encodeBlockAsm12B
3015 MOVL (R9)(R11*1), R10
3016 CMPL (BX)(R11*1), R10
3017 JNE matchlen_match2_repeat_extend_encodeBlockAsm12B
3018 LEAL -4(R8), R8
3019 LEAL 4(R11), R11
3020
3021matchlen_match2_repeat_extend_encodeBlockAsm12B:
3022 CMPL R8, $0x01
3023 JE matchlen_match1_repeat_extend_encodeBlockAsm12B
3024 JB repeat_extend_forward_end_encodeBlockAsm12B
3025 MOVW (R9)(R11*1), R10
3026 CMPW (BX)(R11*1), R10
3027 JNE matchlen_match1_repeat_extend_encodeBlockAsm12B
3028 LEAL 2(R11), R11
3029 SUBL $0x02, R8
3030 JZ repeat_extend_forward_end_encodeBlockAsm12B
3031
3032matchlen_match1_repeat_extend_encodeBlockAsm12B:
3033 MOVB (R9)(R11*1), R10
3034 CMPB (BX)(R11*1), R10
3035 JNE repeat_extend_forward_end_encodeBlockAsm12B
3036 LEAL 1(R11), R11
3037
3038repeat_extend_forward_end_encodeBlockAsm12B:
3039 ADDL R11, CX
3040 MOVL CX, BX
3041 SUBL SI, BX
3042 MOVL 16(SP), SI
3043 TESTL DI, DI
3044 JZ repeat_as_copy_encodeBlockAsm12B
3045
3046 // emitRepeat
3047 MOVL BX, DI
3048 LEAL -4(BX), BX
3049 CMPL DI, $0x08
3050 JBE repeat_two_match_repeat_encodeBlockAsm12B
3051 CMPL DI, $0x0c
3052 JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B
3053 CMPL SI, $0x00000800
3054 JB repeat_two_offset_match_repeat_encodeBlockAsm12B
3055
3056cant_repeat_two_offset_match_repeat_encodeBlockAsm12B:
3057 CMPL BX, $0x00000104
3058 JB repeat_three_match_repeat_encodeBlockAsm12B
3059 LEAL -256(BX), BX
3060 MOVW $0x0019, (AX)
3061 MOVW BX, 2(AX)
3062 ADDQ $0x04, AX
3063 JMP repeat_end_emit_encodeBlockAsm12B
3064
3065repeat_three_match_repeat_encodeBlockAsm12B:
3066 LEAL -4(BX), BX
3067 MOVW $0x0015, (AX)
3068 MOVB BL, 2(AX)
3069 ADDQ $0x03, AX
3070 JMP repeat_end_emit_encodeBlockAsm12B
3071
3072repeat_two_match_repeat_encodeBlockAsm12B:
3073 SHLL $0x02, BX
3074 ORL $0x01, BX
3075 MOVW BX, (AX)
3076 ADDQ $0x02, AX
3077 JMP repeat_end_emit_encodeBlockAsm12B
3078
3079repeat_two_offset_match_repeat_encodeBlockAsm12B:
3080 XORQ DI, DI
3081 LEAL 1(DI)(BX*4), BX
3082 MOVB SI, 1(AX)
3083 SARL $0x08, SI
3084 SHLL $0x05, SI
3085 ORL SI, BX
3086 MOVB BL, (AX)
3087 ADDQ $0x02, AX
3088 JMP repeat_end_emit_encodeBlockAsm12B
3089
3090repeat_as_copy_encodeBlockAsm12B:
3091 // emitCopy
3092 CMPL BX, $0x40
3093 JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B
3094 CMPL SI, $0x00000800
3095 JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B
3096 MOVL $0x00000001, DI
3097 LEAL 16(DI), DI
3098 MOVB SI, 1(AX)
3099 SHRL $0x08, SI
3100 SHLL $0x05, SI
3101 ORL SI, DI
3102 MOVB DI, (AX)
3103 ADDQ $0x02, AX
3104 SUBL $0x08, BX
3105
3106 // emitRepeat
3107 LEAL -4(BX), BX
3108 JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
3109 MOVL BX, DI
3110 LEAL -4(BX), BX
3111 CMPL DI, $0x08
3112 JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
3113 CMPL DI, $0x0c
3114 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
3115 CMPL SI, $0x00000800
3116 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
3117
3118cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
3119 CMPL BX, $0x00000104
3120 JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
3121 LEAL -256(BX), BX
3122 MOVW $0x0019, (AX)
3123 MOVW BX, 2(AX)
3124 ADDQ $0x04, AX
3125 JMP repeat_end_emit_encodeBlockAsm12B
3126
3127repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
3128 LEAL -4(BX), BX
3129 MOVW $0x0015, (AX)
3130 MOVB BL, 2(AX)
3131 ADDQ $0x03, AX
3132 JMP repeat_end_emit_encodeBlockAsm12B
3133
3134repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
3135 SHLL $0x02, BX
3136 ORL $0x01, BX
3137 MOVW BX, (AX)
3138 ADDQ $0x02, AX
3139 JMP repeat_end_emit_encodeBlockAsm12B
3140
3141repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
3142 XORQ DI, DI
3143 LEAL 1(DI)(BX*4), BX
3144 MOVB SI, 1(AX)
3145 SARL $0x08, SI
3146 SHLL $0x05, SI
3147 ORL SI, BX
3148 MOVB BL, (AX)
3149 ADDQ $0x02, AX
3150 JMP repeat_end_emit_encodeBlockAsm12B
3151
3152long_offset_short_repeat_as_copy_encodeBlockAsm12B:
3153 MOVB $0xee, (AX)
3154 MOVW SI, 1(AX)
3155 LEAL -60(BX), BX
3156 ADDQ $0x03, AX
3157
3158 // emitRepeat
3159 MOVL BX, DI
3160 LEAL -4(BX), BX
3161 CMPL DI, $0x08
3162 JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
3163 CMPL DI, $0x0c
3164 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
3165 CMPL SI, $0x00000800
3166 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
3167
3168cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
3169 CMPL BX, $0x00000104
3170 JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
3171 LEAL -256(BX), BX
3172 MOVW $0x0019, (AX)
3173 MOVW BX, 2(AX)
3174 ADDQ $0x04, AX
3175 JMP repeat_end_emit_encodeBlockAsm12B
3176
3177repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
3178 LEAL -4(BX), BX
3179 MOVW $0x0015, (AX)
3180 MOVB BL, 2(AX)
3181 ADDQ $0x03, AX
3182 JMP repeat_end_emit_encodeBlockAsm12B
3183
3184repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
3185 SHLL $0x02, BX
3186 ORL $0x01, BX
3187 MOVW BX, (AX)
3188 ADDQ $0x02, AX
3189 JMP repeat_end_emit_encodeBlockAsm12B
3190
3191repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
3192 XORQ DI, DI
3193 LEAL 1(DI)(BX*4), BX
3194 MOVB SI, 1(AX)
3195 SARL $0x08, SI
3196 SHLL $0x05, SI
3197 ORL SI, BX
3198 MOVB BL, (AX)
3199 ADDQ $0x02, AX
3200 JMP repeat_end_emit_encodeBlockAsm12B
3201
3202two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B:
3203 MOVL BX, DI
3204 SHLL $0x02, DI
3205 CMPL BX, $0x0c
3206 JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
3207 CMPL SI, $0x00000800
3208 JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
3209 LEAL -15(DI), DI
3210 MOVB SI, 1(AX)
3211 SHRL $0x08, SI
3212 SHLL $0x05, SI
3213 ORL SI, DI
3214 MOVB DI, (AX)
3215 ADDQ $0x02, AX
3216 JMP repeat_end_emit_encodeBlockAsm12B
3217
3218emit_copy_three_repeat_as_copy_encodeBlockAsm12B:
3219 LEAL -2(DI), DI
3220 MOVB DI, (AX)
3221 MOVW SI, 1(AX)
3222 ADDQ $0x03, AX
3223
3224repeat_end_emit_encodeBlockAsm12B:
3225 MOVL CX, 12(SP)
3226 JMP search_loop_encodeBlockAsm12B
3227
3228no_repeat_found_encodeBlockAsm12B:
3229 CMPL (DX)(BX*1), SI
3230 JEQ candidate_match_encodeBlockAsm12B
3231 SHRQ $0x08, SI
3232 MOVL 24(SP)(R9*4), BX
3233 LEAL 2(CX), R8
3234 CMPL (DX)(DI*1), SI
3235 JEQ candidate2_match_encodeBlockAsm12B
3236 MOVL R8, 24(SP)(R9*4)
3237 SHRQ $0x08, SI
3238 CMPL (DX)(BX*1), SI
3239 JEQ candidate3_match_encodeBlockAsm12B
3240 MOVL 20(SP), CX
3241 JMP search_loop_encodeBlockAsm12B
3242
3243candidate3_match_encodeBlockAsm12B:
3244 ADDL $0x02, CX
3245 JMP candidate_match_encodeBlockAsm12B
3246
3247candidate2_match_encodeBlockAsm12B:
3248 MOVL R8, 24(SP)(R9*4)
3249 INCL CX
3250 MOVL DI, BX
3251
3252candidate_match_encodeBlockAsm12B:
3253 MOVL 12(SP), SI
3254 TESTL BX, BX
3255 JZ match_extend_back_end_encodeBlockAsm12B
3256
3257match_extend_back_loop_encodeBlockAsm12B:
3258 CMPL CX, SI
3259 JBE match_extend_back_end_encodeBlockAsm12B
3260 MOVB -1(DX)(BX*1), DI
3261 MOVB -1(DX)(CX*1), R8
3262 CMPB DI, R8
3263 JNE match_extend_back_end_encodeBlockAsm12B
3264 LEAL -1(CX), CX
3265 DECL BX
3266 JZ match_extend_back_end_encodeBlockAsm12B
3267 JMP match_extend_back_loop_encodeBlockAsm12B
3268
3269match_extend_back_end_encodeBlockAsm12B:
3270 MOVL CX, SI
3271 SUBL 12(SP), SI
3272 LEAQ 3(AX)(SI*1), SI
3273 CMPQ SI, (SP)
3274 JB match_dst_size_check_encodeBlockAsm12B
3275 MOVQ $0x00000000, ret+48(FP)
3276 RET
3277
3278match_dst_size_check_encodeBlockAsm12B:
3279 MOVL CX, SI
3280 MOVL 12(SP), DI
3281 CMPL DI, SI
3282 JEQ emit_literal_done_match_emit_encodeBlockAsm12B
3283 MOVL SI, R8
3284 MOVL SI, 12(SP)
3285 LEAQ (DX)(DI*1), SI
3286 SUBL DI, R8
3287 LEAL -1(R8), DI
3288 CMPL DI, $0x3c
3289 JB one_byte_match_emit_encodeBlockAsm12B
3290 CMPL DI, $0x00000100
3291 JB two_bytes_match_emit_encodeBlockAsm12B
3292 JB three_bytes_match_emit_encodeBlockAsm12B
3293
3294three_bytes_match_emit_encodeBlockAsm12B:
3295 MOVB $0xf4, (AX)
3296 MOVW DI, 1(AX)
3297 ADDQ $0x03, AX
3298 JMP memmove_long_match_emit_encodeBlockAsm12B
3299
3300two_bytes_match_emit_encodeBlockAsm12B:
3301 MOVB $0xf0, (AX)
3302 MOVB DI, 1(AX)
3303 ADDQ $0x02, AX
3304 CMPL DI, $0x40
3305 JB memmove_match_emit_encodeBlockAsm12B
3306 JMP memmove_long_match_emit_encodeBlockAsm12B
3307
3308one_byte_match_emit_encodeBlockAsm12B:
3309 SHLB $0x02, DI
3310 MOVB DI, (AX)
3311 ADDQ $0x01, AX
3312
3313memmove_match_emit_encodeBlockAsm12B:
3314 LEAQ (AX)(R8*1), DI
3315
3316 // genMemMoveShort
3317 CMPQ R8, $0x08
3318 JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8
3319 CMPQ R8, $0x10
3320 JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16
3321 CMPQ R8, $0x20
3322 JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32
3323 JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64
3324
3325emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8:
3326 MOVQ (SI), R9
3327 MOVQ R9, (AX)
3328 JMP memmove_end_copy_match_emit_encodeBlockAsm12B
3329
3330emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16:
3331 MOVQ (SI), R9
3332 MOVQ -8(SI)(R8*1), SI
3333 MOVQ R9, (AX)
3334 MOVQ SI, -8(AX)(R8*1)
3335 JMP memmove_end_copy_match_emit_encodeBlockAsm12B
3336
3337emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32:
3338 MOVOU (SI), X0
3339 MOVOU -16(SI)(R8*1), X1
3340 MOVOU X0, (AX)
3341 MOVOU X1, -16(AX)(R8*1)
3342 JMP memmove_end_copy_match_emit_encodeBlockAsm12B
3343
3344emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64:
3345 MOVOU (SI), X0
3346 MOVOU 16(SI), X1
3347 MOVOU -32(SI)(R8*1), X2
3348 MOVOU -16(SI)(R8*1), X3
3349 MOVOU X0, (AX)
3350 MOVOU X1, 16(AX)
3351 MOVOU X2, -32(AX)(R8*1)
3352 MOVOU X3, -16(AX)(R8*1)
3353
3354memmove_end_copy_match_emit_encodeBlockAsm12B:
3355 MOVQ DI, AX
3356 JMP emit_literal_done_match_emit_encodeBlockAsm12B
3357
3358memmove_long_match_emit_encodeBlockAsm12B:
3359 LEAQ (AX)(R8*1), DI
3360
3361 // genMemMoveLong
3362 MOVOU (SI), X0
3363 MOVOU 16(SI), X1
3364 MOVOU -32(SI)(R8*1), X2
3365 MOVOU -16(SI)(R8*1), X3
3366 MOVQ R8, R10
3367 SHRQ $0x05, R10
3368 MOVQ AX, R9
3369 ANDL $0x0000001f, R9
3370 MOVQ $0x00000040, R11
3371 SUBQ R9, R11
3372 DECQ R10
3373 JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
3374 LEAQ -32(SI)(R11*1), R9
3375 LEAQ -32(AX)(R11*1), R12
3376
3377emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back:
3378 MOVOU (R9), X4
3379 MOVOU 16(R9), X5
3380 MOVOA X4, (R12)
3381 MOVOA X5, 16(R12)
3382 ADDQ $0x20, R12
3383 ADDQ $0x20, R9
3384 ADDQ $0x20, R11
3385 DECQ R10
3386 JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back
3387
3388emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
3389 MOVOU -32(SI)(R11*1), X4
3390 MOVOU -16(SI)(R11*1), X5
3391 MOVOA X4, -32(AX)(R11*1)
3392 MOVOA X5, -16(AX)(R11*1)
3393 ADDQ $0x20, R11
3394 CMPQ R8, R11
3395 JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
3396 MOVOU X0, (AX)
3397 MOVOU X1, 16(AX)
3398 MOVOU X2, -32(AX)(R8*1)
3399 MOVOU X3, -16(AX)(R8*1)
3400 MOVQ DI, AX
3401
3402emit_literal_done_match_emit_encodeBlockAsm12B:
3403match_nolit_loop_encodeBlockAsm12B:
3404 MOVL CX, SI
3405 SUBL BX, SI
3406 MOVL SI, 16(SP)
3407 ADDL $0x04, CX
3408 ADDL $0x04, BX
3409 MOVQ src_len+32(FP), SI
3410 SUBL CX, SI
3411 LEAQ (DX)(CX*1), DI
3412 LEAQ (DX)(BX*1), BX
3413
3414 // matchLen
3415 XORL R9, R9
3416
3417matchlen_loopback_16_match_nolit_encodeBlockAsm12B:
3418 CMPL SI, $0x10
3419 JB matchlen_match8_match_nolit_encodeBlockAsm12B
3420 MOVQ (DI)(R9*1), R8
3421 MOVQ 8(DI)(R9*1), R10
3422 XORQ (BX)(R9*1), R8
3423 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B
3424 XORQ 8(BX)(R9*1), R10
3425 JNZ matchlen_bsf_16match_nolit_encodeBlockAsm12B
3426 LEAL -16(SI), SI
3427 LEAL 16(R9), R9
3428 JMP matchlen_loopback_16_match_nolit_encodeBlockAsm12B
3429
3430matchlen_bsf_16match_nolit_encodeBlockAsm12B:
3431#ifdef GOAMD64_v3
3432 TZCNTQ R10, R10
3433
3434#else
3435 BSFQ R10, R10
3436
3437#endif
3438 SARQ $0x03, R10
3439 LEAL 8(R9)(R10*1), R9
3440 JMP match_nolit_end_encodeBlockAsm12B
3441
3442matchlen_match8_match_nolit_encodeBlockAsm12B:
3443 CMPL SI, $0x08
3444 JB matchlen_match4_match_nolit_encodeBlockAsm12B
3445 MOVQ (DI)(R9*1), R8
3446 XORQ (BX)(R9*1), R8
3447 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B
3448 LEAL -8(SI), SI
3449 LEAL 8(R9), R9
3450 JMP matchlen_match4_match_nolit_encodeBlockAsm12B
3451
3452matchlen_bsf_8_match_nolit_encodeBlockAsm12B:
3453#ifdef GOAMD64_v3
3454 TZCNTQ R8, R8
3455
3456#else
3457 BSFQ R8, R8
3458
3459#endif
3460 SARQ $0x03, R8
3461 LEAL (R9)(R8*1), R9
3462 JMP match_nolit_end_encodeBlockAsm12B
3463
3464matchlen_match4_match_nolit_encodeBlockAsm12B:
3465 CMPL SI, $0x04
3466 JB matchlen_match2_match_nolit_encodeBlockAsm12B
3467 MOVL (DI)(R9*1), R8
3468 CMPL (BX)(R9*1), R8
3469 JNE matchlen_match2_match_nolit_encodeBlockAsm12B
3470 LEAL -4(SI), SI
3471 LEAL 4(R9), R9
3472
3473matchlen_match2_match_nolit_encodeBlockAsm12B:
3474 CMPL SI, $0x01
3475 JE matchlen_match1_match_nolit_encodeBlockAsm12B
3476 JB match_nolit_end_encodeBlockAsm12B
3477 MOVW (DI)(R9*1), R8
3478 CMPW (BX)(R9*1), R8
3479 JNE matchlen_match1_match_nolit_encodeBlockAsm12B
3480 LEAL 2(R9), R9
3481 SUBL $0x02, SI
3482 JZ match_nolit_end_encodeBlockAsm12B
3483
3484matchlen_match1_match_nolit_encodeBlockAsm12B:
3485 MOVB (DI)(R9*1), R8
3486 CMPB (BX)(R9*1), R8
3487 JNE match_nolit_end_encodeBlockAsm12B
3488 LEAL 1(R9), R9
3489
3490match_nolit_end_encodeBlockAsm12B:
3491 ADDL R9, CX
3492 MOVL 16(SP), BX
3493 ADDL $0x04, R9
3494 MOVL CX, 12(SP)
3495
3496 // emitCopy
3497 CMPL R9, $0x40
3498 JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B
3499 CMPL BX, $0x00000800
3500 JAE long_offset_short_match_nolit_encodeBlockAsm12B
3501 MOVL $0x00000001, SI
3502 LEAL 16(SI), SI
3503 MOVB BL, 1(AX)
3504 SHRL $0x08, BX
3505 SHLL $0x05, BX
3506 ORL BX, SI
3507 MOVB SI, (AX)
3508 ADDQ $0x02, AX
3509 SUBL $0x08, R9
3510
3511 // emitRepeat
3512 LEAL -4(R9), R9
3513 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
3514 MOVL R9, SI
3515 LEAL -4(R9), R9
3516 CMPL SI, $0x08
3517 JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
3518 CMPL SI, $0x0c
3519 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
3520 CMPL BX, $0x00000800
3521 JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
3522
3523cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
3524 CMPL R9, $0x00000104
3525 JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
3526 LEAL -256(R9), R9
3527 MOVW $0x0019, (AX)
3528 MOVW R9, 2(AX)
3529 ADDQ $0x04, AX
3530 JMP match_nolit_emitcopy_end_encodeBlockAsm12B
3531
3532repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
3533 LEAL -4(R9), R9
3534 MOVW $0x0015, (AX)
3535 MOVB R9, 2(AX)
3536 ADDQ $0x03, AX
3537 JMP match_nolit_emitcopy_end_encodeBlockAsm12B
3538
3539repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
3540 SHLL $0x02, R9
3541 ORL $0x01, R9
3542 MOVW R9, (AX)
3543 ADDQ $0x02, AX
3544 JMP match_nolit_emitcopy_end_encodeBlockAsm12B
3545
3546repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
3547 XORQ SI, SI
3548 LEAL 1(SI)(R9*4), R9
3549 MOVB BL, 1(AX)
3550 SARL $0x08, BX
3551 SHLL $0x05, BX
3552 ORL BX, R9
3553 MOVB R9, (AX)
3554 ADDQ $0x02, AX
3555 JMP match_nolit_emitcopy_end_encodeBlockAsm12B
3556
3557long_offset_short_match_nolit_encodeBlockAsm12B:
3558 MOVB $0xee, (AX)
3559 MOVW BX, 1(AX)
3560 LEAL -60(R9), R9
3561 ADDQ $0x03, AX
3562
3563 // emitRepeat
3564 MOVL R9, SI
3565 LEAL -4(R9), R9
3566 CMPL SI, $0x08
3567 JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short
3568 CMPL SI, $0x0c
3569 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
3570 CMPL BX, $0x00000800
3571 JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
3572
3573cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
3574 CMPL R9, $0x00000104
3575 JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short
3576 LEAL -256(R9), R9
3577 MOVW $0x0019, (AX)
3578 MOVW R9, 2(AX)
3579 ADDQ $0x04, AX
3580 JMP match_nolit_emitcopy_end_encodeBlockAsm12B
3581
3582repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short:
3583 LEAL -4(R9), R9
3584 MOVW $0x0015, (AX)
3585 MOVB R9, 2(AX)
3586 ADDQ $0x03, AX
3587 JMP match_nolit_emitcopy_end_encodeBlockAsm12B
3588
3589repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short:
3590 SHLL $0x02, R9
3591 ORL $0x01, R9
3592 MOVW R9, (AX)
3593 ADDQ $0x02, AX
3594 JMP match_nolit_emitcopy_end_encodeBlockAsm12B
3595
3596repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
3597 XORQ SI, SI
3598 LEAL 1(SI)(R9*4), R9
3599 MOVB BL, 1(AX)
3600 SARL $0x08, BX
3601 SHLL $0x05, BX
3602 ORL BX, R9
3603 MOVB R9, (AX)
3604 ADDQ $0x02, AX
3605 JMP match_nolit_emitcopy_end_encodeBlockAsm12B
3606
3607two_byte_offset_short_match_nolit_encodeBlockAsm12B:
3608 MOVL R9, SI
3609 SHLL $0x02, SI
3610 CMPL R9, $0x0c
3611 JAE emit_copy_three_match_nolit_encodeBlockAsm12B
3612 CMPL BX, $0x00000800
3613 JAE emit_copy_three_match_nolit_encodeBlockAsm12B
3614 LEAL -15(SI), SI
3615 MOVB BL, 1(AX)
3616 SHRL $0x08, BX
3617 SHLL $0x05, BX
3618 ORL BX, SI
3619 MOVB SI, (AX)
3620 ADDQ $0x02, AX
3621 JMP match_nolit_emitcopy_end_encodeBlockAsm12B
3622
3623emit_copy_three_match_nolit_encodeBlockAsm12B:
3624 LEAL -2(SI), SI
3625 MOVB SI, (AX)
3626 MOVW BX, 1(AX)
3627 ADDQ $0x03, AX
3628
3629match_nolit_emitcopy_end_encodeBlockAsm12B:
3630 CMPL CX, 8(SP)
3631 JAE emit_remainder_encodeBlockAsm12B
3632 MOVQ -2(DX)(CX*1), SI
3633 CMPQ AX, (SP)
3634 JB match_nolit_dst_ok_encodeBlockAsm12B
3635 MOVQ $0x00000000, ret+48(FP)
3636 RET
3637
3638match_nolit_dst_ok_encodeBlockAsm12B:
3639 MOVQ $0x000000cf1bbcdcbb, R8
3640 MOVQ SI, DI
3641 SHRQ $0x10, SI
3642 MOVQ SI, BX
3643 SHLQ $0x18, DI
3644 IMULQ R8, DI
3645 SHRQ $0x34, DI
3646 SHLQ $0x18, BX
3647 IMULQ R8, BX
3648 SHRQ $0x34, BX
3649 LEAL -2(CX), R8
3650 LEAQ 24(SP)(BX*4), R9
3651 MOVL (R9), BX
3652 MOVL R8, 24(SP)(DI*4)
3653 MOVL CX, (R9)
3654 CMPL (DX)(BX*1), SI
3655 JEQ match_nolit_loop_encodeBlockAsm12B
3656 INCL CX
3657 JMP search_loop_encodeBlockAsm12B
3658
3659emit_remainder_encodeBlockAsm12B:
3660 MOVQ src_len+32(FP), CX
3661 SUBL 12(SP), CX
3662 LEAQ 3(AX)(CX*1), CX
3663 CMPQ CX, (SP)
3664 JB emit_remainder_ok_encodeBlockAsm12B
3665 MOVQ $0x00000000, ret+48(FP)
3666 RET
3667
3668emit_remainder_ok_encodeBlockAsm12B:
3669 MOVQ src_len+32(FP), CX
3670 MOVL 12(SP), BX
3671 CMPL BX, CX
3672 JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B
3673 MOVL CX, SI
3674 MOVL CX, 12(SP)
3675 LEAQ (DX)(BX*1), CX
3676 SUBL BX, SI
3677 LEAL -1(SI), DX
3678 CMPL DX, $0x3c
3679 JB one_byte_emit_remainder_encodeBlockAsm12B
3680 CMPL DX, $0x00000100
3681 JB two_bytes_emit_remainder_encodeBlockAsm12B
3682 JB three_bytes_emit_remainder_encodeBlockAsm12B
3683
3684three_bytes_emit_remainder_encodeBlockAsm12B:
3685 MOVB $0xf4, (AX)
3686 MOVW DX, 1(AX)
3687 ADDQ $0x03, AX
3688 JMP memmove_long_emit_remainder_encodeBlockAsm12B
3689
3690two_bytes_emit_remainder_encodeBlockAsm12B:
3691 MOVB $0xf0, (AX)
3692 MOVB DL, 1(AX)
3693 ADDQ $0x02, AX
3694 CMPL DX, $0x40
3695 JB memmove_emit_remainder_encodeBlockAsm12B
3696 JMP memmove_long_emit_remainder_encodeBlockAsm12B
3697
3698one_byte_emit_remainder_encodeBlockAsm12B:
3699 SHLB $0x02, DL
3700 MOVB DL, (AX)
3701 ADDQ $0x01, AX
3702
3703memmove_emit_remainder_encodeBlockAsm12B:
3704 LEAQ (AX)(SI*1), DX
3705 MOVL SI, BX
3706
3707 // genMemMoveShort
3708 CMPQ BX, $0x03
3709 JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2
3710 JE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3
3711 CMPQ BX, $0x08
3712 JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7
3713 CMPQ BX, $0x10
3714 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16
3715 CMPQ BX, $0x20
3716 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32
3717 JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64
3718
3719emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2:
3720 MOVB (CX), SI
3721 MOVB -1(CX)(BX*1), CL
3722 MOVB SI, (AX)
3723 MOVB CL, -1(AX)(BX*1)
3724 JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
3725
3726emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3:
3727 MOVW (CX), SI
3728 MOVB 2(CX), CL
3729 MOVW SI, (AX)
3730 MOVB CL, 2(AX)
3731 JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
3732
3733emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7:
3734 MOVL (CX), SI
3735 MOVL -4(CX)(BX*1), CX
3736 MOVL SI, (AX)
3737 MOVL CX, -4(AX)(BX*1)
3738 JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
3739
3740emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16:
3741 MOVQ (CX), SI
3742 MOVQ -8(CX)(BX*1), CX
3743 MOVQ SI, (AX)
3744 MOVQ CX, -8(AX)(BX*1)
3745 JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
3746
3747emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32:
3748 MOVOU (CX), X0
3749 MOVOU -16(CX)(BX*1), X1
3750 MOVOU X0, (AX)
3751 MOVOU X1, -16(AX)(BX*1)
3752 JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
3753
3754emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64:
3755 MOVOU (CX), X0
3756 MOVOU 16(CX), X1
3757 MOVOU -32(CX)(BX*1), X2
3758 MOVOU -16(CX)(BX*1), X3
3759 MOVOU X0, (AX)
3760 MOVOU X1, 16(AX)
3761 MOVOU X2, -32(AX)(BX*1)
3762 MOVOU X3, -16(AX)(BX*1)
3763
3764memmove_end_copy_emit_remainder_encodeBlockAsm12B:
3765 MOVQ DX, AX
3766 JMP emit_literal_done_emit_remainder_encodeBlockAsm12B
3767
3768memmove_long_emit_remainder_encodeBlockAsm12B:
3769 LEAQ (AX)(SI*1), DX
3770 MOVL SI, BX
3771
3772 // genMemMoveLong
3773 MOVOU (CX), X0
3774 MOVOU 16(CX), X1
3775 MOVOU -32(CX)(BX*1), X2
3776 MOVOU -16(CX)(BX*1), X3
3777 MOVQ BX, DI
3778 SHRQ $0x05, DI
3779 MOVQ AX, SI
3780 ANDL $0x0000001f, SI
3781 MOVQ $0x00000040, R8
3782 SUBQ SI, R8
3783 DECQ DI
3784 JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
3785 LEAQ -32(CX)(R8*1), SI
3786 LEAQ -32(AX)(R8*1), R9
3787
3788emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back:
3789 MOVOU (SI), X4
3790 MOVOU 16(SI), X5
3791 MOVOA X4, (R9)
3792 MOVOA X5, 16(R9)
3793 ADDQ $0x20, R9
3794 ADDQ $0x20, SI
3795 ADDQ $0x20, R8
3796 DECQ DI
3797 JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back
3798
3799emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32:
3800 MOVOU -32(CX)(R8*1), X4
3801 MOVOU -16(CX)(R8*1), X5
3802 MOVOA X4, -32(AX)(R8*1)
3803 MOVOA X5, -16(AX)(R8*1)
3804 ADDQ $0x20, R8
3805 CMPQ BX, R8
3806 JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
3807 MOVOU X0, (AX)
3808 MOVOU X1, 16(AX)
3809 MOVOU X2, -32(AX)(BX*1)
3810 MOVOU X3, -16(AX)(BX*1)
3811 MOVQ DX, AX
3812
3813emit_literal_done_emit_remainder_encodeBlockAsm12B:
3814 MOVQ dst_base+0(FP), CX
3815 SUBQ CX, AX
3816 MOVQ AX, ret+48(FP)
3817 RET
3818
3819// func encodeBlockAsm10B(dst []byte, src []byte) int
3820// Requires: BMI, SSE2
3821TEXT ·encodeBlockAsm10B(SB), $4120-56
3822 MOVQ dst_base+0(FP), AX
3823 MOVQ $0x00000020, CX
3824 LEAQ 24(SP), DX
3825 PXOR X0, X0
3826
3827zero_loop_encodeBlockAsm10B:
3828 MOVOU X0, (DX)
3829 MOVOU X0, 16(DX)
3830 MOVOU X0, 32(DX)
3831 MOVOU X0, 48(DX)
3832 MOVOU X0, 64(DX)
3833 MOVOU X0, 80(DX)
3834 MOVOU X0, 96(DX)
3835 MOVOU X0, 112(DX)
3836 ADDQ $0x80, DX
3837 DECQ CX
3838 JNZ zero_loop_encodeBlockAsm10B
3839 MOVL $0x00000000, 12(SP)
3840 MOVQ src_len+32(FP), CX
3841 LEAQ -9(CX), DX
3842 LEAQ -8(CX), BX
3843 MOVL BX, 8(SP)
3844 SHRQ $0x05, CX
3845 SUBL CX, DX
3846 LEAQ (AX)(DX*1), DX
3847 MOVQ DX, (SP)
3848 MOVL $0x00000001, CX
3849 MOVL CX, 16(SP)
3850 MOVQ src_base+24(FP), DX
3851
3852search_loop_encodeBlockAsm10B:
3853 MOVL CX, BX
3854 SUBL 12(SP), BX
3855 SHRL $0x05, BX
3856 LEAL 4(CX)(BX*1), BX
3857 CMPL BX, 8(SP)
3858 JAE emit_remainder_encodeBlockAsm10B
3859 MOVQ (DX)(CX*1), SI
3860 MOVL BX, 20(SP)
3861 MOVQ $0x9e3779b1, R8
3862 MOVQ SI, R9
3863 MOVQ SI, R10
3864 SHRQ $0x08, R10
3865 SHLQ $0x20, R9
3866 IMULQ R8, R9
3867 SHRQ $0x36, R9
3868 SHLQ $0x20, R10
3869 IMULQ R8, R10
3870 SHRQ $0x36, R10
3871 MOVL 24(SP)(R9*4), BX
3872 MOVL 24(SP)(R10*4), DI
3873 MOVL CX, 24(SP)(R9*4)
3874 LEAL 1(CX), R9
3875 MOVL R9, 24(SP)(R10*4)
3876 MOVQ SI, R9
3877 SHRQ $0x10, R9
3878 SHLQ $0x20, R9
3879 IMULQ R8, R9
3880 SHRQ $0x36, R9
3881 MOVL CX, R8
3882 SUBL 16(SP), R8
3883 MOVL 1(DX)(R8*1), R10
3884 MOVQ SI, R8
3885 SHRQ $0x08, R8
3886 CMPL R8, R10
3887 JNE no_repeat_found_encodeBlockAsm10B
3888 LEAL 1(CX), SI
3889 MOVL 12(SP), DI
3890 MOVL SI, BX
3891 SUBL 16(SP), BX
3892 JZ repeat_extend_back_end_encodeBlockAsm10B
3893
3894repeat_extend_back_loop_encodeBlockAsm10B:
3895 CMPL SI, DI
3896 JBE repeat_extend_back_end_encodeBlockAsm10B
3897 MOVB -1(DX)(BX*1), R8
3898 MOVB -1(DX)(SI*1), R9
3899 CMPB R8, R9
3900 JNE repeat_extend_back_end_encodeBlockAsm10B
3901 LEAL -1(SI), SI
3902 DECL BX
3903 JNZ repeat_extend_back_loop_encodeBlockAsm10B
3904
3905repeat_extend_back_end_encodeBlockAsm10B:
3906 MOVL 12(SP), BX
3907 CMPL BX, SI
3908 JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B
3909 MOVL SI, R8
3910 MOVL SI, 12(SP)
3911 LEAQ (DX)(BX*1), R9
3912 SUBL BX, R8
3913 LEAL -1(R8), BX
3914 CMPL BX, $0x3c
3915 JB one_byte_repeat_emit_encodeBlockAsm10B
3916 CMPL BX, $0x00000100
3917 JB two_bytes_repeat_emit_encodeBlockAsm10B
3918 JB three_bytes_repeat_emit_encodeBlockAsm10B
3919
3920three_bytes_repeat_emit_encodeBlockAsm10B:
3921 MOVB $0xf4, (AX)
3922 MOVW BX, 1(AX)
3923 ADDQ $0x03, AX
3924 JMP memmove_long_repeat_emit_encodeBlockAsm10B
3925
3926two_bytes_repeat_emit_encodeBlockAsm10B:
3927 MOVB $0xf0, (AX)
3928 MOVB BL, 1(AX)
3929 ADDQ $0x02, AX
3930 CMPL BX, $0x40
3931 JB memmove_repeat_emit_encodeBlockAsm10B
3932 JMP memmove_long_repeat_emit_encodeBlockAsm10B
3933
3934one_byte_repeat_emit_encodeBlockAsm10B:
3935 SHLB $0x02, BL
3936 MOVB BL, (AX)
3937 ADDQ $0x01, AX
3938
3939memmove_repeat_emit_encodeBlockAsm10B:
3940 LEAQ (AX)(R8*1), BX
3941
3942 // genMemMoveShort
3943 CMPQ R8, $0x08
3944 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8
3945 CMPQ R8, $0x10
3946 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16
3947 CMPQ R8, $0x20
3948 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32
3949 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64
3950
3951emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8:
3952 MOVQ (R9), R10
3953 MOVQ R10, (AX)
3954 JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
3955
3956emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16:
3957 MOVQ (R9), R10
3958 MOVQ -8(R9)(R8*1), R9
3959 MOVQ R10, (AX)
3960 MOVQ R9, -8(AX)(R8*1)
3961 JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
3962
3963emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32:
3964 MOVOU (R9), X0
3965 MOVOU -16(R9)(R8*1), X1
3966 MOVOU X0, (AX)
3967 MOVOU X1, -16(AX)(R8*1)
3968 JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
3969
3970emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64:
3971 MOVOU (R9), X0
3972 MOVOU 16(R9), X1
3973 MOVOU -32(R9)(R8*1), X2
3974 MOVOU -16(R9)(R8*1), X3
3975 MOVOU X0, (AX)
3976 MOVOU X1, 16(AX)
3977 MOVOU X2, -32(AX)(R8*1)
3978 MOVOU X3, -16(AX)(R8*1)
3979
3980memmove_end_copy_repeat_emit_encodeBlockAsm10B:
3981 MOVQ BX, AX
3982 JMP emit_literal_done_repeat_emit_encodeBlockAsm10B
3983
3984memmove_long_repeat_emit_encodeBlockAsm10B:
3985 LEAQ (AX)(R8*1), BX
3986
3987 // genMemMoveLong
3988 MOVOU (R9), X0
3989 MOVOU 16(R9), X1
3990 MOVOU -32(R9)(R8*1), X2
3991 MOVOU -16(R9)(R8*1), X3
3992 MOVQ R8, R11
3993 SHRQ $0x05, R11
3994 MOVQ AX, R10
3995 ANDL $0x0000001f, R10
3996 MOVQ $0x00000040, R12
3997 SUBQ R10, R12
3998 DECQ R11
3999 JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
4000 LEAQ -32(R9)(R12*1), R10
4001 LEAQ -32(AX)(R12*1), R13
4002
4003emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back:
4004 MOVOU (R10), X4
4005 MOVOU 16(R10), X5
4006 MOVOA X4, (R13)
4007 MOVOA X5, 16(R13)
4008 ADDQ $0x20, R13
4009 ADDQ $0x20, R10
4010 ADDQ $0x20, R12
4011 DECQ R11
4012 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back
4013
4014emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
4015 MOVOU -32(R9)(R12*1), X4
4016 MOVOU -16(R9)(R12*1), X5
4017 MOVOA X4, -32(AX)(R12*1)
4018 MOVOA X5, -16(AX)(R12*1)
4019 ADDQ $0x20, R12
4020 CMPQ R8, R12
4021 JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
4022 MOVOU X0, (AX)
4023 MOVOU X1, 16(AX)
4024 MOVOU X2, -32(AX)(R8*1)
4025 MOVOU X3, -16(AX)(R8*1)
4026 MOVQ BX, AX
4027
4028emit_literal_done_repeat_emit_encodeBlockAsm10B:
4029 ADDL $0x05, CX
4030 MOVL CX, BX
4031 SUBL 16(SP), BX
4032 MOVQ src_len+32(FP), R8
4033 SUBL CX, R8
4034 LEAQ (DX)(CX*1), R9
4035 LEAQ (DX)(BX*1), BX
4036
4037 // matchLen
4038 XORL R11, R11
4039
4040matchlen_loopback_16_repeat_extend_encodeBlockAsm10B:
4041 CMPL R8, $0x10
4042 JB matchlen_match8_repeat_extend_encodeBlockAsm10B
4043 MOVQ (R9)(R11*1), R10
4044 MOVQ 8(R9)(R11*1), R12
4045 XORQ (BX)(R11*1), R10
4046 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B
4047 XORQ 8(BX)(R11*1), R12
4048 JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm10B
4049 LEAL -16(R8), R8
4050 LEAL 16(R11), R11
4051 JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm10B
4052
4053matchlen_bsf_16repeat_extend_encodeBlockAsm10B:
4054#ifdef GOAMD64_v3
4055 TZCNTQ R12, R12
4056
4057#else
4058 BSFQ R12, R12
4059
4060#endif
4061 SARQ $0x03, R12
4062 LEAL 8(R11)(R12*1), R11
4063 JMP repeat_extend_forward_end_encodeBlockAsm10B
4064
4065matchlen_match8_repeat_extend_encodeBlockAsm10B:
4066 CMPL R8, $0x08
4067 JB matchlen_match4_repeat_extend_encodeBlockAsm10B
4068 MOVQ (R9)(R11*1), R10
4069 XORQ (BX)(R11*1), R10
4070 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B
4071 LEAL -8(R8), R8
4072 LEAL 8(R11), R11
4073 JMP matchlen_match4_repeat_extend_encodeBlockAsm10B
4074
4075matchlen_bsf_8_repeat_extend_encodeBlockAsm10B:
4076#ifdef GOAMD64_v3
4077 TZCNTQ R10, R10
4078
4079#else
4080 BSFQ R10, R10
4081
4082#endif
4083 SARQ $0x03, R10
4084 LEAL (R11)(R10*1), R11
4085 JMP repeat_extend_forward_end_encodeBlockAsm10B
4086
4087matchlen_match4_repeat_extend_encodeBlockAsm10B:
4088 CMPL R8, $0x04
4089 JB matchlen_match2_repeat_extend_encodeBlockAsm10B
4090 MOVL (R9)(R11*1), R10
4091 CMPL (BX)(R11*1), R10
4092 JNE matchlen_match2_repeat_extend_encodeBlockAsm10B
4093 LEAL -4(R8), R8
4094 LEAL 4(R11), R11
4095
4096matchlen_match2_repeat_extend_encodeBlockAsm10B:
4097 CMPL R8, $0x01
4098 JE matchlen_match1_repeat_extend_encodeBlockAsm10B
4099 JB repeat_extend_forward_end_encodeBlockAsm10B
4100 MOVW (R9)(R11*1), R10
4101 CMPW (BX)(R11*1), R10
4102 JNE matchlen_match1_repeat_extend_encodeBlockAsm10B
4103 LEAL 2(R11), R11
4104 SUBL $0x02, R8
4105 JZ repeat_extend_forward_end_encodeBlockAsm10B
4106
4107matchlen_match1_repeat_extend_encodeBlockAsm10B:
4108 MOVB (R9)(R11*1), R10
4109 CMPB (BX)(R11*1), R10
4110 JNE repeat_extend_forward_end_encodeBlockAsm10B
4111 LEAL 1(R11), R11
4112
4113repeat_extend_forward_end_encodeBlockAsm10B:
4114 ADDL R11, CX
4115 MOVL CX, BX
4116 SUBL SI, BX
4117 MOVL 16(SP), SI
4118 TESTL DI, DI
4119 JZ repeat_as_copy_encodeBlockAsm10B
4120
4121 // emitRepeat
4122 MOVL BX, DI
4123 LEAL -4(BX), BX
4124 CMPL DI, $0x08
4125 JBE repeat_two_match_repeat_encodeBlockAsm10B
4126 CMPL DI, $0x0c
4127 JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B
4128 CMPL SI, $0x00000800
4129 JB repeat_two_offset_match_repeat_encodeBlockAsm10B
4130
4131cant_repeat_two_offset_match_repeat_encodeBlockAsm10B:
4132 CMPL BX, $0x00000104
4133 JB repeat_three_match_repeat_encodeBlockAsm10B
4134 LEAL -256(BX), BX
4135 MOVW $0x0019, (AX)
4136 MOVW BX, 2(AX)
4137 ADDQ $0x04, AX
4138 JMP repeat_end_emit_encodeBlockAsm10B
4139
4140repeat_three_match_repeat_encodeBlockAsm10B:
4141 LEAL -4(BX), BX
4142 MOVW $0x0015, (AX)
4143 MOVB BL, 2(AX)
4144 ADDQ $0x03, AX
4145 JMP repeat_end_emit_encodeBlockAsm10B
4146
4147repeat_two_match_repeat_encodeBlockAsm10B:
4148 SHLL $0x02, BX
4149 ORL $0x01, BX
4150 MOVW BX, (AX)
4151 ADDQ $0x02, AX
4152 JMP repeat_end_emit_encodeBlockAsm10B
4153
4154repeat_two_offset_match_repeat_encodeBlockAsm10B:
4155 XORQ DI, DI
4156 LEAL 1(DI)(BX*4), BX
4157 MOVB SI, 1(AX)
4158 SARL $0x08, SI
4159 SHLL $0x05, SI
4160 ORL SI, BX
4161 MOVB BL, (AX)
4162 ADDQ $0x02, AX
4163 JMP repeat_end_emit_encodeBlockAsm10B
4164
4165repeat_as_copy_encodeBlockAsm10B:
4166 // emitCopy
4167 CMPL BX, $0x40
4168 JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B
4169 CMPL SI, $0x00000800
4170 JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B
4171 MOVL $0x00000001, DI
4172 LEAL 16(DI), DI
4173 MOVB SI, 1(AX)
4174 SHRL $0x08, SI
4175 SHLL $0x05, SI
4176 ORL SI, DI
4177 MOVB DI, (AX)
4178 ADDQ $0x02, AX
4179 SUBL $0x08, BX
4180
4181 // emitRepeat
4182 LEAL -4(BX), BX
4183 JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
4184 MOVL BX, DI
4185 LEAL -4(BX), BX
4186 CMPL DI, $0x08
4187 JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
4188 CMPL DI, $0x0c
4189 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
4190 CMPL SI, $0x00000800
4191 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
4192
4193cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
4194 CMPL BX, $0x00000104
4195 JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
4196 LEAL -256(BX), BX
4197 MOVW $0x0019, (AX)
4198 MOVW BX, 2(AX)
4199 ADDQ $0x04, AX
4200 JMP repeat_end_emit_encodeBlockAsm10B
4201
4202repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
4203 LEAL -4(BX), BX
4204 MOVW $0x0015, (AX)
4205 MOVB BL, 2(AX)
4206 ADDQ $0x03, AX
4207 JMP repeat_end_emit_encodeBlockAsm10B
4208
4209repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
4210 SHLL $0x02, BX
4211 ORL $0x01, BX
4212 MOVW BX, (AX)
4213 ADDQ $0x02, AX
4214 JMP repeat_end_emit_encodeBlockAsm10B
4215
4216repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
4217 XORQ DI, DI
4218 LEAL 1(DI)(BX*4), BX
4219 MOVB SI, 1(AX)
4220 SARL $0x08, SI
4221 SHLL $0x05, SI
4222 ORL SI, BX
4223 MOVB BL, (AX)
4224 ADDQ $0x02, AX
4225 JMP repeat_end_emit_encodeBlockAsm10B
4226
4227long_offset_short_repeat_as_copy_encodeBlockAsm10B:
4228 MOVB $0xee, (AX)
4229 MOVW SI, 1(AX)
4230 LEAL -60(BX), BX
4231 ADDQ $0x03, AX
4232
4233 // emitRepeat
4234 MOVL BX, DI
4235 LEAL -4(BX), BX
4236 CMPL DI, $0x08
4237 JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
4238 CMPL DI, $0x0c
4239 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
4240 CMPL SI, $0x00000800
4241 JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
4242
4243cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
4244 CMPL BX, $0x00000104
4245 JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
4246 LEAL -256(BX), BX
4247 MOVW $0x0019, (AX)
4248 MOVW BX, 2(AX)
4249 ADDQ $0x04, AX
4250 JMP repeat_end_emit_encodeBlockAsm10B
4251
4252repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
4253 LEAL -4(BX), BX
4254 MOVW $0x0015, (AX)
4255 MOVB BL, 2(AX)
4256 ADDQ $0x03, AX
4257 JMP repeat_end_emit_encodeBlockAsm10B
4258
4259repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
4260 SHLL $0x02, BX
4261 ORL $0x01, BX
4262 MOVW BX, (AX)
4263 ADDQ $0x02, AX
4264 JMP repeat_end_emit_encodeBlockAsm10B
4265
4266repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
4267 XORQ DI, DI
4268 LEAL 1(DI)(BX*4), BX
4269 MOVB SI, 1(AX)
4270 SARL $0x08, SI
4271 SHLL $0x05, SI
4272 ORL SI, BX
4273 MOVB BL, (AX)
4274 ADDQ $0x02, AX
4275 JMP repeat_end_emit_encodeBlockAsm10B
4276
4277two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B:
4278 MOVL BX, DI
4279 SHLL $0x02, DI
4280 CMPL BX, $0x0c
4281 JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
4282 CMPL SI, $0x00000800
4283 JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
4284 LEAL -15(DI), DI
4285 MOVB SI, 1(AX)
4286 SHRL $0x08, SI
4287 SHLL $0x05, SI
4288 ORL SI, DI
4289 MOVB DI, (AX)
4290 ADDQ $0x02, AX
4291 JMP repeat_end_emit_encodeBlockAsm10B
4292
4293emit_copy_three_repeat_as_copy_encodeBlockAsm10B:
4294 LEAL -2(DI), DI
4295 MOVB DI, (AX)
4296 MOVW SI, 1(AX)
4297 ADDQ $0x03, AX
4298
4299repeat_end_emit_encodeBlockAsm10B:
4300 MOVL CX, 12(SP)
4301 JMP search_loop_encodeBlockAsm10B
4302
4303no_repeat_found_encodeBlockAsm10B:
4304 CMPL (DX)(BX*1), SI
4305 JEQ candidate_match_encodeBlockAsm10B
4306 SHRQ $0x08, SI
4307 MOVL 24(SP)(R9*4), BX
4308 LEAL 2(CX), R8
4309 CMPL (DX)(DI*1), SI
4310 JEQ candidate2_match_encodeBlockAsm10B
4311 MOVL R8, 24(SP)(R9*4)
4312 SHRQ $0x08, SI
4313 CMPL (DX)(BX*1), SI
4314 JEQ candidate3_match_encodeBlockAsm10B
4315 MOVL 20(SP), CX
4316 JMP search_loop_encodeBlockAsm10B
4317
4318candidate3_match_encodeBlockAsm10B:
4319 ADDL $0x02, CX
4320 JMP candidate_match_encodeBlockAsm10B
4321
4322candidate2_match_encodeBlockAsm10B:
4323 MOVL R8, 24(SP)(R9*4)
4324 INCL CX
4325 MOVL DI, BX
4326
4327candidate_match_encodeBlockAsm10B:
4328 MOVL 12(SP), SI
4329 TESTL BX, BX
4330 JZ match_extend_back_end_encodeBlockAsm10B
4331
4332match_extend_back_loop_encodeBlockAsm10B:
4333 CMPL CX, SI
4334 JBE match_extend_back_end_encodeBlockAsm10B
4335 MOVB -1(DX)(BX*1), DI
4336 MOVB -1(DX)(CX*1), R8
4337 CMPB DI, R8
4338 JNE match_extend_back_end_encodeBlockAsm10B
4339 LEAL -1(CX), CX
4340 DECL BX
4341 JZ match_extend_back_end_encodeBlockAsm10B
4342 JMP match_extend_back_loop_encodeBlockAsm10B
4343
4344match_extend_back_end_encodeBlockAsm10B:
4345 MOVL CX, SI
4346 SUBL 12(SP), SI
4347 LEAQ 3(AX)(SI*1), SI
4348 CMPQ SI, (SP)
4349 JB match_dst_size_check_encodeBlockAsm10B
4350 MOVQ $0x00000000, ret+48(FP)
4351 RET
4352
4353match_dst_size_check_encodeBlockAsm10B:
4354 MOVL CX, SI
4355 MOVL 12(SP), DI
4356 CMPL DI, SI
4357 JEQ emit_literal_done_match_emit_encodeBlockAsm10B
4358 MOVL SI, R8
4359 MOVL SI, 12(SP)
4360 LEAQ (DX)(DI*1), SI
4361 SUBL DI, R8
4362 LEAL -1(R8), DI
4363 CMPL DI, $0x3c
4364 JB one_byte_match_emit_encodeBlockAsm10B
4365 CMPL DI, $0x00000100
4366 JB two_bytes_match_emit_encodeBlockAsm10B
4367 JB three_bytes_match_emit_encodeBlockAsm10B
4368
4369three_bytes_match_emit_encodeBlockAsm10B:
4370 MOVB $0xf4, (AX)
4371 MOVW DI, 1(AX)
4372 ADDQ $0x03, AX
4373 JMP memmove_long_match_emit_encodeBlockAsm10B
4374
4375two_bytes_match_emit_encodeBlockAsm10B:
4376 MOVB $0xf0, (AX)
4377 MOVB DI, 1(AX)
4378 ADDQ $0x02, AX
4379 CMPL DI, $0x40
4380 JB memmove_match_emit_encodeBlockAsm10B
4381 JMP memmove_long_match_emit_encodeBlockAsm10B
4382
4383one_byte_match_emit_encodeBlockAsm10B:
4384 SHLB $0x02, DI
4385 MOVB DI, (AX)
4386 ADDQ $0x01, AX
4387
4388memmove_match_emit_encodeBlockAsm10B:
4389 LEAQ (AX)(R8*1), DI
4390
4391 // genMemMoveShort
4392 CMPQ R8, $0x08
4393 JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8
4394 CMPQ R8, $0x10
4395 JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16
4396 CMPQ R8, $0x20
4397 JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32
4398 JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64
4399
4400emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8:
4401 MOVQ (SI), R9
4402 MOVQ R9, (AX)
4403 JMP memmove_end_copy_match_emit_encodeBlockAsm10B
4404
4405emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16:
4406 MOVQ (SI), R9
4407 MOVQ -8(SI)(R8*1), SI
4408 MOVQ R9, (AX)
4409 MOVQ SI, -8(AX)(R8*1)
4410 JMP memmove_end_copy_match_emit_encodeBlockAsm10B
4411
4412emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32:
4413 MOVOU (SI), X0
4414 MOVOU -16(SI)(R8*1), X1
4415 MOVOU X0, (AX)
4416 MOVOU X1, -16(AX)(R8*1)
4417 JMP memmove_end_copy_match_emit_encodeBlockAsm10B
4418
4419emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64:
4420 MOVOU (SI), X0
4421 MOVOU 16(SI), X1
4422 MOVOU -32(SI)(R8*1), X2
4423 MOVOU -16(SI)(R8*1), X3
4424 MOVOU X0, (AX)
4425 MOVOU X1, 16(AX)
4426 MOVOU X2, -32(AX)(R8*1)
4427 MOVOU X3, -16(AX)(R8*1)
4428
4429memmove_end_copy_match_emit_encodeBlockAsm10B:
4430 MOVQ DI, AX
4431 JMP emit_literal_done_match_emit_encodeBlockAsm10B
4432
4433memmove_long_match_emit_encodeBlockAsm10B:
4434 LEAQ (AX)(R8*1), DI
4435
4436 // genMemMoveLong
4437 MOVOU (SI), X0
4438 MOVOU 16(SI), X1
4439 MOVOU -32(SI)(R8*1), X2
4440 MOVOU -16(SI)(R8*1), X3
4441 MOVQ R8, R10
4442 SHRQ $0x05, R10
4443 MOVQ AX, R9
4444 ANDL $0x0000001f, R9
4445 MOVQ $0x00000040, R11
4446 SUBQ R9, R11
4447 DECQ R10
4448 JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
4449 LEAQ -32(SI)(R11*1), R9
4450 LEAQ -32(AX)(R11*1), R12
4451
4452emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back:
4453 MOVOU (R9), X4
4454 MOVOU 16(R9), X5
4455 MOVOA X4, (R12)
4456 MOVOA X5, 16(R12)
4457 ADDQ $0x20, R12
4458 ADDQ $0x20, R9
4459 ADDQ $0x20, R11
4460 DECQ R10
4461 JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back
4462
4463emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
4464 MOVOU -32(SI)(R11*1), X4
4465 MOVOU -16(SI)(R11*1), X5
4466 MOVOA X4, -32(AX)(R11*1)
4467 MOVOA X5, -16(AX)(R11*1)
4468 ADDQ $0x20, R11
4469 CMPQ R8, R11
4470 JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
4471 MOVOU X0, (AX)
4472 MOVOU X1, 16(AX)
4473 MOVOU X2, -32(AX)(R8*1)
4474 MOVOU X3, -16(AX)(R8*1)
4475 MOVQ DI, AX
4476
4477emit_literal_done_match_emit_encodeBlockAsm10B:
4478match_nolit_loop_encodeBlockAsm10B:
4479 MOVL CX, SI
4480 SUBL BX, SI
4481 MOVL SI, 16(SP)
4482 ADDL $0x04, CX
4483 ADDL $0x04, BX
4484 MOVQ src_len+32(FP), SI
4485 SUBL CX, SI
4486 LEAQ (DX)(CX*1), DI
4487 LEAQ (DX)(BX*1), BX
4488
4489 // matchLen
4490 XORL R9, R9
4491
4492matchlen_loopback_16_match_nolit_encodeBlockAsm10B:
4493 CMPL SI, $0x10
4494 JB matchlen_match8_match_nolit_encodeBlockAsm10B
4495 MOVQ (DI)(R9*1), R8
4496 MOVQ 8(DI)(R9*1), R10
4497 XORQ (BX)(R9*1), R8
4498 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B
4499 XORQ 8(BX)(R9*1), R10
4500 JNZ matchlen_bsf_16match_nolit_encodeBlockAsm10B
4501 LEAL -16(SI), SI
4502 LEAL 16(R9), R9
4503 JMP matchlen_loopback_16_match_nolit_encodeBlockAsm10B
4504
4505matchlen_bsf_16match_nolit_encodeBlockAsm10B:
4506#ifdef GOAMD64_v3
4507 TZCNTQ R10, R10
4508
4509#else
4510 BSFQ R10, R10
4511
4512#endif
4513 SARQ $0x03, R10
4514 LEAL 8(R9)(R10*1), R9
4515 JMP match_nolit_end_encodeBlockAsm10B
4516
4517matchlen_match8_match_nolit_encodeBlockAsm10B:
4518 CMPL SI, $0x08
4519 JB matchlen_match4_match_nolit_encodeBlockAsm10B
4520 MOVQ (DI)(R9*1), R8
4521 XORQ (BX)(R9*1), R8
4522 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B
4523 LEAL -8(SI), SI
4524 LEAL 8(R9), R9
4525 JMP matchlen_match4_match_nolit_encodeBlockAsm10B
4526
4527matchlen_bsf_8_match_nolit_encodeBlockAsm10B:
4528#ifdef GOAMD64_v3
4529 TZCNTQ R8, R8
4530
4531#else
4532 BSFQ R8, R8
4533
4534#endif
4535 SARQ $0x03, R8
4536 LEAL (R9)(R8*1), R9
4537 JMP match_nolit_end_encodeBlockAsm10B
4538
4539matchlen_match4_match_nolit_encodeBlockAsm10B:
4540 CMPL SI, $0x04
4541 JB matchlen_match2_match_nolit_encodeBlockAsm10B
4542 MOVL (DI)(R9*1), R8
4543 CMPL (BX)(R9*1), R8
4544 JNE matchlen_match2_match_nolit_encodeBlockAsm10B
4545 LEAL -4(SI), SI
4546 LEAL 4(R9), R9
4547
4548matchlen_match2_match_nolit_encodeBlockAsm10B:
4549 CMPL SI, $0x01
4550 JE matchlen_match1_match_nolit_encodeBlockAsm10B
4551 JB match_nolit_end_encodeBlockAsm10B
4552 MOVW (DI)(R9*1), R8
4553 CMPW (BX)(R9*1), R8
4554 JNE matchlen_match1_match_nolit_encodeBlockAsm10B
4555 LEAL 2(R9), R9
4556 SUBL $0x02, SI
4557 JZ match_nolit_end_encodeBlockAsm10B
4558
4559matchlen_match1_match_nolit_encodeBlockAsm10B:
4560 MOVB (DI)(R9*1), R8
4561 CMPB (BX)(R9*1), R8
4562 JNE match_nolit_end_encodeBlockAsm10B
4563 LEAL 1(R9), R9
4564
4565match_nolit_end_encodeBlockAsm10B:
4566 ADDL R9, CX
4567 MOVL 16(SP), BX
4568 ADDL $0x04, R9
4569 MOVL CX, 12(SP)
4570
4571 // emitCopy
4572 CMPL R9, $0x40
4573 JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B
4574 CMPL BX, $0x00000800
4575 JAE long_offset_short_match_nolit_encodeBlockAsm10B
4576 MOVL $0x00000001, SI
4577 LEAL 16(SI), SI
4578 MOVB BL, 1(AX)
4579 SHRL $0x08, BX
4580 SHLL $0x05, BX
4581 ORL BX, SI
4582 MOVB SI, (AX)
4583 ADDQ $0x02, AX
4584 SUBL $0x08, R9
4585
4586 // emitRepeat
4587 LEAL -4(R9), R9
4588 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
4589 MOVL R9, SI
4590 LEAL -4(R9), R9
4591 CMPL SI, $0x08
4592 JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
4593 CMPL SI, $0x0c
4594 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
4595 CMPL BX, $0x00000800
4596 JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
4597
4598cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
4599 CMPL R9, $0x00000104
4600 JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
4601 LEAL -256(R9), R9
4602 MOVW $0x0019, (AX)
4603 MOVW R9, 2(AX)
4604 ADDQ $0x04, AX
4605 JMP match_nolit_emitcopy_end_encodeBlockAsm10B
4606
4607repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
4608 LEAL -4(R9), R9
4609 MOVW $0x0015, (AX)
4610 MOVB R9, 2(AX)
4611 ADDQ $0x03, AX
4612 JMP match_nolit_emitcopy_end_encodeBlockAsm10B
4613
4614repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
4615 SHLL $0x02, R9
4616 ORL $0x01, R9
4617 MOVW R9, (AX)
4618 ADDQ $0x02, AX
4619 JMP match_nolit_emitcopy_end_encodeBlockAsm10B
4620
4621repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
4622 XORQ SI, SI
4623 LEAL 1(SI)(R9*4), R9
4624 MOVB BL, 1(AX)
4625 SARL $0x08, BX
4626 SHLL $0x05, BX
4627 ORL BX, R9
4628 MOVB R9, (AX)
4629 ADDQ $0x02, AX
4630 JMP match_nolit_emitcopy_end_encodeBlockAsm10B
4631
4632long_offset_short_match_nolit_encodeBlockAsm10B:
4633 MOVB $0xee, (AX)
4634 MOVW BX, 1(AX)
4635 LEAL -60(R9), R9
4636 ADDQ $0x03, AX
4637
4638 // emitRepeat
4639 MOVL R9, SI
4640 LEAL -4(R9), R9
4641 CMPL SI, $0x08
4642 JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short
4643 CMPL SI, $0x0c
4644 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
4645 CMPL BX, $0x00000800
4646 JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
4647
4648cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
4649 CMPL R9, $0x00000104
4650 JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short
4651 LEAL -256(R9), R9
4652 MOVW $0x0019, (AX)
4653 MOVW R9, 2(AX)
4654 ADDQ $0x04, AX
4655 JMP match_nolit_emitcopy_end_encodeBlockAsm10B
4656
4657repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short:
4658 LEAL -4(R9), R9
4659 MOVW $0x0015, (AX)
4660 MOVB R9, 2(AX)
4661 ADDQ $0x03, AX
4662 JMP match_nolit_emitcopy_end_encodeBlockAsm10B
4663
4664repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short:
4665 SHLL $0x02, R9
4666 ORL $0x01, R9
4667 MOVW R9, (AX)
4668 ADDQ $0x02, AX
4669 JMP match_nolit_emitcopy_end_encodeBlockAsm10B
4670
4671repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
4672 XORQ SI, SI
4673 LEAL 1(SI)(R9*4), R9
4674 MOVB BL, 1(AX)
4675 SARL $0x08, BX
4676 SHLL $0x05, BX
4677 ORL BX, R9
4678 MOVB R9, (AX)
4679 ADDQ $0x02, AX
4680 JMP match_nolit_emitcopy_end_encodeBlockAsm10B
4681
4682two_byte_offset_short_match_nolit_encodeBlockAsm10B:
4683 MOVL R9, SI
4684 SHLL $0x02, SI
4685 CMPL R9, $0x0c
4686 JAE emit_copy_three_match_nolit_encodeBlockAsm10B
4687 CMPL BX, $0x00000800
4688 JAE emit_copy_three_match_nolit_encodeBlockAsm10B
4689 LEAL -15(SI), SI
4690 MOVB BL, 1(AX)
4691 SHRL $0x08, BX
4692 SHLL $0x05, BX
4693 ORL BX, SI
4694 MOVB SI, (AX)
4695 ADDQ $0x02, AX
4696 JMP match_nolit_emitcopy_end_encodeBlockAsm10B
4697
4698emit_copy_three_match_nolit_encodeBlockAsm10B:
4699 LEAL -2(SI), SI
4700 MOVB SI, (AX)
4701 MOVW BX, 1(AX)
4702 ADDQ $0x03, AX
4703
4704match_nolit_emitcopy_end_encodeBlockAsm10B:
4705 CMPL CX, 8(SP)
4706 JAE emit_remainder_encodeBlockAsm10B
4707 MOVQ -2(DX)(CX*1), SI
4708 CMPQ AX, (SP)
4709 JB match_nolit_dst_ok_encodeBlockAsm10B
4710 MOVQ $0x00000000, ret+48(FP)
4711 RET
4712
4713match_nolit_dst_ok_encodeBlockAsm10B:
4714 MOVQ $0x9e3779b1, R8
4715 MOVQ SI, DI
4716 SHRQ $0x10, SI
4717 MOVQ SI, BX
4718 SHLQ $0x20, DI
4719 IMULQ R8, DI
4720 SHRQ $0x36, DI
4721 SHLQ $0x20, BX
4722 IMULQ R8, BX
4723 SHRQ $0x36, BX
4724 LEAL -2(CX), R8
4725 LEAQ 24(SP)(BX*4), R9
4726 MOVL (R9), BX
4727 MOVL R8, 24(SP)(DI*4)
4728 MOVL CX, (R9)
4729 CMPL (DX)(BX*1), SI
4730 JEQ match_nolit_loop_encodeBlockAsm10B
4731 INCL CX
4732 JMP search_loop_encodeBlockAsm10B
4733
4734emit_remainder_encodeBlockAsm10B:
4735 MOVQ src_len+32(FP), CX
4736 SUBL 12(SP), CX
4737 LEAQ 3(AX)(CX*1), CX
4738 CMPQ CX, (SP)
4739 JB emit_remainder_ok_encodeBlockAsm10B
4740 MOVQ $0x00000000, ret+48(FP)
4741 RET
4742
4743emit_remainder_ok_encodeBlockAsm10B:
4744 MOVQ src_len+32(FP), CX
4745 MOVL 12(SP), BX
4746 CMPL BX, CX
4747 JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B
4748 MOVL CX, SI
4749 MOVL CX, 12(SP)
4750 LEAQ (DX)(BX*1), CX
4751 SUBL BX, SI
4752 LEAL -1(SI), DX
4753 CMPL DX, $0x3c
4754 JB one_byte_emit_remainder_encodeBlockAsm10B
4755 CMPL DX, $0x00000100
4756 JB two_bytes_emit_remainder_encodeBlockAsm10B
4757 JB three_bytes_emit_remainder_encodeBlockAsm10B
4758
4759three_bytes_emit_remainder_encodeBlockAsm10B:
4760 MOVB $0xf4, (AX)
4761 MOVW DX, 1(AX)
4762 ADDQ $0x03, AX
4763 JMP memmove_long_emit_remainder_encodeBlockAsm10B
4764
4765two_bytes_emit_remainder_encodeBlockAsm10B:
4766 MOVB $0xf0, (AX)
4767 MOVB DL, 1(AX)
4768 ADDQ $0x02, AX
4769 CMPL DX, $0x40
4770 JB memmove_emit_remainder_encodeBlockAsm10B
4771 JMP memmove_long_emit_remainder_encodeBlockAsm10B
4772
4773one_byte_emit_remainder_encodeBlockAsm10B:
4774 SHLB $0x02, DL
4775 MOVB DL, (AX)
4776 ADDQ $0x01, AX
4777
4778memmove_emit_remainder_encodeBlockAsm10B:
4779 LEAQ (AX)(SI*1), DX
4780 MOVL SI, BX
4781
4782 // genMemMoveShort
4783 CMPQ BX, $0x03
4784 JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2
4785 JE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3
4786 CMPQ BX, $0x08
4787 JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7
4788 CMPQ BX, $0x10
4789 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16
4790 CMPQ BX, $0x20
4791 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32
4792 JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64
4793
4794emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2:
4795 MOVB (CX), SI
4796 MOVB -1(CX)(BX*1), CL
4797 MOVB SI, (AX)
4798 MOVB CL, -1(AX)(BX*1)
4799 JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
4800
4801emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3:
4802 MOVW (CX), SI
4803 MOVB 2(CX), CL
4804 MOVW SI, (AX)
4805 MOVB CL, 2(AX)
4806 JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
4807
4808emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7:
4809 MOVL (CX), SI
4810 MOVL -4(CX)(BX*1), CX
4811 MOVL SI, (AX)
4812 MOVL CX, -4(AX)(BX*1)
4813 JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
4814
4815emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16:
4816 MOVQ (CX), SI
4817 MOVQ -8(CX)(BX*1), CX
4818 MOVQ SI, (AX)
4819 MOVQ CX, -8(AX)(BX*1)
4820 JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
4821
4822emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32:
4823 MOVOU (CX), X0
4824 MOVOU -16(CX)(BX*1), X1
4825 MOVOU X0, (AX)
4826 MOVOU X1, -16(AX)(BX*1)
4827 JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
4828
4829emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64:
4830 MOVOU (CX), X0
4831 MOVOU 16(CX), X1
4832 MOVOU -32(CX)(BX*1), X2
4833 MOVOU -16(CX)(BX*1), X3
4834 MOVOU X0, (AX)
4835 MOVOU X1, 16(AX)
4836 MOVOU X2, -32(AX)(BX*1)
4837 MOVOU X3, -16(AX)(BX*1)
4838
4839memmove_end_copy_emit_remainder_encodeBlockAsm10B:
4840 MOVQ DX, AX
4841 JMP emit_literal_done_emit_remainder_encodeBlockAsm10B
4842
4843memmove_long_emit_remainder_encodeBlockAsm10B:
4844 LEAQ (AX)(SI*1), DX
4845 MOVL SI, BX
4846
4847 // genMemMoveLong
4848 MOVOU (CX), X0
4849 MOVOU 16(CX), X1
4850 MOVOU -32(CX)(BX*1), X2
4851 MOVOU -16(CX)(BX*1), X3
4852 MOVQ BX, DI
4853 SHRQ $0x05, DI
4854 MOVQ AX, SI
4855 ANDL $0x0000001f, SI
4856 MOVQ $0x00000040, R8
4857 SUBQ SI, R8
4858 DECQ DI
4859 JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
4860 LEAQ -32(CX)(R8*1), SI
4861 LEAQ -32(AX)(R8*1), R9
4862
4863emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back:
4864 MOVOU (SI), X4
4865 MOVOU 16(SI), X5
4866 MOVOA X4, (R9)
4867 MOVOA X5, 16(R9)
4868 ADDQ $0x20, R9
4869 ADDQ $0x20, SI
4870 ADDQ $0x20, R8
4871 DECQ DI
4872 JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back
4873
4874emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32:
4875 MOVOU -32(CX)(R8*1), X4
4876 MOVOU -16(CX)(R8*1), X5
4877 MOVOA X4, -32(AX)(R8*1)
4878 MOVOA X5, -16(AX)(R8*1)
4879 ADDQ $0x20, R8
4880 CMPQ BX, R8
4881 JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
4882 MOVOU X0, (AX)
4883 MOVOU X1, 16(AX)
4884 MOVOU X2, -32(AX)(BX*1)
4885 MOVOU X3, -16(AX)(BX*1)
4886 MOVQ DX, AX
4887
4888emit_literal_done_emit_remainder_encodeBlockAsm10B:
4889 MOVQ dst_base+0(FP), CX
4890 SUBQ CX, AX
4891 MOVQ AX, ret+48(FP)
4892 RET
4893
4894// func encodeBlockAsm8B(dst []byte, src []byte) int
4895// Requires: BMI, SSE2
4896TEXT ·encodeBlockAsm8B(SB), $1048-56
4897 MOVQ dst_base+0(FP), AX
4898 MOVQ $0x00000008, CX
4899 LEAQ 24(SP), DX
4900 PXOR X0, X0
4901
4902zero_loop_encodeBlockAsm8B:
4903 MOVOU X0, (DX)
4904 MOVOU X0, 16(DX)
4905 MOVOU X0, 32(DX)
4906 MOVOU X0, 48(DX)
4907 MOVOU X0, 64(DX)
4908 MOVOU X0, 80(DX)
4909 MOVOU X0, 96(DX)
4910 MOVOU X0, 112(DX)
4911 ADDQ $0x80, DX
4912 DECQ CX
4913 JNZ zero_loop_encodeBlockAsm8B
4914 MOVL $0x00000000, 12(SP)
4915 MOVQ src_len+32(FP), CX
4916 LEAQ -9(CX), DX
4917 LEAQ -8(CX), BX
4918 MOVL BX, 8(SP)
4919 SHRQ $0x05, CX
4920 SUBL CX, DX
4921 LEAQ (AX)(DX*1), DX
4922 MOVQ DX, (SP)
4923 MOVL $0x00000001, CX
4924 MOVL CX, 16(SP)
4925 MOVQ src_base+24(FP), DX
4926
4927search_loop_encodeBlockAsm8B:
4928 MOVL CX, BX
4929 SUBL 12(SP), BX
4930 SHRL $0x04, BX
4931 LEAL 4(CX)(BX*1), BX
4932 CMPL BX, 8(SP)
4933 JAE emit_remainder_encodeBlockAsm8B
4934 MOVQ (DX)(CX*1), SI
4935 MOVL BX, 20(SP)
4936 MOVQ $0x9e3779b1, R8
4937 MOVQ SI, R9
4938 MOVQ SI, R10
4939 SHRQ $0x08, R10
4940 SHLQ $0x20, R9
4941 IMULQ R8, R9
4942 SHRQ $0x38, R9
4943 SHLQ $0x20, R10
4944 IMULQ R8, R10
4945 SHRQ $0x38, R10
4946 MOVL 24(SP)(R9*4), BX
4947 MOVL 24(SP)(R10*4), DI
4948 MOVL CX, 24(SP)(R9*4)
4949 LEAL 1(CX), R9
4950 MOVL R9, 24(SP)(R10*4)
4951 MOVQ SI, R9
4952 SHRQ $0x10, R9
4953 SHLQ $0x20, R9
4954 IMULQ R8, R9
4955 SHRQ $0x38, R9
4956 MOVL CX, R8
4957 SUBL 16(SP), R8
4958 MOVL 1(DX)(R8*1), R10
4959 MOVQ SI, R8
4960 SHRQ $0x08, R8
4961 CMPL R8, R10
4962 JNE no_repeat_found_encodeBlockAsm8B
4963 LEAL 1(CX), SI
4964 MOVL 12(SP), DI
4965 MOVL SI, BX
4966 SUBL 16(SP), BX
4967 JZ repeat_extend_back_end_encodeBlockAsm8B
4968
4969repeat_extend_back_loop_encodeBlockAsm8B:
4970 CMPL SI, DI
4971 JBE repeat_extend_back_end_encodeBlockAsm8B
4972 MOVB -1(DX)(BX*1), R8
4973 MOVB -1(DX)(SI*1), R9
4974 CMPB R8, R9
4975 JNE repeat_extend_back_end_encodeBlockAsm8B
4976 LEAL -1(SI), SI
4977 DECL BX
4978 JNZ repeat_extend_back_loop_encodeBlockAsm8B
4979
4980repeat_extend_back_end_encodeBlockAsm8B:
4981 MOVL 12(SP), BX
4982 CMPL BX, SI
4983 JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B
4984 MOVL SI, R8
4985 MOVL SI, 12(SP)
4986 LEAQ (DX)(BX*1), R9
4987 SUBL BX, R8
4988 LEAL -1(R8), BX
4989 CMPL BX, $0x3c
4990 JB one_byte_repeat_emit_encodeBlockAsm8B
4991 CMPL BX, $0x00000100
4992 JB two_bytes_repeat_emit_encodeBlockAsm8B
4993 JB three_bytes_repeat_emit_encodeBlockAsm8B
4994
4995three_bytes_repeat_emit_encodeBlockAsm8B:
4996 MOVB $0xf4, (AX)
4997 MOVW BX, 1(AX)
4998 ADDQ $0x03, AX
4999 JMP memmove_long_repeat_emit_encodeBlockAsm8B
5000
5001two_bytes_repeat_emit_encodeBlockAsm8B:
5002 MOVB $0xf0, (AX)
5003 MOVB BL, 1(AX)
5004 ADDQ $0x02, AX
5005 CMPL BX, $0x40
5006 JB memmove_repeat_emit_encodeBlockAsm8B
5007 JMP memmove_long_repeat_emit_encodeBlockAsm8B
5008
5009one_byte_repeat_emit_encodeBlockAsm8B:
5010 SHLB $0x02, BL
5011 MOVB BL, (AX)
5012 ADDQ $0x01, AX
5013
5014memmove_repeat_emit_encodeBlockAsm8B:
5015 LEAQ (AX)(R8*1), BX
5016
5017 // genMemMoveShort
5018 CMPQ R8, $0x08
5019 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8
5020 CMPQ R8, $0x10
5021 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16
5022 CMPQ R8, $0x20
5023 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32
5024 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64
5025
5026emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8:
5027 MOVQ (R9), R10
5028 MOVQ R10, (AX)
5029 JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
5030
5031emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16:
5032 MOVQ (R9), R10
5033 MOVQ -8(R9)(R8*1), R9
5034 MOVQ R10, (AX)
5035 MOVQ R9, -8(AX)(R8*1)
5036 JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
5037
5038emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32:
5039 MOVOU (R9), X0
5040 MOVOU -16(R9)(R8*1), X1
5041 MOVOU X0, (AX)
5042 MOVOU X1, -16(AX)(R8*1)
5043 JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
5044
5045emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64:
5046 MOVOU (R9), X0
5047 MOVOU 16(R9), X1
5048 MOVOU -32(R9)(R8*1), X2
5049 MOVOU -16(R9)(R8*1), X3
5050 MOVOU X0, (AX)
5051 MOVOU X1, 16(AX)
5052 MOVOU X2, -32(AX)(R8*1)
5053 MOVOU X3, -16(AX)(R8*1)
5054
5055memmove_end_copy_repeat_emit_encodeBlockAsm8B:
5056 MOVQ BX, AX
5057 JMP emit_literal_done_repeat_emit_encodeBlockAsm8B
5058
5059memmove_long_repeat_emit_encodeBlockAsm8B:
5060 LEAQ (AX)(R8*1), BX
5061
5062 // genMemMoveLong
5063 MOVOU (R9), X0
5064 MOVOU 16(R9), X1
5065 MOVOU -32(R9)(R8*1), X2
5066 MOVOU -16(R9)(R8*1), X3
5067 MOVQ R8, R11
5068 SHRQ $0x05, R11
5069 MOVQ AX, R10
5070 ANDL $0x0000001f, R10
5071 MOVQ $0x00000040, R12
5072 SUBQ R10, R12
5073 DECQ R11
5074 JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
5075 LEAQ -32(R9)(R12*1), R10
5076 LEAQ -32(AX)(R12*1), R13
5077
5078emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back:
5079 MOVOU (R10), X4
5080 MOVOU 16(R10), X5
5081 MOVOA X4, (R13)
5082 MOVOA X5, 16(R13)
5083 ADDQ $0x20, R13
5084 ADDQ $0x20, R10
5085 ADDQ $0x20, R12
5086 DECQ R11
5087 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back
5088
5089emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
5090 MOVOU -32(R9)(R12*1), X4
5091 MOVOU -16(R9)(R12*1), X5
5092 MOVOA X4, -32(AX)(R12*1)
5093 MOVOA X5, -16(AX)(R12*1)
5094 ADDQ $0x20, R12
5095 CMPQ R8, R12
5096 JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
5097 MOVOU X0, (AX)
5098 MOVOU X1, 16(AX)
5099 MOVOU X2, -32(AX)(R8*1)
5100 MOVOU X3, -16(AX)(R8*1)
5101 MOVQ BX, AX
5102
5103emit_literal_done_repeat_emit_encodeBlockAsm8B:
5104 ADDL $0x05, CX
5105 MOVL CX, BX
5106 SUBL 16(SP), BX
5107 MOVQ src_len+32(FP), R8
5108 SUBL CX, R8
5109 LEAQ (DX)(CX*1), R9
5110 LEAQ (DX)(BX*1), BX
5111
5112 // matchLen
5113 XORL R11, R11
5114
5115matchlen_loopback_16_repeat_extend_encodeBlockAsm8B:
5116 CMPL R8, $0x10
5117 JB matchlen_match8_repeat_extend_encodeBlockAsm8B
5118 MOVQ (R9)(R11*1), R10
5119 MOVQ 8(R9)(R11*1), R12
5120 XORQ (BX)(R11*1), R10
5121 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B
5122 XORQ 8(BX)(R11*1), R12
5123 JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm8B
5124 LEAL -16(R8), R8
5125 LEAL 16(R11), R11
5126 JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm8B
5127
5128matchlen_bsf_16repeat_extend_encodeBlockAsm8B:
5129#ifdef GOAMD64_v3
5130 TZCNTQ R12, R12
5131
5132#else
5133 BSFQ R12, R12
5134
5135#endif
5136 SARQ $0x03, R12
5137 LEAL 8(R11)(R12*1), R11
5138 JMP repeat_extend_forward_end_encodeBlockAsm8B
5139
5140matchlen_match8_repeat_extend_encodeBlockAsm8B:
5141 CMPL R8, $0x08
5142 JB matchlen_match4_repeat_extend_encodeBlockAsm8B
5143 MOVQ (R9)(R11*1), R10
5144 XORQ (BX)(R11*1), R10
5145 JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B
5146 LEAL -8(R8), R8
5147 LEAL 8(R11), R11
5148 JMP matchlen_match4_repeat_extend_encodeBlockAsm8B
5149
5150matchlen_bsf_8_repeat_extend_encodeBlockAsm8B:
5151#ifdef GOAMD64_v3
5152 TZCNTQ R10, R10
5153
5154#else
5155 BSFQ R10, R10
5156
5157#endif
5158 SARQ $0x03, R10
5159 LEAL (R11)(R10*1), R11
5160 JMP repeat_extend_forward_end_encodeBlockAsm8B
5161
5162matchlen_match4_repeat_extend_encodeBlockAsm8B:
5163 CMPL R8, $0x04
5164 JB matchlen_match2_repeat_extend_encodeBlockAsm8B
5165 MOVL (R9)(R11*1), R10
5166 CMPL (BX)(R11*1), R10
5167 JNE matchlen_match2_repeat_extend_encodeBlockAsm8B
5168 LEAL -4(R8), R8
5169 LEAL 4(R11), R11
5170
5171matchlen_match2_repeat_extend_encodeBlockAsm8B:
5172 CMPL R8, $0x01
5173 JE matchlen_match1_repeat_extend_encodeBlockAsm8B
5174 JB repeat_extend_forward_end_encodeBlockAsm8B
5175 MOVW (R9)(R11*1), R10
5176 CMPW (BX)(R11*1), R10
5177 JNE matchlen_match1_repeat_extend_encodeBlockAsm8B
5178 LEAL 2(R11), R11
5179 SUBL $0x02, R8
5180 JZ repeat_extend_forward_end_encodeBlockAsm8B
5181
5182matchlen_match1_repeat_extend_encodeBlockAsm8B:
5183 MOVB (R9)(R11*1), R10
5184 CMPB (BX)(R11*1), R10
5185 JNE repeat_extend_forward_end_encodeBlockAsm8B
5186 LEAL 1(R11), R11
5187
5188repeat_extend_forward_end_encodeBlockAsm8B:
5189 ADDL R11, CX
5190 MOVL CX, BX
5191 SUBL SI, BX
5192 MOVL 16(SP), SI
5193 TESTL DI, DI
5194 JZ repeat_as_copy_encodeBlockAsm8B
5195
5196 // emitRepeat
5197 MOVL BX, SI
5198 LEAL -4(BX), BX
5199 CMPL SI, $0x08
5200 JBE repeat_two_match_repeat_encodeBlockAsm8B
5201 CMPL SI, $0x0c
5202 JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B
5203
5204cant_repeat_two_offset_match_repeat_encodeBlockAsm8B:
5205 CMPL BX, $0x00000104
5206 JB repeat_three_match_repeat_encodeBlockAsm8B
5207 LEAL -256(BX), BX
5208 MOVW $0x0019, (AX)
5209 MOVW BX, 2(AX)
5210 ADDQ $0x04, AX
5211 JMP repeat_end_emit_encodeBlockAsm8B
5212
5213repeat_three_match_repeat_encodeBlockAsm8B:
5214 LEAL -4(BX), BX
5215 MOVW $0x0015, (AX)
5216 MOVB BL, 2(AX)
5217 ADDQ $0x03, AX
5218 JMP repeat_end_emit_encodeBlockAsm8B
5219
5220repeat_two_match_repeat_encodeBlockAsm8B:
5221 SHLL $0x02, BX
5222 ORL $0x01, BX
5223 MOVW BX, (AX)
5224 ADDQ $0x02, AX
5225 JMP repeat_end_emit_encodeBlockAsm8B
5226 XORQ DI, DI
5227 LEAL 1(DI)(BX*4), BX
5228 MOVB SI, 1(AX)
5229 SARL $0x08, SI
5230 SHLL $0x05, SI
5231 ORL SI, BX
5232 MOVB BL, (AX)
5233 ADDQ $0x02, AX
5234 JMP repeat_end_emit_encodeBlockAsm8B
5235
5236repeat_as_copy_encodeBlockAsm8B:
5237 // emitCopy
5238 CMPL BX, $0x40
5239 JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B
5240 CMPL SI, $0x00000800
5241 JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B
5242 MOVL $0x00000001, DI
5243 LEAL 16(DI), DI
5244 MOVB SI, 1(AX)
5245 SHRL $0x08, SI
5246 SHLL $0x05, SI
5247 ORL SI, DI
5248 MOVB DI, (AX)
5249 ADDQ $0x02, AX
5250 SUBL $0x08, BX
5251
5252 // emitRepeat
5253 LEAL -4(BX), BX
5254 JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
5255 MOVL BX, SI
5256 LEAL -4(BX), BX
5257 CMPL SI, $0x08
5258 JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
5259 CMPL SI, $0x0c
5260 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
5261
5262cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
5263 CMPL BX, $0x00000104
5264 JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
5265 LEAL -256(BX), BX
5266 MOVW $0x0019, (AX)
5267 MOVW BX, 2(AX)
5268 ADDQ $0x04, AX
5269 JMP repeat_end_emit_encodeBlockAsm8B
5270
5271repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
5272 LEAL -4(BX), BX
5273 MOVW $0x0015, (AX)
5274 MOVB BL, 2(AX)
5275 ADDQ $0x03, AX
5276 JMP repeat_end_emit_encodeBlockAsm8B
5277
5278repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
5279 SHLL $0x02, BX
5280 ORL $0x01, BX
5281 MOVW BX, (AX)
5282 ADDQ $0x02, AX
5283 JMP repeat_end_emit_encodeBlockAsm8B
5284 XORQ DI, DI
5285 LEAL 1(DI)(BX*4), BX
5286 MOVB SI, 1(AX)
5287 SARL $0x08, SI
5288 SHLL $0x05, SI
5289 ORL SI, BX
5290 MOVB BL, (AX)
5291 ADDQ $0x02, AX
5292 JMP repeat_end_emit_encodeBlockAsm8B
5293
5294long_offset_short_repeat_as_copy_encodeBlockAsm8B:
5295 MOVB $0xee, (AX)
5296 MOVW SI, 1(AX)
5297 LEAL -60(BX), BX
5298 ADDQ $0x03, AX
5299
5300 // emitRepeat
5301 MOVL BX, SI
5302 LEAL -4(BX), BX
5303 CMPL SI, $0x08
5304 JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
5305 CMPL SI, $0x0c
5306 JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
5307
5308cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
5309 CMPL BX, $0x00000104
5310 JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
5311 LEAL -256(BX), BX
5312 MOVW $0x0019, (AX)
5313 MOVW BX, 2(AX)
5314 ADDQ $0x04, AX
5315 JMP repeat_end_emit_encodeBlockAsm8B
5316
5317repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
5318 LEAL -4(BX), BX
5319 MOVW $0x0015, (AX)
5320 MOVB BL, 2(AX)
5321 ADDQ $0x03, AX
5322 JMP repeat_end_emit_encodeBlockAsm8B
5323
5324repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
5325 SHLL $0x02, BX
5326 ORL $0x01, BX
5327 MOVW BX, (AX)
5328 ADDQ $0x02, AX
5329 JMP repeat_end_emit_encodeBlockAsm8B
5330 XORQ DI, DI
5331 LEAL 1(DI)(BX*4), BX
5332 MOVB SI, 1(AX)
5333 SARL $0x08, SI
5334 SHLL $0x05, SI
5335 ORL SI, BX
5336 MOVB BL, (AX)
5337 ADDQ $0x02, AX
5338 JMP repeat_end_emit_encodeBlockAsm8B
5339
5340two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B:
5341 MOVL BX, DI
5342 SHLL $0x02, DI
5343 CMPL BX, $0x0c
5344 JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B
5345 LEAL -15(DI), DI
5346 MOVB SI, 1(AX)
5347 SHRL $0x08, SI
5348 SHLL $0x05, SI
5349 ORL SI, DI
5350 MOVB DI, (AX)
5351 ADDQ $0x02, AX
5352 JMP repeat_end_emit_encodeBlockAsm8B
5353
5354emit_copy_three_repeat_as_copy_encodeBlockAsm8B:
5355 LEAL -2(DI), DI
5356 MOVB DI, (AX)
5357 MOVW SI, 1(AX)
5358 ADDQ $0x03, AX
5359
5360repeat_end_emit_encodeBlockAsm8B:
5361 MOVL CX, 12(SP)
5362 JMP search_loop_encodeBlockAsm8B
5363
5364no_repeat_found_encodeBlockAsm8B:
5365 CMPL (DX)(BX*1), SI
5366 JEQ candidate_match_encodeBlockAsm8B
5367 SHRQ $0x08, SI
5368 MOVL 24(SP)(R9*4), BX
5369 LEAL 2(CX), R8
5370 CMPL (DX)(DI*1), SI
5371 JEQ candidate2_match_encodeBlockAsm8B
5372 MOVL R8, 24(SP)(R9*4)
5373 SHRQ $0x08, SI
5374 CMPL (DX)(BX*1), SI
5375 JEQ candidate3_match_encodeBlockAsm8B
5376 MOVL 20(SP), CX
5377 JMP search_loop_encodeBlockAsm8B
5378
5379candidate3_match_encodeBlockAsm8B:
5380 ADDL $0x02, CX
5381 JMP candidate_match_encodeBlockAsm8B
5382
5383candidate2_match_encodeBlockAsm8B:
5384 MOVL R8, 24(SP)(R9*4)
5385 INCL CX
5386 MOVL DI, BX
5387
5388candidate_match_encodeBlockAsm8B:
5389 MOVL 12(SP), SI
5390 TESTL BX, BX
5391 JZ match_extend_back_end_encodeBlockAsm8B
5392
5393match_extend_back_loop_encodeBlockAsm8B:
5394 CMPL CX, SI
5395 JBE match_extend_back_end_encodeBlockAsm8B
5396 MOVB -1(DX)(BX*1), DI
5397 MOVB -1(DX)(CX*1), R8
5398 CMPB DI, R8
5399 JNE match_extend_back_end_encodeBlockAsm8B
5400 LEAL -1(CX), CX
5401 DECL BX
5402 JZ match_extend_back_end_encodeBlockAsm8B
5403 JMP match_extend_back_loop_encodeBlockAsm8B
5404
5405match_extend_back_end_encodeBlockAsm8B:
5406 MOVL CX, SI
5407 SUBL 12(SP), SI
5408 LEAQ 3(AX)(SI*1), SI
5409 CMPQ SI, (SP)
5410 JB match_dst_size_check_encodeBlockAsm8B
5411 MOVQ $0x00000000, ret+48(FP)
5412 RET
5413
5414match_dst_size_check_encodeBlockAsm8B:
5415 MOVL CX, SI
5416 MOVL 12(SP), DI
5417 CMPL DI, SI
5418 JEQ emit_literal_done_match_emit_encodeBlockAsm8B
5419 MOVL SI, R8
5420 MOVL SI, 12(SP)
5421 LEAQ (DX)(DI*1), SI
5422 SUBL DI, R8
5423 LEAL -1(R8), DI
5424 CMPL DI, $0x3c
5425 JB one_byte_match_emit_encodeBlockAsm8B
5426 CMPL DI, $0x00000100
5427 JB two_bytes_match_emit_encodeBlockAsm8B
5428 JB three_bytes_match_emit_encodeBlockAsm8B
5429
5430three_bytes_match_emit_encodeBlockAsm8B:
5431 MOVB $0xf4, (AX)
5432 MOVW DI, 1(AX)
5433 ADDQ $0x03, AX
5434 JMP memmove_long_match_emit_encodeBlockAsm8B
5435
5436two_bytes_match_emit_encodeBlockAsm8B:
5437 MOVB $0xf0, (AX)
5438 MOVB DI, 1(AX)
5439 ADDQ $0x02, AX
5440 CMPL DI, $0x40
5441 JB memmove_match_emit_encodeBlockAsm8B
5442 JMP memmove_long_match_emit_encodeBlockAsm8B
5443
5444one_byte_match_emit_encodeBlockAsm8B:
5445 SHLB $0x02, DI
5446 MOVB DI, (AX)
5447 ADDQ $0x01, AX
5448
5449memmove_match_emit_encodeBlockAsm8B:
5450 LEAQ (AX)(R8*1), DI
5451
5452 // genMemMoveShort
5453 CMPQ R8, $0x08
5454 JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8
5455 CMPQ R8, $0x10
5456 JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16
5457 CMPQ R8, $0x20
5458 JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32
5459 JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64
5460
5461emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8:
5462 MOVQ (SI), R9
5463 MOVQ R9, (AX)
5464 JMP memmove_end_copy_match_emit_encodeBlockAsm8B
5465
5466emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16:
5467 MOVQ (SI), R9
5468 MOVQ -8(SI)(R8*1), SI
5469 MOVQ R9, (AX)
5470 MOVQ SI, -8(AX)(R8*1)
5471 JMP memmove_end_copy_match_emit_encodeBlockAsm8B
5472
5473emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32:
5474 MOVOU (SI), X0
5475 MOVOU -16(SI)(R8*1), X1
5476 MOVOU X0, (AX)
5477 MOVOU X1, -16(AX)(R8*1)
5478 JMP memmove_end_copy_match_emit_encodeBlockAsm8B
5479
5480emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64:
5481 MOVOU (SI), X0
5482 MOVOU 16(SI), X1
5483 MOVOU -32(SI)(R8*1), X2
5484 MOVOU -16(SI)(R8*1), X3
5485 MOVOU X0, (AX)
5486 MOVOU X1, 16(AX)
5487 MOVOU X2, -32(AX)(R8*1)
5488 MOVOU X3, -16(AX)(R8*1)
5489
5490memmove_end_copy_match_emit_encodeBlockAsm8B:
5491 MOVQ DI, AX
5492 JMP emit_literal_done_match_emit_encodeBlockAsm8B
5493
5494memmove_long_match_emit_encodeBlockAsm8B:
5495 LEAQ (AX)(R8*1), DI
5496
5497 // genMemMoveLong
5498 MOVOU (SI), X0
5499 MOVOU 16(SI), X1
5500 MOVOU -32(SI)(R8*1), X2
5501 MOVOU -16(SI)(R8*1), X3
5502 MOVQ R8, R10
5503 SHRQ $0x05, R10
5504 MOVQ AX, R9
5505 ANDL $0x0000001f, R9
5506 MOVQ $0x00000040, R11
5507 SUBQ R9, R11
5508 DECQ R10
5509 JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
5510 LEAQ -32(SI)(R11*1), R9
5511 LEAQ -32(AX)(R11*1), R12
5512
5513emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back:
5514 MOVOU (R9), X4
5515 MOVOU 16(R9), X5
5516 MOVOA X4, (R12)
5517 MOVOA X5, 16(R12)
5518 ADDQ $0x20, R12
5519 ADDQ $0x20, R9
5520 ADDQ $0x20, R11
5521 DECQ R10
5522 JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back
5523
5524emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
5525 MOVOU -32(SI)(R11*1), X4
5526 MOVOU -16(SI)(R11*1), X5
5527 MOVOA X4, -32(AX)(R11*1)
5528 MOVOA X5, -16(AX)(R11*1)
5529 ADDQ $0x20, R11
5530 CMPQ R8, R11
5531 JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
5532 MOVOU X0, (AX)
5533 MOVOU X1, 16(AX)
5534 MOVOU X2, -32(AX)(R8*1)
5535 MOVOU X3, -16(AX)(R8*1)
5536 MOVQ DI, AX
5537
5538emit_literal_done_match_emit_encodeBlockAsm8B:
5539match_nolit_loop_encodeBlockAsm8B:
5540 MOVL CX, SI
5541 SUBL BX, SI
5542 MOVL SI, 16(SP)
5543 ADDL $0x04, CX
5544 ADDL $0x04, BX
5545 MOVQ src_len+32(FP), SI
5546 SUBL CX, SI
5547 LEAQ (DX)(CX*1), DI
5548 LEAQ (DX)(BX*1), BX
5549
5550 // matchLen
5551 XORL R9, R9
5552
5553matchlen_loopback_16_match_nolit_encodeBlockAsm8B:
5554 CMPL SI, $0x10
5555 JB matchlen_match8_match_nolit_encodeBlockAsm8B
5556 MOVQ (DI)(R9*1), R8
5557 MOVQ 8(DI)(R9*1), R10
5558 XORQ (BX)(R9*1), R8
5559 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B
5560 XORQ 8(BX)(R9*1), R10
5561 JNZ matchlen_bsf_16match_nolit_encodeBlockAsm8B
5562 LEAL -16(SI), SI
5563 LEAL 16(R9), R9
5564 JMP matchlen_loopback_16_match_nolit_encodeBlockAsm8B
5565
5566matchlen_bsf_16match_nolit_encodeBlockAsm8B:
5567#ifdef GOAMD64_v3
5568 TZCNTQ R10, R10
5569
5570#else
5571 BSFQ R10, R10
5572
5573#endif
5574 SARQ $0x03, R10
5575 LEAL 8(R9)(R10*1), R9
5576 JMP match_nolit_end_encodeBlockAsm8B
5577
5578matchlen_match8_match_nolit_encodeBlockAsm8B:
5579 CMPL SI, $0x08
5580 JB matchlen_match4_match_nolit_encodeBlockAsm8B
5581 MOVQ (DI)(R9*1), R8
5582 XORQ (BX)(R9*1), R8
5583 JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B
5584 LEAL -8(SI), SI
5585 LEAL 8(R9), R9
5586 JMP matchlen_match4_match_nolit_encodeBlockAsm8B
5587
5588matchlen_bsf_8_match_nolit_encodeBlockAsm8B:
5589#ifdef GOAMD64_v3
5590 TZCNTQ R8, R8
5591
5592#else
5593 BSFQ R8, R8
5594
5595#endif
5596 SARQ $0x03, R8
5597 LEAL (R9)(R8*1), R9
5598 JMP match_nolit_end_encodeBlockAsm8B
5599
5600matchlen_match4_match_nolit_encodeBlockAsm8B:
5601 CMPL SI, $0x04
5602 JB matchlen_match2_match_nolit_encodeBlockAsm8B
5603 MOVL (DI)(R9*1), R8
5604 CMPL (BX)(R9*1), R8
5605 JNE matchlen_match2_match_nolit_encodeBlockAsm8B
5606 LEAL -4(SI), SI
5607 LEAL 4(R9), R9
5608
5609matchlen_match2_match_nolit_encodeBlockAsm8B:
5610 CMPL SI, $0x01
5611 JE matchlen_match1_match_nolit_encodeBlockAsm8B
5612 JB match_nolit_end_encodeBlockAsm8B
5613 MOVW (DI)(R9*1), R8
5614 CMPW (BX)(R9*1), R8
5615 JNE matchlen_match1_match_nolit_encodeBlockAsm8B
5616 LEAL 2(R9), R9
5617 SUBL $0x02, SI
5618 JZ match_nolit_end_encodeBlockAsm8B
5619
5620matchlen_match1_match_nolit_encodeBlockAsm8B:
5621 MOVB (DI)(R9*1), R8
5622 CMPB (BX)(R9*1), R8
5623 JNE match_nolit_end_encodeBlockAsm8B
5624 LEAL 1(R9), R9
5625
5626match_nolit_end_encodeBlockAsm8B:
5627 ADDL R9, CX
5628 MOVL 16(SP), BX
5629 ADDL $0x04, R9
5630 MOVL CX, 12(SP)
5631
5632 // emitCopy
5633 CMPL R9, $0x40
5634 JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B
5635 CMPL BX, $0x00000800
5636 JAE long_offset_short_match_nolit_encodeBlockAsm8B
5637 MOVL $0x00000001, SI
5638 LEAL 16(SI), SI
5639 MOVB BL, 1(AX)
5640 SHRL $0x08, BX
5641 SHLL $0x05, BX
5642 ORL BX, SI
5643 MOVB SI, (AX)
5644 ADDQ $0x02, AX
5645 SUBL $0x08, R9
5646
5647 // emitRepeat
5648 LEAL -4(R9), R9
5649 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
5650 MOVL R9, BX
5651 LEAL -4(R9), R9
5652 CMPL BX, $0x08
5653 JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
5654 CMPL BX, $0x0c
5655 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
5656
5657cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
5658 CMPL R9, $0x00000104
5659 JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
5660 LEAL -256(R9), R9
5661 MOVW $0x0019, (AX)
5662 MOVW R9, 2(AX)
5663 ADDQ $0x04, AX
5664 JMP match_nolit_emitcopy_end_encodeBlockAsm8B
5665
5666repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
5667 LEAL -4(R9), R9
5668 MOVW $0x0015, (AX)
5669 MOVB R9, 2(AX)
5670 ADDQ $0x03, AX
5671 JMP match_nolit_emitcopy_end_encodeBlockAsm8B
5672
5673repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
5674 SHLL $0x02, R9
5675 ORL $0x01, R9
5676 MOVW R9, (AX)
5677 ADDQ $0x02, AX
5678 JMP match_nolit_emitcopy_end_encodeBlockAsm8B
5679 XORQ SI, SI
5680 LEAL 1(SI)(R9*4), R9
5681 MOVB BL, 1(AX)
5682 SARL $0x08, BX
5683 SHLL $0x05, BX
5684 ORL BX, R9
5685 MOVB R9, (AX)
5686 ADDQ $0x02, AX
5687 JMP match_nolit_emitcopy_end_encodeBlockAsm8B
5688
5689long_offset_short_match_nolit_encodeBlockAsm8B:
5690 MOVB $0xee, (AX)
5691 MOVW BX, 1(AX)
5692 LEAL -60(R9), R9
5693 ADDQ $0x03, AX
5694
5695 // emitRepeat
5696 MOVL R9, BX
5697 LEAL -4(R9), R9
5698 CMPL BX, $0x08
5699 JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short
5700 CMPL BX, $0x0c
5701 JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short
5702
5703cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short:
5704 CMPL R9, $0x00000104
5705 JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short
5706 LEAL -256(R9), R9
5707 MOVW $0x0019, (AX)
5708 MOVW R9, 2(AX)
5709 ADDQ $0x04, AX
5710 JMP match_nolit_emitcopy_end_encodeBlockAsm8B
5711
5712repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short:
5713 LEAL -4(R9), R9
5714 MOVW $0x0015, (AX)
5715 MOVB R9, 2(AX)
5716 ADDQ $0x03, AX
5717 JMP match_nolit_emitcopy_end_encodeBlockAsm8B
5718
5719repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short:
5720 SHLL $0x02, R9
5721 ORL $0x01, R9
5722 MOVW R9, (AX)
5723 ADDQ $0x02, AX
5724 JMP match_nolit_emitcopy_end_encodeBlockAsm8B
5725 XORQ SI, SI
5726 LEAL 1(SI)(R9*4), R9
5727 MOVB BL, 1(AX)
5728 SARL $0x08, BX
5729 SHLL $0x05, BX
5730 ORL BX, R9
5731 MOVB R9, (AX)
5732 ADDQ $0x02, AX
5733 JMP match_nolit_emitcopy_end_encodeBlockAsm8B
5734
5735two_byte_offset_short_match_nolit_encodeBlockAsm8B:
5736 MOVL R9, SI
5737 SHLL $0x02, SI
5738 CMPL R9, $0x0c
5739 JAE emit_copy_three_match_nolit_encodeBlockAsm8B
5740 LEAL -15(SI), SI
5741 MOVB BL, 1(AX)
5742 SHRL $0x08, BX
5743 SHLL $0x05, BX
5744 ORL BX, SI
5745 MOVB SI, (AX)
5746 ADDQ $0x02, AX
5747 JMP match_nolit_emitcopy_end_encodeBlockAsm8B
5748
5749emit_copy_three_match_nolit_encodeBlockAsm8B:
5750 LEAL -2(SI), SI
5751 MOVB SI, (AX)
5752 MOVW BX, 1(AX)
5753 ADDQ $0x03, AX
5754
5755match_nolit_emitcopy_end_encodeBlockAsm8B:
5756 CMPL CX, 8(SP)
5757 JAE emit_remainder_encodeBlockAsm8B
5758 MOVQ -2(DX)(CX*1), SI
5759 CMPQ AX, (SP)
5760 JB match_nolit_dst_ok_encodeBlockAsm8B
5761 MOVQ $0x00000000, ret+48(FP)
5762 RET
5763
5764match_nolit_dst_ok_encodeBlockAsm8B:
5765 MOVQ $0x9e3779b1, R8
5766 MOVQ SI, DI
5767 SHRQ $0x10, SI
5768 MOVQ SI, BX
5769 SHLQ $0x20, DI
5770 IMULQ R8, DI
5771 SHRQ $0x38, DI
5772 SHLQ $0x20, BX
5773 IMULQ R8, BX
5774 SHRQ $0x38, BX
5775 LEAL -2(CX), R8
5776 LEAQ 24(SP)(BX*4), R9
5777 MOVL (R9), BX
5778 MOVL R8, 24(SP)(DI*4)
5779 MOVL CX, (R9)
5780 CMPL (DX)(BX*1), SI
5781 JEQ match_nolit_loop_encodeBlockAsm8B
5782 INCL CX
5783 JMP search_loop_encodeBlockAsm8B
5784
5785emit_remainder_encodeBlockAsm8B:
5786 MOVQ src_len+32(FP), CX
5787 SUBL 12(SP), CX
5788 LEAQ 3(AX)(CX*1), CX
5789 CMPQ CX, (SP)
5790 JB emit_remainder_ok_encodeBlockAsm8B
5791 MOVQ $0x00000000, ret+48(FP)
5792 RET
5793
5794emit_remainder_ok_encodeBlockAsm8B:
5795 MOVQ src_len+32(FP), CX
5796 MOVL 12(SP), BX
5797 CMPL BX, CX
5798 JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B
5799 MOVL CX, SI
5800 MOVL CX, 12(SP)
5801 LEAQ (DX)(BX*1), CX
5802 SUBL BX, SI
5803 LEAL -1(SI), DX
5804 CMPL DX, $0x3c
5805 JB one_byte_emit_remainder_encodeBlockAsm8B
5806 CMPL DX, $0x00000100
5807 JB two_bytes_emit_remainder_encodeBlockAsm8B
5808 JB three_bytes_emit_remainder_encodeBlockAsm8B
5809
5810three_bytes_emit_remainder_encodeBlockAsm8B:
5811 MOVB $0xf4, (AX)
5812 MOVW DX, 1(AX)
5813 ADDQ $0x03, AX
5814 JMP memmove_long_emit_remainder_encodeBlockAsm8B
5815
5816two_bytes_emit_remainder_encodeBlockAsm8B:
5817 MOVB $0xf0, (AX)
5818 MOVB DL, 1(AX)
5819 ADDQ $0x02, AX
5820 CMPL DX, $0x40
5821 JB memmove_emit_remainder_encodeBlockAsm8B
5822 JMP memmove_long_emit_remainder_encodeBlockAsm8B
5823
5824one_byte_emit_remainder_encodeBlockAsm8B:
5825 SHLB $0x02, DL
5826 MOVB DL, (AX)
5827 ADDQ $0x01, AX
5828
5829memmove_emit_remainder_encodeBlockAsm8B:
5830 LEAQ (AX)(SI*1), DX
5831 MOVL SI, BX
5832
5833 // genMemMoveShort
5834 CMPQ BX, $0x03
5835 JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2
5836 JE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3
5837 CMPQ BX, $0x08
5838 JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7
5839 CMPQ BX, $0x10
5840 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16
5841 CMPQ BX, $0x20
5842 JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32
5843 JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64
5844
5845emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2:
5846 MOVB (CX), SI
5847 MOVB -1(CX)(BX*1), CL
5848 MOVB SI, (AX)
5849 MOVB CL, -1(AX)(BX*1)
5850 JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
5851
5852emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3:
5853 MOVW (CX), SI
5854 MOVB 2(CX), CL
5855 MOVW SI, (AX)
5856 MOVB CL, 2(AX)
5857 JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
5858
5859emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7:
5860 MOVL (CX), SI
5861 MOVL -4(CX)(BX*1), CX
5862 MOVL SI, (AX)
5863 MOVL CX, -4(AX)(BX*1)
5864 JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
5865
5866emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16:
5867 MOVQ (CX), SI
5868 MOVQ -8(CX)(BX*1), CX
5869 MOVQ SI, (AX)
5870 MOVQ CX, -8(AX)(BX*1)
5871 JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
5872
5873emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32:
5874 MOVOU (CX), X0
5875 MOVOU -16(CX)(BX*1), X1
5876 MOVOU X0, (AX)
5877 MOVOU X1, -16(AX)(BX*1)
5878 JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
5879
5880emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64:
5881 MOVOU (CX), X0
5882 MOVOU 16(CX), X1
5883 MOVOU -32(CX)(BX*1), X2
5884 MOVOU -16(CX)(BX*1), X3
5885 MOVOU X0, (AX)
5886 MOVOU X1, 16(AX)
5887 MOVOU X2, -32(AX)(BX*1)
5888 MOVOU X3, -16(AX)(BX*1)
5889
5890memmove_end_copy_emit_remainder_encodeBlockAsm8B:
5891 MOVQ DX, AX
5892 JMP emit_literal_done_emit_remainder_encodeBlockAsm8B
5893
5894memmove_long_emit_remainder_encodeBlockAsm8B:
5895 LEAQ (AX)(SI*1), DX
5896 MOVL SI, BX
5897
5898 // genMemMoveLong
5899 MOVOU (CX), X0
5900 MOVOU 16(CX), X1
5901 MOVOU -32(CX)(BX*1), X2
5902 MOVOU -16(CX)(BX*1), X3
5903 MOVQ BX, DI
5904 SHRQ $0x05, DI
5905 MOVQ AX, SI
5906 ANDL $0x0000001f, SI
5907 MOVQ $0x00000040, R8
5908 SUBQ SI, R8
5909 DECQ DI
5910 JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
5911 LEAQ -32(CX)(R8*1), SI
5912 LEAQ -32(AX)(R8*1), R9
5913
5914emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back:
5915 MOVOU (SI), X4
5916 MOVOU 16(SI), X5
5917 MOVOA X4, (R9)
5918 MOVOA X5, 16(R9)
5919 ADDQ $0x20, R9
5920 ADDQ $0x20, SI
5921 ADDQ $0x20, R8
5922 DECQ DI
5923 JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back
5924
5925emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32:
5926 MOVOU -32(CX)(R8*1), X4
5927 MOVOU -16(CX)(R8*1), X5
5928 MOVOA X4, -32(AX)(R8*1)
5929 MOVOA X5, -16(AX)(R8*1)
5930 ADDQ $0x20, R8
5931 CMPQ BX, R8
5932 JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
5933 MOVOU X0, (AX)
5934 MOVOU X1, 16(AX)
5935 MOVOU X2, -32(AX)(BX*1)
5936 MOVOU X3, -16(AX)(BX*1)
5937 MOVQ DX, AX
5938
5939emit_literal_done_emit_remainder_encodeBlockAsm8B:
5940 MOVQ dst_base+0(FP), CX
5941 SUBQ CX, AX
5942 MOVQ AX, ret+48(FP)
5943 RET
5944
5945// func encodeBetterBlockAsm(dst []byte, src []byte) int
5946// Requires: BMI, SSE2
5947TEXT ·encodeBetterBlockAsm(SB), $589848-56
5948 MOVQ dst_base+0(FP), AX
5949 MOVQ $0x00001200, CX
5950 LEAQ 24(SP), DX
5951 PXOR X0, X0
5952
5953zero_loop_encodeBetterBlockAsm:
5954 MOVOU X0, (DX)
5955 MOVOU X0, 16(DX)
5956 MOVOU X0, 32(DX)
5957 MOVOU X0, 48(DX)
5958 MOVOU X0, 64(DX)
5959 MOVOU X0, 80(DX)
5960 MOVOU X0, 96(DX)
5961 MOVOU X0, 112(DX)
5962 ADDQ $0x80, DX
5963 DECQ CX
5964 JNZ zero_loop_encodeBetterBlockAsm
5965 MOVL $0x00000000, 12(SP)
5966 MOVQ src_len+32(FP), CX
5967 LEAQ -6(CX), DX
5968 LEAQ -8(CX), BX
5969 MOVL BX, 8(SP)
5970 SHRQ $0x05, CX
5971 SUBL CX, DX
5972 LEAQ (AX)(DX*1), DX
5973 MOVQ DX, (SP)
5974 MOVL $0x00000001, CX
5975 MOVL $0x00000000, 16(SP)
5976 MOVQ src_base+24(FP), DX
5977
5978search_loop_encodeBetterBlockAsm:
5979 MOVL CX, BX
5980 SUBL 12(SP), BX
5981 SHRL $0x07, BX
5982 CMPL BX, $0x63
5983 JBE check_maxskip_ok_encodeBetterBlockAsm
5984 LEAL 100(CX), BX
5985 JMP check_maxskip_cont_encodeBetterBlockAsm
5986
5987check_maxskip_ok_encodeBetterBlockAsm:
5988 LEAL 1(CX)(BX*1), BX
5989
5990check_maxskip_cont_encodeBetterBlockAsm:
5991 CMPL BX, 8(SP)
5992 JAE emit_remainder_encodeBetterBlockAsm
5993 MOVQ (DX)(CX*1), SI
5994 MOVL BX, 20(SP)
5995 MOVQ $0x00cf1bbcdcbfa563, R8
5996 MOVQ $0x9e3779b1, BX
5997 MOVQ SI, R9
5998 MOVQ SI, R10
5999 SHLQ $0x08, R9
6000 IMULQ R8, R9
6001 SHRQ $0x2f, R9
6002 SHLQ $0x20, R10
6003 IMULQ BX, R10
6004 SHRQ $0x32, R10
6005 MOVL 24(SP)(R9*4), BX
6006 MOVL 524312(SP)(R10*4), DI
6007 MOVL CX, 24(SP)(R9*4)
6008 MOVL CX, 524312(SP)(R10*4)
6009 MOVQ (DX)(BX*1), R9
6010 MOVQ (DX)(DI*1), R10
6011 CMPQ R9, SI
6012 JEQ candidate_match_encodeBetterBlockAsm
6013 CMPQ R10, SI
6014 JNE no_short_found_encodeBetterBlockAsm
6015 MOVL DI, BX
6016 JMP candidate_match_encodeBetterBlockAsm
6017
6018no_short_found_encodeBetterBlockAsm:
6019 CMPL R9, SI
6020 JEQ candidate_match_encodeBetterBlockAsm
6021 CMPL R10, SI
6022 JEQ candidateS_match_encodeBetterBlockAsm
6023 MOVL 20(SP), CX
6024 JMP search_loop_encodeBetterBlockAsm
6025
6026candidateS_match_encodeBetterBlockAsm:
6027 SHRQ $0x08, SI
6028 MOVQ SI, R9
6029 SHLQ $0x08, R9
6030 IMULQ R8, R9
6031 SHRQ $0x2f, R9
6032 MOVL 24(SP)(R9*4), BX
6033 INCL CX
6034 MOVL CX, 24(SP)(R9*4)
6035 CMPL (DX)(BX*1), SI
6036 JEQ candidate_match_encodeBetterBlockAsm
6037 DECL CX
6038 MOVL DI, BX
6039
6040candidate_match_encodeBetterBlockAsm:
6041 MOVL 12(SP), SI
6042 TESTL BX, BX
6043 JZ match_extend_back_end_encodeBetterBlockAsm
6044
6045match_extend_back_loop_encodeBetterBlockAsm:
6046 CMPL CX, SI
6047 JBE match_extend_back_end_encodeBetterBlockAsm
6048 MOVB -1(DX)(BX*1), DI
6049 MOVB -1(DX)(CX*1), R8
6050 CMPB DI, R8
6051 JNE match_extend_back_end_encodeBetterBlockAsm
6052 LEAL -1(CX), CX
6053 DECL BX
6054 JZ match_extend_back_end_encodeBetterBlockAsm
6055 JMP match_extend_back_loop_encodeBetterBlockAsm
6056
6057match_extend_back_end_encodeBetterBlockAsm:
6058 MOVL CX, SI
6059 SUBL 12(SP), SI
6060 LEAQ 5(AX)(SI*1), SI
6061 CMPQ SI, (SP)
6062 JB match_dst_size_check_encodeBetterBlockAsm
6063 MOVQ $0x00000000, ret+48(FP)
6064 RET
6065
6066match_dst_size_check_encodeBetterBlockAsm:
6067 MOVL CX, SI
6068 ADDL $0x04, CX
6069 ADDL $0x04, BX
6070 MOVQ src_len+32(FP), DI
6071 SUBL CX, DI
6072 LEAQ (DX)(CX*1), R8
6073 LEAQ (DX)(BX*1), R9
6074
6075 // matchLen
6076 XORL R11, R11
6077
6078matchlen_loopback_16_match_nolit_encodeBetterBlockAsm:
6079 CMPL DI, $0x10
6080 JB matchlen_match8_match_nolit_encodeBetterBlockAsm
6081 MOVQ (R8)(R11*1), R10
6082 MOVQ 8(R8)(R11*1), R12
6083 XORQ (R9)(R11*1), R10
6084 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
6085 XORQ 8(R9)(R11*1), R12
6086 JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm
6087 LEAL -16(DI), DI
6088 LEAL 16(R11), R11
6089 JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm
6090
6091matchlen_bsf_16match_nolit_encodeBetterBlockAsm:
6092#ifdef GOAMD64_v3
6093 TZCNTQ R12, R12
6094
6095#else
6096 BSFQ R12, R12
6097
6098#endif
6099 SARQ $0x03, R12
6100 LEAL 8(R11)(R12*1), R11
6101 JMP match_nolit_end_encodeBetterBlockAsm
6102
6103matchlen_match8_match_nolit_encodeBetterBlockAsm:
6104 CMPL DI, $0x08
6105 JB matchlen_match4_match_nolit_encodeBetterBlockAsm
6106 MOVQ (R8)(R11*1), R10
6107 XORQ (R9)(R11*1), R10
6108 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
6109 LEAL -8(DI), DI
6110 LEAL 8(R11), R11
6111 JMP matchlen_match4_match_nolit_encodeBetterBlockAsm
6112
6113matchlen_bsf_8_match_nolit_encodeBetterBlockAsm:
6114#ifdef GOAMD64_v3
6115 TZCNTQ R10, R10
6116
6117#else
6118 BSFQ R10, R10
6119
6120#endif
6121 SARQ $0x03, R10
6122 LEAL (R11)(R10*1), R11
6123 JMP match_nolit_end_encodeBetterBlockAsm
6124
6125matchlen_match4_match_nolit_encodeBetterBlockAsm:
6126 CMPL DI, $0x04
6127 JB matchlen_match2_match_nolit_encodeBetterBlockAsm
6128 MOVL (R8)(R11*1), R10
6129 CMPL (R9)(R11*1), R10
6130 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
6131 LEAL -4(DI), DI
6132 LEAL 4(R11), R11
6133
6134matchlen_match2_match_nolit_encodeBetterBlockAsm:
6135 CMPL DI, $0x01
6136 JE matchlen_match1_match_nolit_encodeBetterBlockAsm
6137 JB match_nolit_end_encodeBetterBlockAsm
6138 MOVW (R8)(R11*1), R10
6139 CMPW (R9)(R11*1), R10
6140 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
6141 LEAL 2(R11), R11
6142 SUBL $0x02, DI
6143 JZ match_nolit_end_encodeBetterBlockAsm
6144
6145matchlen_match1_match_nolit_encodeBetterBlockAsm:
6146 MOVB (R8)(R11*1), R10
6147 CMPB (R9)(R11*1), R10
6148 JNE match_nolit_end_encodeBetterBlockAsm
6149 LEAL 1(R11), R11
6150
6151match_nolit_end_encodeBetterBlockAsm:
6152 MOVL CX, DI
6153 SUBL BX, DI
6154
6155 // Check if repeat
6156 CMPL 16(SP), DI
6157 JEQ match_is_repeat_encodeBetterBlockAsm
6158 CMPL R11, $0x01
6159 JA match_length_ok_encodeBetterBlockAsm
6160 CMPL DI, $0x0000ffff
6161 JBE match_length_ok_encodeBetterBlockAsm
6162 MOVL 20(SP), CX
6163 INCL CX
6164 JMP search_loop_encodeBetterBlockAsm
6165
6166match_length_ok_encodeBetterBlockAsm:
6167 MOVL DI, 16(SP)
6168 MOVL 12(SP), BX
6169 CMPL BX, SI
6170 JEQ emit_literal_done_match_emit_encodeBetterBlockAsm
6171 MOVL SI, R8
6172 MOVL SI, 12(SP)
6173 LEAQ (DX)(BX*1), R9
6174 SUBL BX, R8
6175 LEAL -1(R8), BX
6176 CMPL BX, $0x3c
6177 JB one_byte_match_emit_encodeBetterBlockAsm
6178 CMPL BX, $0x00000100
6179 JB two_bytes_match_emit_encodeBetterBlockAsm
6180 CMPL BX, $0x00010000
6181 JB three_bytes_match_emit_encodeBetterBlockAsm
6182 CMPL BX, $0x01000000
6183 JB four_bytes_match_emit_encodeBetterBlockAsm
6184 MOVB $0xfc, (AX)
6185 MOVL BX, 1(AX)
6186 ADDQ $0x05, AX
6187 JMP memmove_long_match_emit_encodeBetterBlockAsm
6188
6189four_bytes_match_emit_encodeBetterBlockAsm:
6190 MOVL BX, R10
6191 SHRL $0x10, R10
6192 MOVB $0xf8, (AX)
6193 MOVW BX, 1(AX)
6194 MOVB R10, 3(AX)
6195 ADDQ $0x04, AX
6196 JMP memmove_long_match_emit_encodeBetterBlockAsm
6197
6198three_bytes_match_emit_encodeBetterBlockAsm:
6199 MOVB $0xf4, (AX)
6200 MOVW BX, 1(AX)
6201 ADDQ $0x03, AX
6202 JMP memmove_long_match_emit_encodeBetterBlockAsm
6203
6204two_bytes_match_emit_encodeBetterBlockAsm:
6205 MOVB $0xf0, (AX)
6206 MOVB BL, 1(AX)
6207 ADDQ $0x02, AX
6208 CMPL BX, $0x40
6209 JB memmove_match_emit_encodeBetterBlockAsm
6210 JMP memmove_long_match_emit_encodeBetterBlockAsm
6211
6212one_byte_match_emit_encodeBetterBlockAsm:
6213 SHLB $0x02, BL
6214 MOVB BL, (AX)
6215 ADDQ $0x01, AX
6216
6217memmove_match_emit_encodeBetterBlockAsm:
6218 LEAQ (AX)(R8*1), BX
6219
6220 // genMemMoveShort
6221 CMPQ R8, $0x04
6222 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4
6223 CMPQ R8, $0x08
6224 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7
6225 CMPQ R8, $0x10
6226 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16
6227 CMPQ R8, $0x20
6228 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32
6229 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64
6230
6231emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4:
6232 MOVL (R9), R10
6233 MOVL R10, (AX)
6234 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
6235
6236emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7:
6237 MOVL (R9), R10
6238 MOVL -4(R9)(R8*1), R9
6239 MOVL R10, (AX)
6240 MOVL R9, -4(AX)(R8*1)
6241 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
6242
6243emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16:
6244 MOVQ (R9), R10
6245 MOVQ -8(R9)(R8*1), R9
6246 MOVQ R10, (AX)
6247 MOVQ R9, -8(AX)(R8*1)
6248 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
6249
6250emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32:
6251 MOVOU (R9), X0
6252 MOVOU -16(R9)(R8*1), X1
6253 MOVOU X0, (AX)
6254 MOVOU X1, -16(AX)(R8*1)
6255 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
6256
6257emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64:
6258 MOVOU (R9), X0
6259 MOVOU 16(R9), X1
6260 MOVOU -32(R9)(R8*1), X2
6261 MOVOU -16(R9)(R8*1), X3
6262 MOVOU X0, (AX)
6263 MOVOU X1, 16(AX)
6264 MOVOU X2, -32(AX)(R8*1)
6265 MOVOU X3, -16(AX)(R8*1)
6266
6267memmove_end_copy_match_emit_encodeBetterBlockAsm:
6268 MOVQ BX, AX
6269 JMP emit_literal_done_match_emit_encodeBetterBlockAsm
6270
6271memmove_long_match_emit_encodeBetterBlockAsm:
6272 LEAQ (AX)(R8*1), BX
6273
6274 // genMemMoveLong
6275 MOVOU (R9), X0
6276 MOVOU 16(R9), X1
6277 MOVOU -32(R9)(R8*1), X2
6278 MOVOU -16(R9)(R8*1), X3
6279 MOVQ R8, R12
6280 SHRQ $0x05, R12
6281 MOVQ AX, R10
6282 ANDL $0x0000001f, R10
6283 MOVQ $0x00000040, R13
6284 SUBQ R10, R13
6285 DECQ R12
6286 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
6287 LEAQ -32(R9)(R13*1), R10
6288 LEAQ -32(AX)(R13*1), R14
6289
6290emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back:
6291 MOVOU (R10), X4
6292 MOVOU 16(R10), X5
6293 MOVOA X4, (R14)
6294 MOVOA X5, 16(R14)
6295 ADDQ $0x20, R14
6296 ADDQ $0x20, R10
6297 ADDQ $0x20, R13
6298 DECQ R12
6299 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back
6300
6301emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32:
6302 MOVOU -32(R9)(R13*1), X4
6303 MOVOU -16(R9)(R13*1), X5
6304 MOVOA X4, -32(AX)(R13*1)
6305 MOVOA X5, -16(AX)(R13*1)
6306 ADDQ $0x20, R13
6307 CMPQ R8, R13
6308 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
6309 MOVOU X0, (AX)
6310 MOVOU X1, 16(AX)
6311 MOVOU X2, -32(AX)(R8*1)
6312 MOVOU X3, -16(AX)(R8*1)
6313 MOVQ BX, AX
6314
6315emit_literal_done_match_emit_encodeBetterBlockAsm:
6316 ADDL R11, CX
6317 ADDL $0x04, R11
6318 MOVL CX, 12(SP)
6319
6320 // emitCopy
6321 CMPL DI, $0x00010000
6322 JB two_byte_offset_match_nolit_encodeBetterBlockAsm
6323 CMPL R11, $0x40
6324 JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm
6325 MOVB $0xff, (AX)
6326 MOVL DI, 1(AX)
6327 LEAL -64(R11), R11
6328 ADDQ $0x05, AX
6329 CMPL R11, $0x04
6330 JB four_bytes_remain_match_nolit_encodeBetterBlockAsm
6331
6332 // emitRepeat
6333emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy:
6334 MOVL R11, BX
6335 LEAL -4(R11), R11
6336 CMPL BX, $0x08
6337 JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy
6338 CMPL BX, $0x0c
6339 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
6340 CMPL DI, $0x00000800
6341 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
6342
6343cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
6344 CMPL R11, $0x00000104
6345 JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy
6346 CMPL R11, $0x00010100
6347 JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy
6348 CMPL R11, $0x0100ffff
6349 JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy
6350 LEAL -16842747(R11), R11
6351 MOVL $0xfffb001d, (AX)
6352 MOVB $0xff, 4(AX)
6353 ADDQ $0x05, AX
6354 JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy
6355
6356repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy:
6357 LEAL -65536(R11), R11
6358 MOVL R11, DI
6359 MOVW $0x001d, (AX)
6360 MOVW R11, 2(AX)
6361 SARL $0x10, DI
6362 MOVB DI, 4(AX)
6363 ADDQ $0x05, AX
6364 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6365
6366repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy:
6367 LEAL -256(R11), R11
6368 MOVW $0x0019, (AX)
6369 MOVW R11, 2(AX)
6370 ADDQ $0x04, AX
6371 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6372
6373repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy:
6374 LEAL -4(R11), R11
6375 MOVW $0x0015, (AX)
6376 MOVB R11, 2(AX)
6377 ADDQ $0x03, AX
6378 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6379
6380repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy:
6381 SHLL $0x02, R11
6382 ORL $0x01, R11
6383 MOVW R11, (AX)
6384 ADDQ $0x02, AX
6385 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6386
6387repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
6388 XORQ BX, BX
6389 LEAL 1(BX)(R11*4), R11
6390 MOVB DI, 1(AX)
6391 SARL $0x08, DI
6392 SHLL $0x05, DI
6393 ORL DI, R11
6394 MOVB R11, (AX)
6395 ADDQ $0x02, AX
6396 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6397
6398four_bytes_remain_match_nolit_encodeBetterBlockAsm:
6399 TESTL R11, R11
6400 JZ match_nolit_emitcopy_end_encodeBetterBlockAsm
6401 XORL BX, BX
6402 LEAL -1(BX)(R11*4), R11
6403 MOVB R11, (AX)
6404 MOVL DI, 1(AX)
6405 ADDQ $0x05, AX
6406 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6407
6408two_byte_offset_match_nolit_encodeBetterBlockAsm:
6409 CMPL R11, $0x40
6410 JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm
6411 CMPL DI, $0x00000800
6412 JAE long_offset_short_match_nolit_encodeBetterBlockAsm
6413 MOVL $0x00000001, BX
6414 LEAL 16(BX), BX
6415 MOVB DI, 1(AX)
6416 MOVL DI, R8
6417 SHRL $0x08, R8
6418 SHLL $0x05, R8
6419 ORL R8, BX
6420 MOVB BL, (AX)
6421 ADDQ $0x02, AX
6422 SUBL $0x08, R11
6423
6424 // emitRepeat
6425 LEAL -4(R11), R11
6426 JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
6427
6428emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
6429 MOVL R11, BX
6430 LEAL -4(R11), R11
6431 CMPL BX, $0x08
6432 JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
6433 CMPL BX, $0x0c
6434 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
6435 CMPL DI, $0x00000800
6436 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
6437
6438cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
6439 CMPL R11, $0x00000104
6440 JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
6441 CMPL R11, $0x00010100
6442 JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
6443 CMPL R11, $0x0100ffff
6444 JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
6445 LEAL -16842747(R11), R11
6446 MOVL $0xfffb001d, (AX)
6447 MOVB $0xff, 4(AX)
6448 ADDQ $0x05, AX
6449 JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
6450
6451repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
6452 LEAL -65536(R11), R11
6453 MOVL R11, DI
6454 MOVW $0x001d, (AX)
6455 MOVW R11, 2(AX)
6456 SARL $0x10, DI
6457 MOVB DI, 4(AX)
6458 ADDQ $0x05, AX
6459 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6460
6461repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
6462 LEAL -256(R11), R11
6463 MOVW $0x0019, (AX)
6464 MOVW R11, 2(AX)
6465 ADDQ $0x04, AX
6466 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6467
6468repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
6469 LEAL -4(R11), R11
6470 MOVW $0x0015, (AX)
6471 MOVB R11, 2(AX)
6472 ADDQ $0x03, AX
6473 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6474
6475repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
6476 SHLL $0x02, R11
6477 ORL $0x01, R11
6478 MOVW R11, (AX)
6479 ADDQ $0x02, AX
6480 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6481
6482repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
6483 XORQ BX, BX
6484 LEAL 1(BX)(R11*4), R11
6485 MOVB DI, 1(AX)
6486 SARL $0x08, DI
6487 SHLL $0x05, DI
6488 ORL DI, R11
6489 MOVB R11, (AX)
6490 ADDQ $0x02, AX
6491 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6492
6493long_offset_short_match_nolit_encodeBetterBlockAsm:
6494 MOVB $0xee, (AX)
6495 MOVW DI, 1(AX)
6496 LEAL -60(R11), R11
6497 ADDQ $0x03, AX
6498
6499 // emitRepeat
6500emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short:
6501 MOVL R11, BX
6502 LEAL -4(R11), R11
6503 CMPL BX, $0x08
6504 JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short
6505 CMPL BX, $0x0c
6506 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
6507 CMPL DI, $0x00000800
6508 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
6509
6510cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
6511 CMPL R11, $0x00000104
6512 JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short
6513 CMPL R11, $0x00010100
6514 JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short
6515 CMPL R11, $0x0100ffff
6516 JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short
6517 LEAL -16842747(R11), R11
6518 MOVL $0xfffb001d, (AX)
6519 MOVB $0xff, 4(AX)
6520 ADDQ $0x05, AX
6521 JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short
6522
6523repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short:
6524 LEAL -65536(R11), R11
6525 MOVL R11, DI
6526 MOVW $0x001d, (AX)
6527 MOVW R11, 2(AX)
6528 SARL $0x10, DI
6529 MOVB DI, 4(AX)
6530 ADDQ $0x05, AX
6531 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6532
6533repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short:
6534 LEAL -256(R11), R11
6535 MOVW $0x0019, (AX)
6536 MOVW R11, 2(AX)
6537 ADDQ $0x04, AX
6538 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6539
6540repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short:
6541 LEAL -4(R11), R11
6542 MOVW $0x0015, (AX)
6543 MOVB R11, 2(AX)
6544 ADDQ $0x03, AX
6545 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6546
6547repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short:
6548 SHLL $0x02, R11
6549 ORL $0x01, R11
6550 MOVW R11, (AX)
6551 ADDQ $0x02, AX
6552 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6553
6554repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
6555 XORQ BX, BX
6556 LEAL 1(BX)(R11*4), R11
6557 MOVB DI, 1(AX)
6558 SARL $0x08, DI
6559 SHLL $0x05, DI
6560 ORL DI, R11
6561 MOVB R11, (AX)
6562 ADDQ $0x02, AX
6563 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6564
6565two_byte_offset_short_match_nolit_encodeBetterBlockAsm:
6566 MOVL R11, BX
6567 SHLL $0x02, BX
6568 CMPL R11, $0x0c
6569 JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
6570 CMPL DI, $0x00000800
6571 JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
6572 LEAL -15(BX), BX
6573 MOVB DI, 1(AX)
6574 SHRL $0x08, DI
6575 SHLL $0x05, DI
6576 ORL DI, BX
6577 MOVB BL, (AX)
6578 ADDQ $0x02, AX
6579 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6580
6581emit_copy_three_match_nolit_encodeBetterBlockAsm:
6582 LEAL -2(BX), BX
6583 MOVB BL, (AX)
6584 MOVW DI, 1(AX)
6585 ADDQ $0x03, AX
6586 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6587
6588match_is_repeat_encodeBetterBlockAsm:
6589 MOVL 12(SP), BX
6590 CMPL BX, SI
6591 JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
6592 MOVL SI, R8
6593 MOVL SI, 12(SP)
6594 LEAQ (DX)(BX*1), R9
6595 SUBL BX, R8
6596 LEAL -1(R8), BX
6597 CMPL BX, $0x3c
6598 JB one_byte_match_emit_repeat_encodeBetterBlockAsm
6599 CMPL BX, $0x00000100
6600 JB two_bytes_match_emit_repeat_encodeBetterBlockAsm
6601 CMPL BX, $0x00010000
6602 JB three_bytes_match_emit_repeat_encodeBetterBlockAsm
6603 CMPL BX, $0x01000000
6604 JB four_bytes_match_emit_repeat_encodeBetterBlockAsm
6605 MOVB $0xfc, (AX)
6606 MOVL BX, 1(AX)
6607 ADDQ $0x05, AX
6608 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
6609
6610four_bytes_match_emit_repeat_encodeBetterBlockAsm:
6611 MOVL BX, R10
6612 SHRL $0x10, R10
6613 MOVB $0xf8, (AX)
6614 MOVW BX, 1(AX)
6615 MOVB R10, 3(AX)
6616 ADDQ $0x04, AX
6617 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
6618
6619three_bytes_match_emit_repeat_encodeBetterBlockAsm:
6620 MOVB $0xf4, (AX)
6621 MOVW BX, 1(AX)
6622 ADDQ $0x03, AX
6623 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
6624
6625two_bytes_match_emit_repeat_encodeBetterBlockAsm:
6626 MOVB $0xf0, (AX)
6627 MOVB BL, 1(AX)
6628 ADDQ $0x02, AX
6629 CMPL BX, $0x40
6630 JB memmove_match_emit_repeat_encodeBetterBlockAsm
6631 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
6632
6633one_byte_match_emit_repeat_encodeBetterBlockAsm:
6634 SHLB $0x02, BL
6635 MOVB BL, (AX)
6636 ADDQ $0x01, AX
6637
6638memmove_match_emit_repeat_encodeBetterBlockAsm:
6639 LEAQ (AX)(R8*1), BX
6640
6641 // genMemMoveShort
6642 CMPQ R8, $0x04
6643 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4
6644 CMPQ R8, $0x08
6645 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7
6646 CMPQ R8, $0x10
6647 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16
6648 CMPQ R8, $0x20
6649 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32
6650 JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64
6651
6652emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4:
6653 MOVL (R9), R10
6654 MOVL R10, (AX)
6655 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
6656
6657emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7:
6658 MOVL (R9), R10
6659 MOVL -4(R9)(R8*1), R9
6660 MOVL R10, (AX)
6661 MOVL R9, -4(AX)(R8*1)
6662 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
6663
6664emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16:
6665 MOVQ (R9), R10
6666 MOVQ -8(R9)(R8*1), R9
6667 MOVQ R10, (AX)
6668 MOVQ R9, -8(AX)(R8*1)
6669 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
6670
6671emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32:
6672 MOVOU (R9), X0
6673 MOVOU -16(R9)(R8*1), X1
6674 MOVOU X0, (AX)
6675 MOVOU X1, -16(AX)(R8*1)
6676 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
6677
6678emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64:
6679 MOVOU (R9), X0
6680 MOVOU 16(R9), X1
6681 MOVOU -32(R9)(R8*1), X2
6682 MOVOU -16(R9)(R8*1), X3
6683 MOVOU X0, (AX)
6684 MOVOU X1, 16(AX)
6685 MOVOU X2, -32(AX)(R8*1)
6686 MOVOU X3, -16(AX)(R8*1)
6687
6688memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm:
6689 MOVQ BX, AX
6690 JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
6691
6692memmove_long_match_emit_repeat_encodeBetterBlockAsm:
6693 LEAQ (AX)(R8*1), BX
6694
6695 // genMemMoveLong
6696 MOVOU (R9), X0
6697 MOVOU 16(R9), X1
6698 MOVOU -32(R9)(R8*1), X2
6699 MOVOU -16(R9)(R8*1), X3
6700 MOVQ R8, R12
6701 SHRQ $0x05, R12
6702 MOVQ AX, R10
6703 ANDL $0x0000001f, R10
6704 MOVQ $0x00000040, R13
6705 SUBQ R10, R13
6706 DECQ R12
6707 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
6708 LEAQ -32(R9)(R13*1), R10
6709 LEAQ -32(AX)(R13*1), R14
6710
6711emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back:
6712 MOVOU (R10), X4
6713 MOVOU 16(R10), X5
6714 MOVOA X4, (R14)
6715 MOVOA X5, 16(R14)
6716 ADDQ $0x20, R14
6717 ADDQ $0x20, R10
6718 ADDQ $0x20, R13
6719 DECQ R12
6720 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back
6721
6722emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32:
6723 MOVOU -32(R9)(R13*1), X4
6724 MOVOU -16(R9)(R13*1), X5
6725 MOVOA X4, -32(AX)(R13*1)
6726 MOVOA X5, -16(AX)(R13*1)
6727 ADDQ $0x20, R13
6728 CMPQ R8, R13
6729 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
6730 MOVOU X0, (AX)
6731 MOVOU X1, 16(AX)
6732 MOVOU X2, -32(AX)(R8*1)
6733 MOVOU X3, -16(AX)(R8*1)
6734 MOVQ BX, AX
6735
6736emit_literal_done_match_emit_repeat_encodeBetterBlockAsm:
6737 ADDL R11, CX
6738 ADDL $0x04, R11
6739 MOVL CX, 12(SP)
6740
6741 // emitRepeat
6742emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm:
6743 MOVL R11, BX
6744 LEAL -4(R11), R11
6745 CMPL BX, $0x08
6746 JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm
6747 CMPL BX, $0x0c
6748 JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
6749 CMPL DI, $0x00000800
6750 JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
6751
6752cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
6753 CMPL R11, $0x00000104
6754 JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm
6755 CMPL R11, $0x00010100
6756 JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm
6757 CMPL R11, $0x0100ffff
6758 JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm
6759 LEAL -16842747(R11), R11
6760 MOVL $0xfffb001d, (AX)
6761 MOVB $0xff, 4(AX)
6762 ADDQ $0x05, AX
6763 JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm
6764
6765repeat_five_match_nolit_repeat_encodeBetterBlockAsm:
6766 LEAL -65536(R11), R11
6767 MOVL R11, DI
6768 MOVW $0x001d, (AX)
6769 MOVW R11, 2(AX)
6770 SARL $0x10, DI
6771 MOVB DI, 4(AX)
6772 ADDQ $0x05, AX
6773 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6774
6775repeat_four_match_nolit_repeat_encodeBetterBlockAsm:
6776 LEAL -256(R11), R11
6777 MOVW $0x0019, (AX)
6778 MOVW R11, 2(AX)
6779 ADDQ $0x04, AX
6780 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6781
6782repeat_three_match_nolit_repeat_encodeBetterBlockAsm:
6783 LEAL -4(R11), R11
6784 MOVW $0x0015, (AX)
6785 MOVB R11, 2(AX)
6786 ADDQ $0x03, AX
6787 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6788
6789repeat_two_match_nolit_repeat_encodeBetterBlockAsm:
6790 SHLL $0x02, R11
6791 ORL $0x01, R11
6792 MOVW R11, (AX)
6793 ADDQ $0x02, AX
6794 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
6795
6796repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
6797 XORQ BX, BX
6798 LEAL 1(BX)(R11*4), R11
6799 MOVB DI, 1(AX)
6800 SARL $0x08, DI
6801 SHLL $0x05, DI
6802 ORL DI, R11
6803 MOVB R11, (AX)
6804 ADDQ $0x02, AX
6805
6806match_nolit_emitcopy_end_encodeBetterBlockAsm:
6807 CMPL CX, 8(SP)
6808 JAE emit_remainder_encodeBetterBlockAsm
6809 CMPQ AX, (SP)
6810 JB match_nolit_dst_ok_encodeBetterBlockAsm
6811 MOVQ $0x00000000, ret+48(FP)
6812 RET
6813
6814match_nolit_dst_ok_encodeBetterBlockAsm:
6815 MOVQ $0x00cf1bbcdcbfa563, BX
6816 MOVQ $0x9e3779b1, DI
6817 LEAQ 1(SI), SI
6818 LEAQ -2(CX), R8
6819 MOVQ (DX)(SI*1), R9
6820 MOVQ 1(DX)(SI*1), R10
6821 MOVQ (DX)(R8*1), R11
6822 MOVQ 1(DX)(R8*1), R12
6823 SHLQ $0x08, R9
6824 IMULQ BX, R9
6825 SHRQ $0x2f, R9
6826 SHLQ $0x20, R10
6827 IMULQ DI, R10
6828 SHRQ $0x32, R10
6829 SHLQ $0x08, R11
6830 IMULQ BX, R11
6831 SHRQ $0x2f, R11
6832 SHLQ $0x20, R12
6833 IMULQ DI, R12
6834 SHRQ $0x32, R12
6835 LEAQ 1(SI), DI
6836 LEAQ 1(R8), R13
6837 MOVL SI, 24(SP)(R9*4)
6838 MOVL R8, 24(SP)(R11*4)
6839 MOVL DI, 524312(SP)(R10*4)
6840 MOVL R13, 524312(SP)(R12*4)
6841 LEAQ 1(R8)(SI*1), DI
6842 SHRQ $0x01, DI
6843 ADDQ $0x01, SI
6844 SUBQ $0x01, R8
6845
6846index_loop_encodeBetterBlockAsm:
6847 CMPQ DI, R8
6848 JAE search_loop_encodeBetterBlockAsm
6849 MOVQ (DX)(SI*1), R9
6850 MOVQ (DX)(DI*1), R10
6851 SHLQ $0x08, R9
6852 IMULQ BX, R9
6853 SHRQ $0x2f, R9
6854 SHLQ $0x08, R10
6855 IMULQ BX, R10
6856 SHRQ $0x2f, R10
6857 MOVL SI, 24(SP)(R9*4)
6858 MOVL DI, 24(SP)(R10*4)
6859 ADDQ $0x02, SI
6860 ADDQ $0x02, DI
6861 JMP index_loop_encodeBetterBlockAsm
6862
6863emit_remainder_encodeBetterBlockAsm:
6864 MOVQ src_len+32(FP), CX
6865 SUBL 12(SP), CX
6866 LEAQ 5(AX)(CX*1), CX
6867 CMPQ CX, (SP)
6868 JB emit_remainder_ok_encodeBetterBlockAsm
6869 MOVQ $0x00000000, ret+48(FP)
6870 RET
6871
6872emit_remainder_ok_encodeBetterBlockAsm:
6873 MOVQ src_len+32(FP), CX
6874 MOVL 12(SP), BX
6875 CMPL BX, CX
6876 JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm
6877 MOVL CX, SI
6878 MOVL CX, 12(SP)
6879 LEAQ (DX)(BX*1), CX
6880 SUBL BX, SI
6881 LEAL -1(SI), DX
6882 CMPL DX, $0x3c
6883 JB one_byte_emit_remainder_encodeBetterBlockAsm
6884 CMPL DX, $0x00000100
6885 JB two_bytes_emit_remainder_encodeBetterBlockAsm
6886 CMPL DX, $0x00010000
6887 JB three_bytes_emit_remainder_encodeBetterBlockAsm
6888 CMPL DX, $0x01000000
6889 JB four_bytes_emit_remainder_encodeBetterBlockAsm
6890 MOVB $0xfc, (AX)
6891 MOVL DX, 1(AX)
6892 ADDQ $0x05, AX
6893 JMP memmove_long_emit_remainder_encodeBetterBlockAsm
6894
6895four_bytes_emit_remainder_encodeBetterBlockAsm:
6896 MOVL DX, BX
6897 SHRL $0x10, BX
6898 MOVB $0xf8, (AX)
6899 MOVW DX, 1(AX)
6900 MOVB BL, 3(AX)
6901 ADDQ $0x04, AX
6902 JMP memmove_long_emit_remainder_encodeBetterBlockAsm
6903
6904three_bytes_emit_remainder_encodeBetterBlockAsm:
6905 MOVB $0xf4, (AX)
6906 MOVW DX, 1(AX)
6907 ADDQ $0x03, AX
6908 JMP memmove_long_emit_remainder_encodeBetterBlockAsm
6909
6910two_bytes_emit_remainder_encodeBetterBlockAsm:
6911 MOVB $0xf0, (AX)
6912 MOVB DL, 1(AX)
6913 ADDQ $0x02, AX
6914 CMPL DX, $0x40
6915 JB memmove_emit_remainder_encodeBetterBlockAsm
6916 JMP memmove_long_emit_remainder_encodeBetterBlockAsm
6917
6918one_byte_emit_remainder_encodeBetterBlockAsm:
6919 SHLB $0x02, DL
6920 MOVB DL, (AX)
6921 ADDQ $0x01, AX
6922
6923memmove_emit_remainder_encodeBetterBlockAsm:
6924 LEAQ (AX)(SI*1), DX
6925 MOVL SI, BX
6926
6927 // genMemMoveShort
6928 CMPQ BX, $0x03
6929 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2
6930 JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3
6931 CMPQ BX, $0x08
6932 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7
6933 CMPQ BX, $0x10
6934 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16
6935 CMPQ BX, $0x20
6936 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32
6937 JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64
6938
6939emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2:
6940 MOVB (CX), SI
6941 MOVB -1(CX)(BX*1), CL
6942 MOVB SI, (AX)
6943 MOVB CL, -1(AX)(BX*1)
6944 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
6945
6946emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3:
6947 MOVW (CX), SI
6948 MOVB 2(CX), CL
6949 MOVW SI, (AX)
6950 MOVB CL, 2(AX)
6951 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
6952
6953emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7:
6954 MOVL (CX), SI
6955 MOVL -4(CX)(BX*1), CX
6956 MOVL SI, (AX)
6957 MOVL CX, -4(AX)(BX*1)
6958 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
6959
6960emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16:
6961 MOVQ (CX), SI
6962 MOVQ -8(CX)(BX*1), CX
6963 MOVQ SI, (AX)
6964 MOVQ CX, -8(AX)(BX*1)
6965 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
6966
6967emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32:
6968 MOVOU (CX), X0
6969 MOVOU -16(CX)(BX*1), X1
6970 MOVOU X0, (AX)
6971 MOVOU X1, -16(AX)(BX*1)
6972 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
6973
6974emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64:
6975 MOVOU (CX), X0
6976 MOVOU 16(CX), X1
6977 MOVOU -32(CX)(BX*1), X2
6978 MOVOU -16(CX)(BX*1), X3
6979 MOVOU X0, (AX)
6980 MOVOU X1, 16(AX)
6981 MOVOU X2, -32(AX)(BX*1)
6982 MOVOU X3, -16(AX)(BX*1)
6983
6984memmove_end_copy_emit_remainder_encodeBetterBlockAsm:
6985 MOVQ DX, AX
6986 JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm
6987
6988memmove_long_emit_remainder_encodeBetterBlockAsm:
6989 LEAQ (AX)(SI*1), DX
6990 MOVL SI, BX
6991
6992 // genMemMoveLong
6993 MOVOU (CX), X0
6994 MOVOU 16(CX), X1
6995 MOVOU -32(CX)(BX*1), X2
6996 MOVOU -16(CX)(BX*1), X3
6997 MOVQ BX, DI
6998 SHRQ $0x05, DI
6999 MOVQ AX, SI
7000 ANDL $0x0000001f, SI
7001 MOVQ $0x00000040, R8
7002 SUBQ SI, R8
7003 DECQ DI
7004 JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
7005 LEAQ -32(CX)(R8*1), SI
7006 LEAQ -32(AX)(R8*1), R9
7007
7008emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back:
7009 MOVOU (SI), X4
7010 MOVOU 16(SI), X5
7011 MOVOA X4, (R9)
7012 MOVOA X5, 16(R9)
7013 ADDQ $0x20, R9
7014 ADDQ $0x20, SI
7015 ADDQ $0x20, R8
7016 DECQ DI
7017 JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back
7018
7019emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32:
7020 MOVOU -32(CX)(R8*1), X4
7021 MOVOU -16(CX)(R8*1), X5
7022 MOVOA X4, -32(AX)(R8*1)
7023 MOVOA X5, -16(AX)(R8*1)
7024 ADDQ $0x20, R8
7025 CMPQ BX, R8
7026 JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
7027 MOVOU X0, (AX)
7028 MOVOU X1, 16(AX)
7029 MOVOU X2, -32(AX)(BX*1)
7030 MOVOU X3, -16(AX)(BX*1)
7031 MOVQ DX, AX
7032
7033emit_literal_done_emit_remainder_encodeBetterBlockAsm:
7034 MOVQ dst_base+0(FP), CX
7035 SUBQ CX, AX
7036 MOVQ AX, ret+48(FP)
7037 RET
7038
7039// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
7040// Requires: BMI, SSE2
7041TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56
7042 MOVQ dst_base+0(FP), AX
7043 MOVQ $0x00001200, CX
7044 LEAQ 24(SP), DX
7045 PXOR X0, X0
7046
7047zero_loop_encodeBetterBlockAsm4MB:
7048 MOVOU X0, (DX)
7049 MOVOU X0, 16(DX)
7050 MOVOU X0, 32(DX)
7051 MOVOU X0, 48(DX)
7052 MOVOU X0, 64(DX)
7053 MOVOU X0, 80(DX)
7054 MOVOU X0, 96(DX)
7055 MOVOU X0, 112(DX)
7056 ADDQ $0x80, DX
7057 DECQ CX
7058 JNZ zero_loop_encodeBetterBlockAsm4MB
7059 MOVL $0x00000000, 12(SP)
7060 MOVQ src_len+32(FP), CX
7061 LEAQ -6(CX), DX
7062 LEAQ -8(CX), BX
7063 MOVL BX, 8(SP)
7064 SHRQ $0x05, CX
7065 SUBL CX, DX
7066 LEAQ (AX)(DX*1), DX
7067 MOVQ DX, (SP)
7068 MOVL $0x00000001, CX
7069 MOVL $0x00000000, 16(SP)
7070 MOVQ src_base+24(FP), DX
7071
7072search_loop_encodeBetterBlockAsm4MB:
7073 MOVL CX, BX
7074 SUBL 12(SP), BX
7075 SHRL $0x07, BX
7076 CMPL BX, $0x63
7077 JBE check_maxskip_ok_encodeBetterBlockAsm4MB
7078 LEAL 100(CX), BX
7079 JMP check_maxskip_cont_encodeBetterBlockAsm4MB
7080
7081check_maxskip_ok_encodeBetterBlockAsm4MB:
7082 LEAL 1(CX)(BX*1), BX
7083
7084check_maxskip_cont_encodeBetterBlockAsm4MB:
7085 CMPL BX, 8(SP)
7086 JAE emit_remainder_encodeBetterBlockAsm4MB
7087 MOVQ (DX)(CX*1), SI
7088 MOVL BX, 20(SP)
7089 MOVQ $0x00cf1bbcdcbfa563, R8
7090 MOVQ $0x9e3779b1, BX
7091 MOVQ SI, R9
7092 MOVQ SI, R10
7093 SHLQ $0x08, R9
7094 IMULQ R8, R9
7095 SHRQ $0x2f, R9
7096 SHLQ $0x20, R10
7097 IMULQ BX, R10
7098 SHRQ $0x32, R10
7099 MOVL 24(SP)(R9*4), BX
7100 MOVL 524312(SP)(R10*4), DI
7101 MOVL CX, 24(SP)(R9*4)
7102 MOVL CX, 524312(SP)(R10*4)
7103 MOVQ (DX)(BX*1), R9
7104 MOVQ (DX)(DI*1), R10
7105 CMPQ R9, SI
7106 JEQ candidate_match_encodeBetterBlockAsm4MB
7107 CMPQ R10, SI
7108 JNE no_short_found_encodeBetterBlockAsm4MB
7109 MOVL DI, BX
7110 JMP candidate_match_encodeBetterBlockAsm4MB
7111
7112no_short_found_encodeBetterBlockAsm4MB:
7113 CMPL R9, SI
7114 JEQ candidate_match_encodeBetterBlockAsm4MB
7115 CMPL R10, SI
7116 JEQ candidateS_match_encodeBetterBlockAsm4MB
7117 MOVL 20(SP), CX
7118 JMP search_loop_encodeBetterBlockAsm4MB
7119
7120candidateS_match_encodeBetterBlockAsm4MB:
7121 SHRQ $0x08, SI
7122 MOVQ SI, R9
7123 SHLQ $0x08, R9
7124 IMULQ R8, R9
7125 SHRQ $0x2f, R9
7126 MOVL 24(SP)(R9*4), BX
7127 INCL CX
7128 MOVL CX, 24(SP)(R9*4)
7129 CMPL (DX)(BX*1), SI
7130 JEQ candidate_match_encodeBetterBlockAsm4MB
7131 DECL CX
7132 MOVL DI, BX
7133
7134candidate_match_encodeBetterBlockAsm4MB:
7135 MOVL 12(SP), SI
7136 TESTL BX, BX
7137 JZ match_extend_back_end_encodeBetterBlockAsm4MB
7138
7139match_extend_back_loop_encodeBetterBlockAsm4MB:
7140 CMPL CX, SI
7141 JBE match_extend_back_end_encodeBetterBlockAsm4MB
7142 MOVB -1(DX)(BX*1), DI
7143 MOVB -1(DX)(CX*1), R8
7144 CMPB DI, R8
7145 JNE match_extend_back_end_encodeBetterBlockAsm4MB
7146 LEAL -1(CX), CX
7147 DECL BX
7148 JZ match_extend_back_end_encodeBetterBlockAsm4MB
7149 JMP match_extend_back_loop_encodeBetterBlockAsm4MB
7150
7151match_extend_back_end_encodeBetterBlockAsm4MB:
7152 MOVL CX, SI
7153 SUBL 12(SP), SI
7154 LEAQ 4(AX)(SI*1), SI
7155 CMPQ SI, (SP)
7156 JB match_dst_size_check_encodeBetterBlockAsm4MB
7157 MOVQ $0x00000000, ret+48(FP)
7158 RET
7159
7160match_dst_size_check_encodeBetterBlockAsm4MB:
7161 MOVL CX, SI
7162 ADDL $0x04, CX
7163 ADDL $0x04, BX
7164 MOVQ src_len+32(FP), DI
7165 SUBL CX, DI
7166 LEAQ (DX)(CX*1), R8
7167 LEAQ (DX)(BX*1), R9
7168
7169 // matchLen
7170 XORL R11, R11
7171
7172matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB:
7173 CMPL DI, $0x10
7174 JB matchlen_match8_match_nolit_encodeBetterBlockAsm4MB
7175 MOVQ (R8)(R11*1), R10
7176 MOVQ 8(R8)(R11*1), R12
7177 XORQ (R9)(R11*1), R10
7178 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
7179 XORQ 8(R9)(R11*1), R12
7180 JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB
7181 LEAL -16(DI), DI
7182 LEAL 16(R11), R11
7183 JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB
7184
7185matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB:
7186#ifdef GOAMD64_v3
7187 TZCNTQ R12, R12
7188
7189#else
7190 BSFQ R12, R12
7191
7192#endif
7193 SARQ $0x03, R12
7194 LEAL 8(R11)(R12*1), R11
7195 JMP match_nolit_end_encodeBetterBlockAsm4MB
7196
7197matchlen_match8_match_nolit_encodeBetterBlockAsm4MB:
7198 CMPL DI, $0x08
7199 JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
7200 MOVQ (R8)(R11*1), R10
7201 XORQ (R9)(R11*1), R10
7202 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
7203 LEAL -8(DI), DI
7204 LEAL 8(R11), R11
7205 JMP matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
7206
7207matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB:
7208#ifdef GOAMD64_v3
7209 TZCNTQ R10, R10
7210
7211#else
7212 BSFQ R10, R10
7213
7214#endif
7215 SARQ $0x03, R10
7216 LEAL (R11)(R10*1), R11
7217 JMP match_nolit_end_encodeBetterBlockAsm4MB
7218
7219matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
7220 CMPL DI, $0x04
7221 JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
7222 MOVL (R8)(R11*1), R10
7223 CMPL (R9)(R11*1), R10
7224 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
7225 LEAL -4(DI), DI
7226 LEAL 4(R11), R11
7227
7228matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
7229 CMPL DI, $0x01
7230 JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
7231 JB match_nolit_end_encodeBetterBlockAsm4MB
7232 MOVW (R8)(R11*1), R10
7233 CMPW (R9)(R11*1), R10
7234 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
7235 LEAL 2(R11), R11
7236 SUBL $0x02, DI
7237 JZ match_nolit_end_encodeBetterBlockAsm4MB
7238
7239matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
7240 MOVB (R8)(R11*1), R10
7241 CMPB (R9)(R11*1), R10
7242 JNE match_nolit_end_encodeBetterBlockAsm4MB
7243 LEAL 1(R11), R11
7244
7245match_nolit_end_encodeBetterBlockAsm4MB:
7246 MOVL CX, DI
7247 SUBL BX, DI
7248
7249 // Check if repeat
7250 CMPL 16(SP), DI
7251 JEQ match_is_repeat_encodeBetterBlockAsm4MB
7252 CMPL R11, $0x01
7253 JA match_length_ok_encodeBetterBlockAsm4MB
7254 CMPL DI, $0x0000ffff
7255 JBE match_length_ok_encodeBetterBlockAsm4MB
7256 MOVL 20(SP), CX
7257 INCL CX
7258 JMP search_loop_encodeBetterBlockAsm4MB
7259
7260match_length_ok_encodeBetterBlockAsm4MB:
7261 MOVL DI, 16(SP)
7262 MOVL 12(SP), BX
7263 CMPL BX, SI
7264 JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB
7265 MOVL SI, R8
7266 MOVL SI, 12(SP)
7267 LEAQ (DX)(BX*1), R9
7268 SUBL BX, R8
7269 LEAL -1(R8), BX
7270 CMPL BX, $0x3c
7271 JB one_byte_match_emit_encodeBetterBlockAsm4MB
7272 CMPL BX, $0x00000100
7273 JB two_bytes_match_emit_encodeBetterBlockAsm4MB
7274 CMPL BX, $0x00010000
7275 JB three_bytes_match_emit_encodeBetterBlockAsm4MB
7276 MOVL BX, R10
7277 SHRL $0x10, R10
7278 MOVB $0xf8, (AX)
7279 MOVW BX, 1(AX)
7280 MOVB R10, 3(AX)
7281 ADDQ $0x04, AX
7282 JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
7283
7284three_bytes_match_emit_encodeBetterBlockAsm4MB:
7285 MOVB $0xf4, (AX)
7286 MOVW BX, 1(AX)
7287 ADDQ $0x03, AX
7288 JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
7289
7290two_bytes_match_emit_encodeBetterBlockAsm4MB:
7291 MOVB $0xf0, (AX)
7292 MOVB BL, 1(AX)
7293 ADDQ $0x02, AX
7294 CMPL BX, $0x40
7295 JB memmove_match_emit_encodeBetterBlockAsm4MB
7296 JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
7297
7298one_byte_match_emit_encodeBetterBlockAsm4MB:
7299 SHLB $0x02, BL
7300 MOVB BL, (AX)
7301 ADDQ $0x01, AX
7302
7303memmove_match_emit_encodeBetterBlockAsm4MB:
7304 LEAQ (AX)(R8*1), BX
7305
7306 // genMemMoveShort
7307 CMPQ R8, $0x04
7308 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4
7309 CMPQ R8, $0x08
7310 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7
7311 CMPQ R8, $0x10
7312 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16
7313 CMPQ R8, $0x20
7314 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32
7315 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64
7316
7317emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4:
7318 MOVL (R9), R10
7319 MOVL R10, (AX)
7320 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
7321
7322emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7:
7323 MOVL (R9), R10
7324 MOVL -4(R9)(R8*1), R9
7325 MOVL R10, (AX)
7326 MOVL R9, -4(AX)(R8*1)
7327 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
7328
7329emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16:
7330 MOVQ (R9), R10
7331 MOVQ -8(R9)(R8*1), R9
7332 MOVQ R10, (AX)
7333 MOVQ R9, -8(AX)(R8*1)
7334 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
7335
7336emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32:
7337 MOVOU (R9), X0
7338 MOVOU -16(R9)(R8*1), X1
7339 MOVOU X0, (AX)
7340 MOVOU X1, -16(AX)(R8*1)
7341 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
7342
7343emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64:
7344 MOVOU (R9), X0
7345 MOVOU 16(R9), X1
7346 MOVOU -32(R9)(R8*1), X2
7347 MOVOU -16(R9)(R8*1), X3
7348 MOVOU X0, (AX)
7349 MOVOU X1, 16(AX)
7350 MOVOU X2, -32(AX)(R8*1)
7351 MOVOU X3, -16(AX)(R8*1)
7352
7353memmove_end_copy_match_emit_encodeBetterBlockAsm4MB:
7354 MOVQ BX, AX
7355 JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB
7356
7357memmove_long_match_emit_encodeBetterBlockAsm4MB:
7358 LEAQ (AX)(R8*1), BX
7359
7360 // genMemMoveLong
7361 MOVOU (R9), X0
7362 MOVOU 16(R9), X1
7363 MOVOU -32(R9)(R8*1), X2
7364 MOVOU -16(R9)(R8*1), X3
7365 MOVQ R8, R12
7366 SHRQ $0x05, R12
7367 MOVQ AX, R10
7368 ANDL $0x0000001f, R10
7369 MOVQ $0x00000040, R13
7370 SUBQ R10, R13
7371 DECQ R12
7372 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
7373 LEAQ -32(R9)(R13*1), R10
7374 LEAQ -32(AX)(R13*1), R14
7375
7376emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back:
7377 MOVOU (R10), X4
7378 MOVOU 16(R10), X5
7379 MOVOA X4, (R14)
7380 MOVOA X5, 16(R14)
7381 ADDQ $0x20, R14
7382 ADDQ $0x20, R10
7383 ADDQ $0x20, R13
7384 DECQ R12
7385 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back
7386
7387emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
7388 MOVOU -32(R9)(R13*1), X4
7389 MOVOU -16(R9)(R13*1), X5
7390 MOVOA X4, -32(AX)(R13*1)
7391 MOVOA X5, -16(AX)(R13*1)
7392 ADDQ $0x20, R13
7393 CMPQ R8, R13
7394 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
7395 MOVOU X0, (AX)
7396 MOVOU X1, 16(AX)
7397 MOVOU X2, -32(AX)(R8*1)
7398 MOVOU X3, -16(AX)(R8*1)
7399 MOVQ BX, AX
7400
7401emit_literal_done_match_emit_encodeBetterBlockAsm4MB:
7402 ADDL R11, CX
7403 ADDL $0x04, R11
7404 MOVL CX, 12(SP)
7405
7406 // emitCopy
7407 CMPL DI, $0x00010000
7408 JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB
7409 CMPL R11, $0x40
7410 JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
7411 MOVB $0xff, (AX)
7412 MOVL DI, 1(AX)
7413 LEAL -64(R11), R11
7414 ADDQ $0x05, AX
7415 CMPL R11, $0x04
7416 JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
7417
7418 // emitRepeat
7419 MOVL R11, BX
7420 LEAL -4(R11), R11
7421 CMPL BX, $0x08
7422 JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy
7423 CMPL BX, $0x0c
7424 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
7425 CMPL DI, $0x00000800
7426 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
7427
7428cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
7429 CMPL R11, $0x00000104
7430 JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy
7431 CMPL R11, $0x00010100
7432 JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy
7433 LEAL -65536(R11), R11
7434 MOVL R11, DI
7435 MOVW $0x001d, (AX)
7436 MOVW R11, 2(AX)
7437 SARL $0x10, DI
7438 MOVB DI, 4(AX)
7439 ADDQ $0x05, AX
7440 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7441
7442repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
7443 LEAL -256(R11), R11
7444 MOVW $0x0019, (AX)
7445 MOVW R11, 2(AX)
7446 ADDQ $0x04, AX
7447 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7448
7449repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
7450 LEAL -4(R11), R11
7451 MOVW $0x0015, (AX)
7452 MOVB R11, 2(AX)
7453 ADDQ $0x03, AX
7454 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7455
7456repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
7457 SHLL $0x02, R11
7458 ORL $0x01, R11
7459 MOVW R11, (AX)
7460 ADDQ $0x02, AX
7461 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7462
7463repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
7464 XORQ BX, BX
7465 LEAL 1(BX)(R11*4), R11
7466 MOVB DI, 1(AX)
7467 SARL $0x08, DI
7468 SHLL $0x05, DI
7469 ORL DI, R11
7470 MOVB R11, (AX)
7471 ADDQ $0x02, AX
7472 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7473
7474four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB:
7475 TESTL R11, R11
7476 JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7477 XORL BX, BX
7478 LEAL -1(BX)(R11*4), R11
7479 MOVB R11, (AX)
7480 MOVL DI, 1(AX)
7481 ADDQ $0x05, AX
7482 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7483
7484two_byte_offset_match_nolit_encodeBetterBlockAsm4MB:
7485 CMPL R11, $0x40
7486 JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB
7487 CMPL DI, $0x00000800
7488 JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB
7489 MOVL $0x00000001, BX
7490 LEAL 16(BX), BX
7491 MOVB DI, 1(AX)
7492 SHRL $0x08, DI
7493 SHLL $0x05, DI
7494 ORL DI, BX
7495 MOVB BL, (AX)
7496 ADDQ $0x02, AX
7497 SUBL $0x08, R11
7498
7499 // emitRepeat
7500 LEAL -4(R11), R11
7501 JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
7502 MOVL R11, BX
7503 LEAL -4(R11), R11
7504 CMPL BX, $0x08
7505 JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
7506 CMPL BX, $0x0c
7507 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
7508 CMPL DI, $0x00000800
7509 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
7510
7511cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
7512 CMPL R11, $0x00000104
7513 JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
7514 CMPL R11, $0x00010100
7515 JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
7516 LEAL -65536(R11), R11
7517 MOVL R11, DI
7518 MOVW $0x001d, (AX)
7519 MOVW R11, 2(AX)
7520 SARL $0x10, DI
7521 MOVB DI, 4(AX)
7522 ADDQ $0x05, AX
7523 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7524
7525repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
7526 LEAL -256(R11), R11
7527 MOVW $0x0019, (AX)
7528 MOVW R11, 2(AX)
7529 ADDQ $0x04, AX
7530 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7531
7532repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
7533 LEAL -4(R11), R11
7534 MOVW $0x0015, (AX)
7535 MOVB R11, 2(AX)
7536 ADDQ $0x03, AX
7537 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7538
7539repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
7540 SHLL $0x02, R11
7541 ORL $0x01, R11
7542 MOVW R11, (AX)
7543 ADDQ $0x02, AX
7544 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7545
7546repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
7547 XORQ BX, BX
7548 LEAL 1(BX)(R11*4), R11
7549 MOVB DI, 1(AX)
7550 SARL $0x08, DI
7551 SHLL $0x05, DI
7552 ORL DI, R11
7553 MOVB R11, (AX)
7554 ADDQ $0x02, AX
7555 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7556
7557long_offset_short_match_nolit_encodeBetterBlockAsm4MB:
7558 MOVB $0xee, (AX)
7559 MOVW DI, 1(AX)
7560 LEAL -60(R11), R11
7561 ADDQ $0x03, AX
7562
7563 // emitRepeat
7564 MOVL R11, BX
7565 LEAL -4(R11), R11
7566 CMPL BX, $0x08
7567 JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
7568 CMPL BX, $0x0c
7569 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
7570 CMPL DI, $0x00000800
7571 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
7572
7573cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
7574 CMPL R11, $0x00000104
7575 JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
7576 CMPL R11, $0x00010100
7577 JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
7578 LEAL -65536(R11), R11
7579 MOVL R11, DI
7580 MOVW $0x001d, (AX)
7581 MOVW R11, 2(AX)
7582 SARL $0x10, DI
7583 MOVB DI, 4(AX)
7584 ADDQ $0x05, AX
7585 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7586
7587repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
7588 LEAL -256(R11), R11
7589 MOVW $0x0019, (AX)
7590 MOVW R11, 2(AX)
7591 ADDQ $0x04, AX
7592 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7593
7594repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
7595 LEAL -4(R11), R11
7596 MOVW $0x0015, (AX)
7597 MOVB R11, 2(AX)
7598 ADDQ $0x03, AX
7599 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7600
7601repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
7602 SHLL $0x02, R11
7603 ORL $0x01, R11
7604 MOVW R11, (AX)
7605 ADDQ $0x02, AX
7606 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7607
7608repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
7609 XORQ BX, BX
7610 LEAL 1(BX)(R11*4), R11
7611 MOVB DI, 1(AX)
7612 SARL $0x08, DI
7613 SHLL $0x05, DI
7614 ORL DI, R11
7615 MOVB R11, (AX)
7616 ADDQ $0x02, AX
7617 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7618
7619two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB:
7620 MOVL R11, BX
7621 SHLL $0x02, BX
7622 CMPL R11, $0x0c
7623 JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
7624 CMPL DI, $0x00000800
7625 JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
7626 LEAL -15(BX), BX
7627 MOVB DI, 1(AX)
7628 SHRL $0x08, DI
7629 SHLL $0x05, DI
7630 ORL DI, BX
7631 MOVB BL, (AX)
7632 ADDQ $0x02, AX
7633 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7634
7635emit_copy_three_match_nolit_encodeBetterBlockAsm4MB:
7636 LEAL -2(BX), BX
7637 MOVB BL, (AX)
7638 MOVW DI, 1(AX)
7639 ADDQ $0x03, AX
7640 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7641
7642match_is_repeat_encodeBetterBlockAsm4MB:
7643 MOVL 12(SP), BX
7644 CMPL BX, SI
7645 JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
7646 MOVL SI, R8
7647 MOVL SI, 12(SP)
7648 LEAQ (DX)(BX*1), R9
7649 SUBL BX, R8
7650 LEAL -1(R8), BX
7651 CMPL BX, $0x3c
7652 JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB
7653 CMPL BX, $0x00000100
7654 JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
7655 CMPL BX, $0x00010000
7656 JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
7657 MOVL BX, R10
7658 SHRL $0x10, R10
7659 MOVB $0xf8, (AX)
7660 MOVW BX, 1(AX)
7661 MOVB R10, 3(AX)
7662 ADDQ $0x04, AX
7663 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
7664
7665three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
7666 MOVB $0xf4, (AX)
7667 MOVW BX, 1(AX)
7668 ADDQ $0x03, AX
7669 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
7670
7671two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
7672 MOVB $0xf0, (AX)
7673 MOVB BL, 1(AX)
7674 ADDQ $0x02, AX
7675 CMPL BX, $0x40
7676 JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB
7677 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
7678
7679one_byte_match_emit_repeat_encodeBetterBlockAsm4MB:
7680 SHLB $0x02, BL
7681 MOVB BL, (AX)
7682 ADDQ $0x01, AX
7683
7684memmove_match_emit_repeat_encodeBetterBlockAsm4MB:
7685 LEAQ (AX)(R8*1), BX
7686
7687 // genMemMoveShort
7688 CMPQ R8, $0x04
7689 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4
7690 CMPQ R8, $0x08
7691 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7
7692 CMPQ R8, $0x10
7693 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16
7694 CMPQ R8, $0x20
7695 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32
7696 JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64
7697
7698emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4:
7699 MOVL (R9), R10
7700 MOVL R10, (AX)
7701 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
7702
7703emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7:
7704 MOVL (R9), R10
7705 MOVL -4(R9)(R8*1), R9
7706 MOVL R10, (AX)
7707 MOVL R9, -4(AX)(R8*1)
7708 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
7709
7710emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16:
7711 MOVQ (R9), R10
7712 MOVQ -8(R9)(R8*1), R9
7713 MOVQ R10, (AX)
7714 MOVQ R9, -8(AX)(R8*1)
7715 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
7716
7717emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32:
7718 MOVOU (R9), X0
7719 MOVOU -16(R9)(R8*1), X1
7720 MOVOU X0, (AX)
7721 MOVOU X1, -16(AX)(R8*1)
7722 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
7723
7724emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64:
7725 MOVOU (R9), X0
7726 MOVOU 16(R9), X1
7727 MOVOU -32(R9)(R8*1), X2
7728 MOVOU -16(R9)(R8*1), X3
7729 MOVOU X0, (AX)
7730 MOVOU X1, 16(AX)
7731 MOVOU X2, -32(AX)(R8*1)
7732 MOVOU X3, -16(AX)(R8*1)
7733
7734memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB:
7735 MOVQ BX, AX
7736 JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
7737
7738memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB:
7739 LEAQ (AX)(R8*1), BX
7740
7741 // genMemMoveLong
7742 MOVOU (R9), X0
7743 MOVOU 16(R9), X1
7744 MOVOU -32(R9)(R8*1), X2
7745 MOVOU -16(R9)(R8*1), X3
7746 MOVQ R8, R12
7747 SHRQ $0x05, R12
7748 MOVQ AX, R10
7749 ANDL $0x0000001f, R10
7750 MOVQ $0x00000040, R13
7751 SUBQ R10, R13
7752 DECQ R12
7753 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
7754 LEAQ -32(R9)(R13*1), R10
7755 LEAQ -32(AX)(R13*1), R14
7756
7757emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back:
7758 MOVOU (R10), X4
7759 MOVOU 16(R10), X5
7760 MOVOA X4, (R14)
7761 MOVOA X5, 16(R14)
7762 ADDQ $0x20, R14
7763 ADDQ $0x20, R10
7764 ADDQ $0x20, R13
7765 DECQ R12
7766 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back
7767
7768emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
7769 MOVOU -32(R9)(R13*1), X4
7770 MOVOU -16(R9)(R13*1), X5
7771 MOVOA X4, -32(AX)(R13*1)
7772 MOVOA X5, -16(AX)(R13*1)
7773 ADDQ $0x20, R13
7774 CMPQ R8, R13
7775 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
7776 MOVOU X0, (AX)
7777 MOVOU X1, 16(AX)
7778 MOVOU X2, -32(AX)(R8*1)
7779 MOVOU X3, -16(AX)(R8*1)
7780 MOVQ BX, AX
7781
7782emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB:
7783 ADDL R11, CX
7784 ADDL $0x04, R11
7785 MOVL CX, 12(SP)
7786
7787 // emitRepeat
7788 MOVL R11, BX
7789 LEAL -4(R11), R11
7790 CMPL BX, $0x08
7791 JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB
7792 CMPL BX, $0x0c
7793 JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
7794 CMPL DI, $0x00000800
7795 JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
7796
7797cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
7798 CMPL R11, $0x00000104
7799 JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB
7800 CMPL R11, $0x00010100
7801 JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB
7802 LEAL -65536(R11), R11
7803 MOVL R11, DI
7804 MOVW $0x001d, (AX)
7805 MOVW R11, 2(AX)
7806 SARL $0x10, DI
7807 MOVB DI, 4(AX)
7808 ADDQ $0x05, AX
7809 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7810
7811repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB:
7812 LEAL -256(R11), R11
7813 MOVW $0x0019, (AX)
7814 MOVW R11, 2(AX)
7815 ADDQ $0x04, AX
7816 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7817
7818repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB:
7819 LEAL -4(R11), R11
7820 MOVW $0x0015, (AX)
7821 MOVB R11, 2(AX)
7822 ADDQ $0x03, AX
7823 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7824
7825repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB:
7826 SHLL $0x02, R11
7827 ORL $0x01, R11
7828 MOVW R11, (AX)
7829 ADDQ $0x02, AX
7830 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
7831
7832repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
7833 XORQ BX, BX
7834 LEAL 1(BX)(R11*4), R11
7835 MOVB DI, 1(AX)
7836 SARL $0x08, DI
7837 SHLL $0x05, DI
7838 ORL DI, R11
7839 MOVB R11, (AX)
7840 ADDQ $0x02, AX
7841
7842match_nolit_emitcopy_end_encodeBetterBlockAsm4MB:
7843 CMPL CX, 8(SP)
7844 JAE emit_remainder_encodeBetterBlockAsm4MB
7845 CMPQ AX, (SP)
7846 JB match_nolit_dst_ok_encodeBetterBlockAsm4MB
7847 MOVQ $0x00000000, ret+48(FP)
7848 RET
7849
7850match_nolit_dst_ok_encodeBetterBlockAsm4MB:
7851 MOVQ $0x00cf1bbcdcbfa563, BX
7852 MOVQ $0x9e3779b1, DI
7853 LEAQ 1(SI), SI
7854 LEAQ -2(CX), R8
7855 MOVQ (DX)(SI*1), R9
7856 MOVQ 1(DX)(SI*1), R10
7857 MOVQ (DX)(R8*1), R11
7858 MOVQ 1(DX)(R8*1), R12
7859 SHLQ $0x08, R9
7860 IMULQ BX, R9
7861 SHRQ $0x2f, R9
7862 SHLQ $0x20, R10
7863 IMULQ DI, R10
7864 SHRQ $0x32, R10
7865 SHLQ $0x08, R11
7866 IMULQ BX, R11
7867 SHRQ $0x2f, R11
7868 SHLQ $0x20, R12
7869 IMULQ DI, R12
7870 SHRQ $0x32, R12
7871 LEAQ 1(SI), DI
7872 LEAQ 1(R8), R13
7873 MOVL SI, 24(SP)(R9*4)
7874 MOVL R8, 24(SP)(R11*4)
7875 MOVL DI, 524312(SP)(R10*4)
7876 MOVL R13, 524312(SP)(R12*4)
7877 LEAQ 1(R8)(SI*1), DI
7878 SHRQ $0x01, DI
7879 ADDQ $0x01, SI
7880 SUBQ $0x01, R8
7881
7882index_loop_encodeBetterBlockAsm4MB:
7883 CMPQ DI, R8
7884 JAE search_loop_encodeBetterBlockAsm4MB
7885 MOVQ (DX)(SI*1), R9
7886 MOVQ (DX)(DI*1), R10
7887 SHLQ $0x08, R9
7888 IMULQ BX, R9
7889 SHRQ $0x2f, R9
7890 SHLQ $0x08, R10
7891 IMULQ BX, R10
7892 SHRQ $0x2f, R10
7893 MOVL SI, 24(SP)(R9*4)
7894 MOVL DI, 24(SP)(R10*4)
7895 ADDQ $0x02, SI
7896 ADDQ $0x02, DI
7897 JMP index_loop_encodeBetterBlockAsm4MB
7898
7899emit_remainder_encodeBetterBlockAsm4MB:
7900 MOVQ src_len+32(FP), CX
7901 SUBL 12(SP), CX
7902 LEAQ 4(AX)(CX*1), CX
7903 CMPQ CX, (SP)
7904 JB emit_remainder_ok_encodeBetterBlockAsm4MB
7905 MOVQ $0x00000000, ret+48(FP)
7906 RET
7907
7908emit_remainder_ok_encodeBetterBlockAsm4MB:
7909 MOVQ src_len+32(FP), CX
7910 MOVL 12(SP), BX
7911 CMPL BX, CX
7912 JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
7913 MOVL CX, SI
7914 MOVL CX, 12(SP)
7915 LEAQ (DX)(BX*1), CX
7916 SUBL BX, SI
7917 LEAL -1(SI), DX
7918 CMPL DX, $0x3c
7919 JB one_byte_emit_remainder_encodeBetterBlockAsm4MB
7920 CMPL DX, $0x00000100
7921 JB two_bytes_emit_remainder_encodeBetterBlockAsm4MB
7922 CMPL DX, $0x00010000
7923 JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB
7924 MOVL DX, BX
7925 SHRL $0x10, BX
7926 MOVB $0xf8, (AX)
7927 MOVW DX, 1(AX)
7928 MOVB BL, 3(AX)
7929 ADDQ $0x04, AX
7930 JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
7931
7932three_bytes_emit_remainder_encodeBetterBlockAsm4MB:
7933 MOVB $0xf4, (AX)
7934 MOVW DX, 1(AX)
7935 ADDQ $0x03, AX
7936 JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
7937
7938two_bytes_emit_remainder_encodeBetterBlockAsm4MB:
7939 MOVB $0xf0, (AX)
7940 MOVB DL, 1(AX)
7941 ADDQ $0x02, AX
7942 CMPL DX, $0x40
7943 JB memmove_emit_remainder_encodeBetterBlockAsm4MB
7944 JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
7945
7946one_byte_emit_remainder_encodeBetterBlockAsm4MB:
7947 SHLB $0x02, DL
7948 MOVB DL, (AX)
7949 ADDQ $0x01, AX
7950
7951memmove_emit_remainder_encodeBetterBlockAsm4MB:
7952 LEAQ (AX)(SI*1), DX
7953 MOVL SI, BX
7954
7955 // genMemMoveShort
7956 CMPQ BX, $0x03
7957 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2
7958 JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3
7959 CMPQ BX, $0x08
7960 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7
7961 CMPQ BX, $0x10
7962 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16
7963 CMPQ BX, $0x20
7964 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32
7965 JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64
7966
7967emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2:
7968 MOVB (CX), SI
7969 MOVB -1(CX)(BX*1), CL
7970 MOVB SI, (AX)
7971 MOVB CL, -1(AX)(BX*1)
7972 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
7973
7974emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3:
7975 MOVW (CX), SI
7976 MOVB 2(CX), CL
7977 MOVW SI, (AX)
7978 MOVB CL, 2(AX)
7979 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
7980
7981emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7:
7982 MOVL (CX), SI
7983 MOVL -4(CX)(BX*1), CX
7984 MOVL SI, (AX)
7985 MOVL CX, -4(AX)(BX*1)
7986 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
7987
7988emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16:
7989 MOVQ (CX), SI
7990 MOVQ -8(CX)(BX*1), CX
7991 MOVQ SI, (AX)
7992 MOVQ CX, -8(AX)(BX*1)
7993 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
7994
7995emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32:
7996 MOVOU (CX), X0
7997 MOVOU -16(CX)(BX*1), X1
7998 MOVOU X0, (AX)
7999 MOVOU X1, -16(AX)(BX*1)
8000 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
8001
8002emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64:
8003 MOVOU (CX), X0
8004 MOVOU 16(CX), X1
8005 MOVOU -32(CX)(BX*1), X2
8006 MOVOU -16(CX)(BX*1), X3
8007 MOVOU X0, (AX)
8008 MOVOU X1, 16(AX)
8009 MOVOU X2, -32(AX)(BX*1)
8010 MOVOU X3, -16(AX)(BX*1)
8011
8012memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB:
8013 MOVQ DX, AX
8014 JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
8015
8016memmove_long_emit_remainder_encodeBetterBlockAsm4MB:
8017 LEAQ (AX)(SI*1), DX
8018 MOVL SI, BX
8019
8020 // genMemMoveLong
8021 MOVOU (CX), X0
8022 MOVOU 16(CX), X1
8023 MOVOU -32(CX)(BX*1), X2
8024 MOVOU -16(CX)(BX*1), X3
8025 MOVQ BX, DI
8026 SHRQ $0x05, DI
8027 MOVQ AX, SI
8028 ANDL $0x0000001f, SI
8029 MOVQ $0x00000040, R8
8030 SUBQ SI, R8
8031 DECQ DI
8032 JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
8033 LEAQ -32(CX)(R8*1), SI
8034 LEAQ -32(AX)(R8*1), R9
8035
8036emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back:
8037 MOVOU (SI), X4
8038 MOVOU 16(SI), X5
8039 MOVOA X4, (R9)
8040 MOVOA X5, 16(R9)
8041 ADDQ $0x20, R9
8042 ADDQ $0x20, SI
8043 ADDQ $0x20, R8
8044 DECQ DI
8045 JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back
8046
8047emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
8048 MOVOU -32(CX)(R8*1), X4
8049 MOVOU -16(CX)(R8*1), X5
8050 MOVOA X4, -32(AX)(R8*1)
8051 MOVOA X5, -16(AX)(R8*1)
8052 ADDQ $0x20, R8
8053 CMPQ BX, R8
8054 JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
8055 MOVOU X0, (AX)
8056 MOVOU X1, 16(AX)
8057 MOVOU X2, -32(AX)(BX*1)
8058 MOVOU X3, -16(AX)(BX*1)
8059 MOVQ DX, AX
8060
8061emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB:
8062 MOVQ dst_base+0(FP), CX
8063 SUBQ CX, AX
8064 MOVQ AX, ret+48(FP)
8065 RET
8066
8067// func encodeBetterBlockAsm12B(dst []byte, src []byte) int
8068// Requires: BMI, SSE2
8069TEXT ·encodeBetterBlockAsm12B(SB), $81944-56
8070 MOVQ dst_base+0(FP), AX
8071 MOVQ $0x00000280, CX
8072 LEAQ 24(SP), DX
8073 PXOR X0, X0
8074
8075zero_loop_encodeBetterBlockAsm12B:
8076 MOVOU X0, (DX)
8077 MOVOU X0, 16(DX)
8078 MOVOU X0, 32(DX)
8079 MOVOU X0, 48(DX)
8080 MOVOU X0, 64(DX)
8081 MOVOU X0, 80(DX)
8082 MOVOU X0, 96(DX)
8083 MOVOU X0, 112(DX)
8084 ADDQ $0x80, DX
8085 DECQ CX
8086 JNZ zero_loop_encodeBetterBlockAsm12B
8087 MOVL $0x00000000, 12(SP)
8088 MOVQ src_len+32(FP), CX
8089 LEAQ -6(CX), DX
8090 LEAQ -8(CX), BX
8091 MOVL BX, 8(SP)
8092 SHRQ $0x05, CX
8093 SUBL CX, DX
8094 LEAQ (AX)(DX*1), DX
8095 MOVQ DX, (SP)
8096 MOVL $0x00000001, CX
8097 MOVL $0x00000000, 16(SP)
8098 MOVQ src_base+24(FP), DX
8099
8100search_loop_encodeBetterBlockAsm12B:
8101 MOVL CX, BX
8102 SUBL 12(SP), BX
8103 SHRL $0x06, BX
8104 LEAL 1(CX)(BX*1), BX
8105 CMPL BX, 8(SP)
8106 JAE emit_remainder_encodeBetterBlockAsm12B
8107 MOVQ (DX)(CX*1), SI
8108 MOVL BX, 20(SP)
8109 MOVQ $0x0000cf1bbcdcbf9b, R8
8110 MOVQ $0x9e3779b1, BX
8111 MOVQ SI, R9
8112 MOVQ SI, R10
8113 SHLQ $0x10, R9
8114 IMULQ R8, R9
8115 SHRQ $0x32, R9
8116 SHLQ $0x20, R10
8117 IMULQ BX, R10
8118 SHRQ $0x34, R10
8119 MOVL 24(SP)(R9*4), BX
8120 MOVL 65560(SP)(R10*4), DI
8121 MOVL CX, 24(SP)(R9*4)
8122 MOVL CX, 65560(SP)(R10*4)
8123 MOVQ (DX)(BX*1), R9
8124 MOVQ (DX)(DI*1), R10
8125 CMPQ R9, SI
8126 JEQ candidate_match_encodeBetterBlockAsm12B
8127 CMPQ R10, SI
8128 JNE no_short_found_encodeBetterBlockAsm12B
8129 MOVL DI, BX
8130 JMP candidate_match_encodeBetterBlockAsm12B
8131
8132no_short_found_encodeBetterBlockAsm12B:
8133 CMPL R9, SI
8134 JEQ candidate_match_encodeBetterBlockAsm12B
8135 CMPL R10, SI
8136 JEQ candidateS_match_encodeBetterBlockAsm12B
8137 MOVL 20(SP), CX
8138 JMP search_loop_encodeBetterBlockAsm12B
8139
8140candidateS_match_encodeBetterBlockAsm12B:
8141 SHRQ $0x08, SI
8142 MOVQ SI, R9
8143 SHLQ $0x10, R9
8144 IMULQ R8, R9
8145 SHRQ $0x32, R9
8146 MOVL 24(SP)(R9*4), BX
8147 INCL CX
8148 MOVL CX, 24(SP)(R9*4)
8149 CMPL (DX)(BX*1), SI
8150 JEQ candidate_match_encodeBetterBlockAsm12B
8151 DECL CX
8152 MOVL DI, BX
8153
8154candidate_match_encodeBetterBlockAsm12B:
8155 MOVL 12(SP), SI
8156 TESTL BX, BX
8157 JZ match_extend_back_end_encodeBetterBlockAsm12B
8158
8159match_extend_back_loop_encodeBetterBlockAsm12B:
8160 CMPL CX, SI
8161 JBE match_extend_back_end_encodeBetterBlockAsm12B
8162 MOVB -1(DX)(BX*1), DI
8163 MOVB -1(DX)(CX*1), R8
8164 CMPB DI, R8
8165 JNE match_extend_back_end_encodeBetterBlockAsm12B
8166 LEAL -1(CX), CX
8167 DECL BX
8168 JZ match_extend_back_end_encodeBetterBlockAsm12B
8169 JMP match_extend_back_loop_encodeBetterBlockAsm12B
8170
8171match_extend_back_end_encodeBetterBlockAsm12B:
8172 MOVL CX, SI
8173 SUBL 12(SP), SI
8174 LEAQ 3(AX)(SI*1), SI
8175 CMPQ SI, (SP)
8176 JB match_dst_size_check_encodeBetterBlockAsm12B
8177 MOVQ $0x00000000, ret+48(FP)
8178 RET
8179
8180match_dst_size_check_encodeBetterBlockAsm12B:
8181 MOVL CX, SI
8182 ADDL $0x04, CX
8183 ADDL $0x04, BX
8184 MOVQ src_len+32(FP), DI
8185 SUBL CX, DI
8186 LEAQ (DX)(CX*1), R8
8187 LEAQ (DX)(BX*1), R9
8188
8189 // matchLen
8190 XORL R11, R11
8191
8192matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B:
8193 CMPL DI, $0x10
8194 JB matchlen_match8_match_nolit_encodeBetterBlockAsm12B
8195 MOVQ (R8)(R11*1), R10
8196 MOVQ 8(R8)(R11*1), R12
8197 XORQ (R9)(R11*1), R10
8198 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B
8199 XORQ 8(R9)(R11*1), R12
8200 JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B
8201 LEAL -16(DI), DI
8202 LEAL 16(R11), R11
8203 JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B
8204
8205matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B:
8206#ifdef GOAMD64_v3
8207 TZCNTQ R12, R12
8208
8209#else
8210 BSFQ R12, R12
8211
8212#endif
8213 SARQ $0x03, R12
8214 LEAL 8(R11)(R12*1), R11
8215 JMP match_nolit_end_encodeBetterBlockAsm12B
8216
8217matchlen_match8_match_nolit_encodeBetterBlockAsm12B:
8218 CMPL DI, $0x08
8219 JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B
8220 MOVQ (R8)(R11*1), R10
8221 XORQ (R9)(R11*1), R10
8222 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B
8223 LEAL -8(DI), DI
8224 LEAL 8(R11), R11
8225 JMP matchlen_match4_match_nolit_encodeBetterBlockAsm12B
8226
8227matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B:
8228#ifdef GOAMD64_v3
8229 TZCNTQ R10, R10
8230
8231#else
8232 BSFQ R10, R10
8233
8234#endif
8235 SARQ $0x03, R10
8236 LEAL (R11)(R10*1), R11
8237 JMP match_nolit_end_encodeBetterBlockAsm12B
8238
8239matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
8240 CMPL DI, $0x04
8241 JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B
8242 MOVL (R8)(R11*1), R10
8243 CMPL (R9)(R11*1), R10
8244 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B
8245 LEAL -4(DI), DI
8246 LEAL 4(R11), R11
8247
8248matchlen_match2_match_nolit_encodeBetterBlockAsm12B:
8249 CMPL DI, $0x01
8250 JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
8251 JB match_nolit_end_encodeBetterBlockAsm12B
8252 MOVW (R8)(R11*1), R10
8253 CMPW (R9)(R11*1), R10
8254 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
8255 LEAL 2(R11), R11
8256 SUBL $0x02, DI
8257 JZ match_nolit_end_encodeBetterBlockAsm12B
8258
8259matchlen_match1_match_nolit_encodeBetterBlockAsm12B:
8260 MOVB (R8)(R11*1), R10
8261 CMPB (R9)(R11*1), R10
8262 JNE match_nolit_end_encodeBetterBlockAsm12B
8263 LEAL 1(R11), R11
8264
8265match_nolit_end_encodeBetterBlockAsm12B:
8266 MOVL CX, DI
8267 SUBL BX, DI
8268
8269 // Check if repeat
8270 CMPL 16(SP), DI
8271 JEQ match_is_repeat_encodeBetterBlockAsm12B
8272 MOVL DI, 16(SP)
8273 MOVL 12(SP), BX
8274 CMPL BX, SI
8275 JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B
8276 MOVL SI, R8
8277 MOVL SI, 12(SP)
8278 LEAQ (DX)(BX*1), R9
8279 SUBL BX, R8
8280 LEAL -1(R8), BX
8281 CMPL BX, $0x3c
8282 JB one_byte_match_emit_encodeBetterBlockAsm12B
8283 CMPL BX, $0x00000100
8284 JB two_bytes_match_emit_encodeBetterBlockAsm12B
8285 JB three_bytes_match_emit_encodeBetterBlockAsm12B
8286
8287three_bytes_match_emit_encodeBetterBlockAsm12B:
8288 MOVB $0xf4, (AX)
8289 MOVW BX, 1(AX)
8290 ADDQ $0x03, AX
8291 JMP memmove_long_match_emit_encodeBetterBlockAsm12B
8292
8293two_bytes_match_emit_encodeBetterBlockAsm12B:
8294 MOVB $0xf0, (AX)
8295 MOVB BL, 1(AX)
8296 ADDQ $0x02, AX
8297 CMPL BX, $0x40
8298 JB memmove_match_emit_encodeBetterBlockAsm12B
8299 JMP memmove_long_match_emit_encodeBetterBlockAsm12B
8300
8301one_byte_match_emit_encodeBetterBlockAsm12B:
8302 SHLB $0x02, BL
8303 MOVB BL, (AX)
8304 ADDQ $0x01, AX
8305
8306memmove_match_emit_encodeBetterBlockAsm12B:
8307 LEAQ (AX)(R8*1), BX
8308
8309 // genMemMoveShort
8310 CMPQ R8, $0x04
8311 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4
8312 CMPQ R8, $0x08
8313 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7
8314 CMPQ R8, $0x10
8315 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16
8316 CMPQ R8, $0x20
8317 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32
8318 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64
8319
8320emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4:
8321 MOVL (R9), R10
8322 MOVL R10, (AX)
8323 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
8324
8325emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7:
8326 MOVL (R9), R10
8327 MOVL -4(R9)(R8*1), R9
8328 MOVL R10, (AX)
8329 MOVL R9, -4(AX)(R8*1)
8330 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
8331
8332emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16:
8333 MOVQ (R9), R10
8334 MOVQ -8(R9)(R8*1), R9
8335 MOVQ R10, (AX)
8336 MOVQ R9, -8(AX)(R8*1)
8337 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
8338
8339emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32:
8340 MOVOU (R9), X0
8341 MOVOU -16(R9)(R8*1), X1
8342 MOVOU X0, (AX)
8343 MOVOU X1, -16(AX)(R8*1)
8344 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
8345
8346emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64:
8347 MOVOU (R9), X0
8348 MOVOU 16(R9), X1
8349 MOVOU -32(R9)(R8*1), X2
8350 MOVOU -16(R9)(R8*1), X3
8351 MOVOU X0, (AX)
8352 MOVOU X1, 16(AX)
8353 MOVOU X2, -32(AX)(R8*1)
8354 MOVOU X3, -16(AX)(R8*1)
8355
8356memmove_end_copy_match_emit_encodeBetterBlockAsm12B:
8357 MOVQ BX, AX
8358 JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B
8359
8360memmove_long_match_emit_encodeBetterBlockAsm12B:
8361 LEAQ (AX)(R8*1), BX
8362
8363 // genMemMoveLong
8364 MOVOU (R9), X0
8365 MOVOU 16(R9), X1
8366 MOVOU -32(R9)(R8*1), X2
8367 MOVOU -16(R9)(R8*1), X3
8368 MOVQ R8, R12
8369 SHRQ $0x05, R12
8370 MOVQ AX, R10
8371 ANDL $0x0000001f, R10
8372 MOVQ $0x00000040, R13
8373 SUBQ R10, R13
8374 DECQ R12
8375 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
8376 LEAQ -32(R9)(R13*1), R10
8377 LEAQ -32(AX)(R13*1), R14
8378
8379emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back:
8380 MOVOU (R10), X4
8381 MOVOU 16(R10), X5
8382 MOVOA X4, (R14)
8383 MOVOA X5, 16(R14)
8384 ADDQ $0x20, R14
8385 ADDQ $0x20, R10
8386 ADDQ $0x20, R13
8387 DECQ R12
8388 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back
8389
8390emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
8391 MOVOU -32(R9)(R13*1), X4
8392 MOVOU -16(R9)(R13*1), X5
8393 MOVOA X4, -32(AX)(R13*1)
8394 MOVOA X5, -16(AX)(R13*1)
8395 ADDQ $0x20, R13
8396 CMPQ R8, R13
8397 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
8398 MOVOU X0, (AX)
8399 MOVOU X1, 16(AX)
8400 MOVOU X2, -32(AX)(R8*1)
8401 MOVOU X3, -16(AX)(R8*1)
8402 MOVQ BX, AX
8403
8404emit_literal_done_match_emit_encodeBetterBlockAsm12B:
8405 ADDL R11, CX
8406 ADDL $0x04, R11
8407 MOVL CX, 12(SP)
8408
8409 // emitCopy
8410 CMPL R11, $0x40
8411 JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B
8412 CMPL DI, $0x00000800
8413 JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B
8414 MOVL $0x00000001, BX
8415 LEAL 16(BX), BX
8416 MOVB DI, 1(AX)
8417 SHRL $0x08, DI
8418 SHLL $0x05, DI
8419 ORL DI, BX
8420 MOVB BL, (AX)
8421 ADDQ $0x02, AX
8422 SUBL $0x08, R11
8423
8424 // emitRepeat
8425 LEAL -4(R11), R11
8426 JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
8427 MOVL R11, BX
8428 LEAL -4(R11), R11
8429 CMPL BX, $0x08
8430 JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
8431 CMPL BX, $0x0c
8432 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
8433 CMPL DI, $0x00000800
8434 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
8435
8436cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
8437 CMPL R11, $0x00000104
8438 JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
8439 LEAL -256(R11), R11
8440 MOVW $0x0019, (AX)
8441 MOVW R11, 2(AX)
8442 ADDQ $0x04, AX
8443 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8444
8445repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
8446 LEAL -4(R11), R11
8447 MOVW $0x0015, (AX)
8448 MOVB R11, 2(AX)
8449 ADDQ $0x03, AX
8450 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8451
8452repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
8453 SHLL $0x02, R11
8454 ORL $0x01, R11
8455 MOVW R11, (AX)
8456 ADDQ $0x02, AX
8457 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8458
8459repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
8460 XORQ BX, BX
8461 LEAL 1(BX)(R11*4), R11
8462 MOVB DI, 1(AX)
8463 SARL $0x08, DI
8464 SHLL $0x05, DI
8465 ORL DI, R11
8466 MOVB R11, (AX)
8467 ADDQ $0x02, AX
8468 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8469
8470long_offset_short_match_nolit_encodeBetterBlockAsm12B:
8471 MOVB $0xee, (AX)
8472 MOVW DI, 1(AX)
8473 LEAL -60(R11), R11
8474 ADDQ $0x03, AX
8475
8476 // emitRepeat
8477 MOVL R11, BX
8478 LEAL -4(R11), R11
8479 CMPL BX, $0x08
8480 JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
8481 CMPL BX, $0x0c
8482 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
8483 CMPL DI, $0x00000800
8484 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
8485
8486cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
8487 CMPL R11, $0x00000104
8488 JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
8489 LEAL -256(R11), R11
8490 MOVW $0x0019, (AX)
8491 MOVW R11, 2(AX)
8492 ADDQ $0x04, AX
8493 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8494
8495repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
8496 LEAL -4(R11), R11
8497 MOVW $0x0015, (AX)
8498 MOVB R11, 2(AX)
8499 ADDQ $0x03, AX
8500 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8501
8502repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
8503 SHLL $0x02, R11
8504 ORL $0x01, R11
8505 MOVW R11, (AX)
8506 ADDQ $0x02, AX
8507 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8508
8509repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
8510 XORQ BX, BX
8511 LEAL 1(BX)(R11*4), R11
8512 MOVB DI, 1(AX)
8513 SARL $0x08, DI
8514 SHLL $0x05, DI
8515 ORL DI, R11
8516 MOVB R11, (AX)
8517 ADDQ $0x02, AX
8518 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8519
8520two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B:
8521 MOVL R11, BX
8522 SHLL $0x02, BX
8523 CMPL R11, $0x0c
8524 JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
8525 CMPL DI, $0x00000800
8526 JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
8527 LEAL -15(BX), BX
8528 MOVB DI, 1(AX)
8529 SHRL $0x08, DI
8530 SHLL $0x05, DI
8531 ORL DI, BX
8532 MOVB BL, (AX)
8533 ADDQ $0x02, AX
8534 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8535
8536emit_copy_three_match_nolit_encodeBetterBlockAsm12B:
8537 LEAL -2(BX), BX
8538 MOVB BL, (AX)
8539 MOVW DI, 1(AX)
8540 ADDQ $0x03, AX
8541 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8542
8543match_is_repeat_encodeBetterBlockAsm12B:
8544 MOVL 12(SP), BX
8545 CMPL BX, SI
8546 JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
8547 MOVL SI, R8
8548 MOVL SI, 12(SP)
8549 LEAQ (DX)(BX*1), R9
8550 SUBL BX, R8
8551 LEAL -1(R8), BX
8552 CMPL BX, $0x3c
8553 JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B
8554 CMPL BX, $0x00000100
8555 JB two_bytes_match_emit_repeat_encodeBetterBlockAsm12B
8556 JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B
8557
8558three_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
8559 MOVB $0xf4, (AX)
8560 MOVW BX, 1(AX)
8561 ADDQ $0x03, AX
8562 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
8563
8564two_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
8565 MOVB $0xf0, (AX)
8566 MOVB BL, 1(AX)
8567 ADDQ $0x02, AX
8568 CMPL BX, $0x40
8569 JB memmove_match_emit_repeat_encodeBetterBlockAsm12B
8570 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
8571
8572one_byte_match_emit_repeat_encodeBetterBlockAsm12B:
8573 SHLB $0x02, BL
8574 MOVB BL, (AX)
8575 ADDQ $0x01, AX
8576
8577memmove_match_emit_repeat_encodeBetterBlockAsm12B:
8578 LEAQ (AX)(R8*1), BX
8579
8580 // genMemMoveShort
8581 CMPQ R8, $0x04
8582 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4
8583 CMPQ R8, $0x08
8584 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7
8585 CMPQ R8, $0x10
8586 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16
8587 CMPQ R8, $0x20
8588 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32
8589 JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64
8590
8591emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4:
8592 MOVL (R9), R10
8593 MOVL R10, (AX)
8594 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
8595
8596emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7:
8597 MOVL (R9), R10
8598 MOVL -4(R9)(R8*1), R9
8599 MOVL R10, (AX)
8600 MOVL R9, -4(AX)(R8*1)
8601 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
8602
8603emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16:
8604 MOVQ (R9), R10
8605 MOVQ -8(R9)(R8*1), R9
8606 MOVQ R10, (AX)
8607 MOVQ R9, -8(AX)(R8*1)
8608 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
8609
8610emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32:
8611 MOVOU (R9), X0
8612 MOVOU -16(R9)(R8*1), X1
8613 MOVOU X0, (AX)
8614 MOVOU X1, -16(AX)(R8*1)
8615 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
8616
8617emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64:
8618 MOVOU (R9), X0
8619 MOVOU 16(R9), X1
8620 MOVOU -32(R9)(R8*1), X2
8621 MOVOU -16(R9)(R8*1), X3
8622 MOVOU X0, (AX)
8623 MOVOU X1, 16(AX)
8624 MOVOU X2, -32(AX)(R8*1)
8625 MOVOU X3, -16(AX)(R8*1)
8626
8627memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B:
8628 MOVQ BX, AX
8629 JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
8630
8631memmove_long_match_emit_repeat_encodeBetterBlockAsm12B:
8632 LEAQ (AX)(R8*1), BX
8633
8634 // genMemMoveLong
8635 MOVOU (R9), X0
8636 MOVOU 16(R9), X1
8637 MOVOU -32(R9)(R8*1), X2
8638 MOVOU -16(R9)(R8*1), X3
8639 MOVQ R8, R12
8640 SHRQ $0x05, R12
8641 MOVQ AX, R10
8642 ANDL $0x0000001f, R10
8643 MOVQ $0x00000040, R13
8644 SUBQ R10, R13
8645 DECQ R12
8646 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
8647 LEAQ -32(R9)(R13*1), R10
8648 LEAQ -32(AX)(R13*1), R14
8649
8650emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back:
8651 MOVOU (R10), X4
8652 MOVOU 16(R10), X5
8653 MOVOA X4, (R14)
8654 MOVOA X5, 16(R14)
8655 ADDQ $0x20, R14
8656 ADDQ $0x20, R10
8657 ADDQ $0x20, R13
8658 DECQ R12
8659 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back
8660
8661emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
8662 MOVOU -32(R9)(R13*1), X4
8663 MOVOU -16(R9)(R13*1), X5
8664 MOVOA X4, -32(AX)(R13*1)
8665 MOVOA X5, -16(AX)(R13*1)
8666 ADDQ $0x20, R13
8667 CMPQ R8, R13
8668 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
8669 MOVOU X0, (AX)
8670 MOVOU X1, 16(AX)
8671 MOVOU X2, -32(AX)(R8*1)
8672 MOVOU X3, -16(AX)(R8*1)
8673 MOVQ BX, AX
8674
8675emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B:
8676 ADDL R11, CX
8677 ADDL $0x04, R11
8678 MOVL CX, 12(SP)
8679
8680 // emitRepeat
8681 MOVL R11, BX
8682 LEAL -4(R11), R11
8683 CMPL BX, $0x08
8684 JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B
8685 CMPL BX, $0x0c
8686 JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
8687 CMPL DI, $0x00000800
8688 JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
8689
8690cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
8691 CMPL R11, $0x00000104
8692 JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B
8693 LEAL -256(R11), R11
8694 MOVW $0x0019, (AX)
8695 MOVW R11, 2(AX)
8696 ADDQ $0x04, AX
8697 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8698
8699repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B:
8700 LEAL -4(R11), R11
8701 MOVW $0x0015, (AX)
8702 MOVB R11, 2(AX)
8703 ADDQ $0x03, AX
8704 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8705
8706repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B:
8707 SHLL $0x02, R11
8708 ORL $0x01, R11
8709 MOVW R11, (AX)
8710 ADDQ $0x02, AX
8711 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
8712
8713repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
8714 XORQ BX, BX
8715 LEAL 1(BX)(R11*4), R11
8716 MOVB DI, 1(AX)
8717 SARL $0x08, DI
8718 SHLL $0x05, DI
8719 ORL DI, R11
8720 MOVB R11, (AX)
8721 ADDQ $0x02, AX
8722
8723match_nolit_emitcopy_end_encodeBetterBlockAsm12B:
8724 CMPL CX, 8(SP)
8725 JAE emit_remainder_encodeBetterBlockAsm12B
8726 CMPQ AX, (SP)
8727 JB match_nolit_dst_ok_encodeBetterBlockAsm12B
8728 MOVQ $0x00000000, ret+48(FP)
8729 RET
8730
8731match_nolit_dst_ok_encodeBetterBlockAsm12B:
8732 MOVQ $0x0000cf1bbcdcbf9b, BX
8733 MOVQ $0x9e3779b1, DI
8734 LEAQ 1(SI), SI
8735 LEAQ -2(CX), R8
8736 MOVQ (DX)(SI*1), R9
8737 MOVQ 1(DX)(SI*1), R10
8738 MOVQ (DX)(R8*1), R11
8739 MOVQ 1(DX)(R8*1), R12
8740 SHLQ $0x10, R9
8741 IMULQ BX, R9
8742 SHRQ $0x32, R9
8743 SHLQ $0x20, R10
8744 IMULQ DI, R10
8745 SHRQ $0x34, R10
8746 SHLQ $0x10, R11
8747 IMULQ BX, R11
8748 SHRQ $0x32, R11
8749 SHLQ $0x20, R12
8750 IMULQ DI, R12
8751 SHRQ $0x34, R12
8752 LEAQ 1(SI), DI
8753 LEAQ 1(R8), R13
8754 MOVL SI, 24(SP)(R9*4)
8755 MOVL R8, 24(SP)(R11*4)
8756 MOVL DI, 65560(SP)(R10*4)
8757 MOVL R13, 65560(SP)(R12*4)
8758 LEAQ 1(R8)(SI*1), DI
8759 SHRQ $0x01, DI
8760 ADDQ $0x01, SI
8761 SUBQ $0x01, R8
8762
8763index_loop_encodeBetterBlockAsm12B:
8764 CMPQ DI, R8
8765 JAE search_loop_encodeBetterBlockAsm12B
8766 MOVQ (DX)(SI*1), R9
8767 MOVQ (DX)(DI*1), R10
8768 SHLQ $0x10, R9
8769 IMULQ BX, R9
8770 SHRQ $0x32, R9
8771 SHLQ $0x10, R10
8772 IMULQ BX, R10
8773 SHRQ $0x32, R10
8774 MOVL SI, 24(SP)(R9*4)
8775 MOVL DI, 24(SP)(R10*4)
8776 ADDQ $0x02, SI
8777 ADDQ $0x02, DI
8778 JMP index_loop_encodeBetterBlockAsm12B
8779
8780emit_remainder_encodeBetterBlockAsm12B:
8781 MOVQ src_len+32(FP), CX
8782 SUBL 12(SP), CX
8783 LEAQ 3(AX)(CX*1), CX
8784 CMPQ CX, (SP)
8785 JB emit_remainder_ok_encodeBetterBlockAsm12B
8786 MOVQ $0x00000000, ret+48(FP)
8787 RET
8788
8789emit_remainder_ok_encodeBetterBlockAsm12B:
8790 MOVQ src_len+32(FP), CX
8791 MOVL 12(SP), BX
8792 CMPL BX, CX
8793 JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
8794 MOVL CX, SI
8795 MOVL CX, 12(SP)
8796 LEAQ (DX)(BX*1), CX
8797 SUBL BX, SI
8798 LEAL -1(SI), DX
8799 CMPL DX, $0x3c
8800 JB one_byte_emit_remainder_encodeBetterBlockAsm12B
8801 CMPL DX, $0x00000100
8802 JB two_bytes_emit_remainder_encodeBetterBlockAsm12B
8803 JB three_bytes_emit_remainder_encodeBetterBlockAsm12B
8804
8805three_bytes_emit_remainder_encodeBetterBlockAsm12B:
8806 MOVB $0xf4, (AX)
8807 MOVW DX, 1(AX)
8808 ADDQ $0x03, AX
8809 JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
8810
8811two_bytes_emit_remainder_encodeBetterBlockAsm12B:
8812 MOVB $0xf0, (AX)
8813 MOVB DL, 1(AX)
8814 ADDQ $0x02, AX
8815 CMPL DX, $0x40
8816 JB memmove_emit_remainder_encodeBetterBlockAsm12B
8817 JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
8818
8819one_byte_emit_remainder_encodeBetterBlockAsm12B:
8820 SHLB $0x02, DL
8821 MOVB DL, (AX)
8822 ADDQ $0x01, AX
8823
8824memmove_emit_remainder_encodeBetterBlockAsm12B:
8825 LEAQ (AX)(SI*1), DX
8826 MOVL SI, BX
8827
8828 // genMemMoveShort
8829 CMPQ BX, $0x03
8830 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2
8831 JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3
8832 CMPQ BX, $0x08
8833 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7
8834 CMPQ BX, $0x10
8835 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16
8836 CMPQ BX, $0x20
8837 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32
8838 JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64
8839
8840emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2:
8841 MOVB (CX), SI
8842 MOVB -1(CX)(BX*1), CL
8843 MOVB SI, (AX)
8844 MOVB CL, -1(AX)(BX*1)
8845 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
8846
8847emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3:
8848 MOVW (CX), SI
8849 MOVB 2(CX), CL
8850 MOVW SI, (AX)
8851 MOVB CL, 2(AX)
8852 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
8853
8854emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7:
8855 MOVL (CX), SI
8856 MOVL -4(CX)(BX*1), CX
8857 MOVL SI, (AX)
8858 MOVL CX, -4(AX)(BX*1)
8859 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
8860
8861emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16:
8862 MOVQ (CX), SI
8863 MOVQ -8(CX)(BX*1), CX
8864 MOVQ SI, (AX)
8865 MOVQ CX, -8(AX)(BX*1)
8866 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
8867
8868emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32:
8869 MOVOU (CX), X0
8870 MOVOU -16(CX)(BX*1), X1
8871 MOVOU X0, (AX)
8872 MOVOU X1, -16(AX)(BX*1)
8873 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
8874
8875emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64:
8876 MOVOU (CX), X0
8877 MOVOU 16(CX), X1
8878 MOVOU -32(CX)(BX*1), X2
8879 MOVOU -16(CX)(BX*1), X3
8880 MOVOU X0, (AX)
8881 MOVOU X1, 16(AX)
8882 MOVOU X2, -32(AX)(BX*1)
8883 MOVOU X3, -16(AX)(BX*1)
8884
8885memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B:
8886 MOVQ DX, AX
8887 JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
8888
8889memmove_long_emit_remainder_encodeBetterBlockAsm12B:
8890 LEAQ (AX)(SI*1), DX
8891 MOVL SI, BX
8892
8893 // genMemMoveLong
8894 MOVOU (CX), X0
8895 MOVOU 16(CX), X1
8896 MOVOU -32(CX)(BX*1), X2
8897 MOVOU -16(CX)(BX*1), X3
8898 MOVQ BX, DI
8899 SHRQ $0x05, DI
8900 MOVQ AX, SI
8901 ANDL $0x0000001f, SI
8902 MOVQ $0x00000040, R8
8903 SUBQ SI, R8
8904 DECQ DI
8905 JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
8906 LEAQ -32(CX)(R8*1), SI
8907 LEAQ -32(AX)(R8*1), R9
8908
8909emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back:
8910 MOVOU (SI), X4
8911 MOVOU 16(SI), X5
8912 MOVOA X4, (R9)
8913 MOVOA X5, 16(R9)
8914 ADDQ $0x20, R9
8915 ADDQ $0x20, SI
8916 ADDQ $0x20, R8
8917 DECQ DI
8918 JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back
8919
8920emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
8921 MOVOU -32(CX)(R8*1), X4
8922 MOVOU -16(CX)(R8*1), X5
8923 MOVOA X4, -32(AX)(R8*1)
8924 MOVOA X5, -16(AX)(R8*1)
8925 ADDQ $0x20, R8
8926 CMPQ BX, R8
8927 JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
8928 MOVOU X0, (AX)
8929 MOVOU X1, 16(AX)
8930 MOVOU X2, -32(AX)(BX*1)
8931 MOVOU X3, -16(AX)(BX*1)
8932 MOVQ DX, AX
8933
8934emit_literal_done_emit_remainder_encodeBetterBlockAsm12B:
8935 MOVQ dst_base+0(FP), CX
8936 SUBQ CX, AX
8937 MOVQ AX, ret+48(FP)
8938 RET
8939
8940// func encodeBetterBlockAsm10B(dst []byte, src []byte) int
8941// Requires: BMI, SSE2
8942TEXT ·encodeBetterBlockAsm10B(SB), $20504-56
8943 MOVQ dst_base+0(FP), AX
8944 MOVQ $0x000000a0, CX
8945 LEAQ 24(SP), DX
8946 PXOR X0, X0
8947
8948zero_loop_encodeBetterBlockAsm10B:
8949 MOVOU X0, (DX)
8950 MOVOU X0, 16(DX)
8951 MOVOU X0, 32(DX)
8952 MOVOU X0, 48(DX)
8953 MOVOU X0, 64(DX)
8954 MOVOU X0, 80(DX)
8955 MOVOU X0, 96(DX)
8956 MOVOU X0, 112(DX)
8957 ADDQ $0x80, DX
8958 DECQ CX
8959 JNZ zero_loop_encodeBetterBlockAsm10B
8960 MOVL $0x00000000, 12(SP)
8961 MOVQ src_len+32(FP), CX
8962 LEAQ -6(CX), DX
8963 LEAQ -8(CX), BX
8964 MOVL BX, 8(SP)
8965 SHRQ $0x05, CX
8966 SUBL CX, DX
8967 LEAQ (AX)(DX*1), DX
8968 MOVQ DX, (SP)
8969 MOVL $0x00000001, CX
8970 MOVL $0x00000000, 16(SP)
8971 MOVQ src_base+24(FP), DX
8972
8973search_loop_encodeBetterBlockAsm10B:
8974 MOVL CX, BX
8975 SUBL 12(SP), BX
8976 SHRL $0x05, BX
8977 LEAL 1(CX)(BX*1), BX
8978 CMPL BX, 8(SP)
8979 JAE emit_remainder_encodeBetterBlockAsm10B
8980 MOVQ (DX)(CX*1), SI
8981 MOVL BX, 20(SP)
8982 MOVQ $0x0000cf1bbcdcbf9b, R8
8983 MOVQ $0x9e3779b1, BX
8984 MOVQ SI, R9
8985 MOVQ SI, R10
8986 SHLQ $0x10, R9
8987 IMULQ R8, R9
8988 SHRQ $0x34, R9
8989 SHLQ $0x20, R10
8990 IMULQ BX, R10
8991 SHRQ $0x36, R10
8992 MOVL 24(SP)(R9*4), BX
8993 MOVL 16408(SP)(R10*4), DI
8994 MOVL CX, 24(SP)(R9*4)
8995 MOVL CX, 16408(SP)(R10*4)
8996 MOVQ (DX)(BX*1), R9
8997 MOVQ (DX)(DI*1), R10
8998 CMPQ R9, SI
8999 JEQ candidate_match_encodeBetterBlockAsm10B
9000 CMPQ R10, SI
9001 JNE no_short_found_encodeBetterBlockAsm10B
9002 MOVL DI, BX
9003 JMP candidate_match_encodeBetterBlockAsm10B
9004
9005no_short_found_encodeBetterBlockAsm10B:
9006 CMPL R9, SI
9007 JEQ candidate_match_encodeBetterBlockAsm10B
9008 CMPL R10, SI
9009 JEQ candidateS_match_encodeBetterBlockAsm10B
9010 MOVL 20(SP), CX
9011 JMP search_loop_encodeBetterBlockAsm10B
9012
9013candidateS_match_encodeBetterBlockAsm10B:
9014 SHRQ $0x08, SI
9015 MOVQ SI, R9
9016 SHLQ $0x10, R9
9017 IMULQ R8, R9
9018 SHRQ $0x34, R9
9019 MOVL 24(SP)(R9*4), BX
9020 INCL CX
9021 MOVL CX, 24(SP)(R9*4)
9022 CMPL (DX)(BX*1), SI
9023 JEQ candidate_match_encodeBetterBlockAsm10B
9024 DECL CX
9025 MOVL DI, BX
9026
9027candidate_match_encodeBetterBlockAsm10B:
9028 MOVL 12(SP), SI
9029 TESTL BX, BX
9030 JZ match_extend_back_end_encodeBetterBlockAsm10B
9031
9032match_extend_back_loop_encodeBetterBlockAsm10B:
9033 CMPL CX, SI
9034 JBE match_extend_back_end_encodeBetterBlockAsm10B
9035 MOVB -1(DX)(BX*1), DI
9036 MOVB -1(DX)(CX*1), R8
9037 CMPB DI, R8
9038 JNE match_extend_back_end_encodeBetterBlockAsm10B
9039 LEAL -1(CX), CX
9040 DECL BX
9041 JZ match_extend_back_end_encodeBetterBlockAsm10B
9042 JMP match_extend_back_loop_encodeBetterBlockAsm10B
9043
9044match_extend_back_end_encodeBetterBlockAsm10B:
9045 MOVL CX, SI
9046 SUBL 12(SP), SI
9047 LEAQ 3(AX)(SI*1), SI
9048 CMPQ SI, (SP)
9049 JB match_dst_size_check_encodeBetterBlockAsm10B
9050 MOVQ $0x00000000, ret+48(FP)
9051 RET
9052
9053match_dst_size_check_encodeBetterBlockAsm10B:
9054 MOVL CX, SI
9055 ADDL $0x04, CX
9056 ADDL $0x04, BX
9057 MOVQ src_len+32(FP), DI
9058 SUBL CX, DI
9059 LEAQ (DX)(CX*1), R8
9060 LEAQ (DX)(BX*1), R9
9061
9062 // matchLen
9063 XORL R11, R11
9064
9065matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B:
9066 CMPL DI, $0x10
9067 JB matchlen_match8_match_nolit_encodeBetterBlockAsm10B
9068 MOVQ (R8)(R11*1), R10
9069 MOVQ 8(R8)(R11*1), R12
9070 XORQ (R9)(R11*1), R10
9071 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B
9072 XORQ 8(R9)(R11*1), R12
9073 JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B
9074 LEAL -16(DI), DI
9075 LEAL 16(R11), R11
9076 JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B
9077
9078matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B:
9079#ifdef GOAMD64_v3
9080 TZCNTQ R12, R12
9081
9082#else
9083 BSFQ R12, R12
9084
9085#endif
9086 SARQ $0x03, R12
9087 LEAL 8(R11)(R12*1), R11
9088 JMP match_nolit_end_encodeBetterBlockAsm10B
9089
9090matchlen_match8_match_nolit_encodeBetterBlockAsm10B:
9091 CMPL DI, $0x08
9092 JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B
9093 MOVQ (R8)(R11*1), R10
9094 XORQ (R9)(R11*1), R10
9095 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B
9096 LEAL -8(DI), DI
9097 LEAL 8(R11), R11
9098 JMP matchlen_match4_match_nolit_encodeBetterBlockAsm10B
9099
9100matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B:
9101#ifdef GOAMD64_v3
9102 TZCNTQ R10, R10
9103
9104#else
9105 BSFQ R10, R10
9106
9107#endif
9108 SARQ $0x03, R10
9109 LEAL (R11)(R10*1), R11
9110 JMP match_nolit_end_encodeBetterBlockAsm10B
9111
9112matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
9113 CMPL DI, $0x04
9114 JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B
9115 MOVL (R8)(R11*1), R10
9116 CMPL (R9)(R11*1), R10
9117 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B
9118 LEAL -4(DI), DI
9119 LEAL 4(R11), R11
9120
9121matchlen_match2_match_nolit_encodeBetterBlockAsm10B:
9122 CMPL DI, $0x01
9123 JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
9124 JB match_nolit_end_encodeBetterBlockAsm10B
9125 MOVW (R8)(R11*1), R10
9126 CMPW (R9)(R11*1), R10
9127 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
9128 LEAL 2(R11), R11
9129 SUBL $0x02, DI
9130 JZ match_nolit_end_encodeBetterBlockAsm10B
9131
9132matchlen_match1_match_nolit_encodeBetterBlockAsm10B:
9133 MOVB (R8)(R11*1), R10
9134 CMPB (R9)(R11*1), R10
9135 JNE match_nolit_end_encodeBetterBlockAsm10B
9136 LEAL 1(R11), R11
9137
9138match_nolit_end_encodeBetterBlockAsm10B:
9139 MOVL CX, DI
9140 SUBL BX, DI
9141
9142 // Check if repeat
9143 CMPL 16(SP), DI
9144 JEQ match_is_repeat_encodeBetterBlockAsm10B
9145 MOVL DI, 16(SP)
9146 MOVL 12(SP), BX
9147 CMPL BX, SI
9148 JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B
9149 MOVL SI, R8
9150 MOVL SI, 12(SP)
9151 LEAQ (DX)(BX*1), R9
9152 SUBL BX, R8
9153 LEAL -1(R8), BX
9154 CMPL BX, $0x3c
9155 JB one_byte_match_emit_encodeBetterBlockAsm10B
9156 CMPL BX, $0x00000100
9157 JB two_bytes_match_emit_encodeBetterBlockAsm10B
9158 JB three_bytes_match_emit_encodeBetterBlockAsm10B
9159
9160three_bytes_match_emit_encodeBetterBlockAsm10B:
9161 MOVB $0xf4, (AX)
9162 MOVW BX, 1(AX)
9163 ADDQ $0x03, AX
9164 JMP memmove_long_match_emit_encodeBetterBlockAsm10B
9165
9166two_bytes_match_emit_encodeBetterBlockAsm10B:
9167 MOVB $0xf0, (AX)
9168 MOVB BL, 1(AX)
9169 ADDQ $0x02, AX
9170 CMPL BX, $0x40
9171 JB memmove_match_emit_encodeBetterBlockAsm10B
9172 JMP memmove_long_match_emit_encodeBetterBlockAsm10B
9173
9174one_byte_match_emit_encodeBetterBlockAsm10B:
9175 SHLB $0x02, BL
9176 MOVB BL, (AX)
9177 ADDQ $0x01, AX
9178
9179memmove_match_emit_encodeBetterBlockAsm10B:
9180 LEAQ (AX)(R8*1), BX
9181
9182 // genMemMoveShort
9183 CMPQ R8, $0x04
9184 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4
9185 CMPQ R8, $0x08
9186 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7
9187 CMPQ R8, $0x10
9188 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16
9189 CMPQ R8, $0x20
9190 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32
9191 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64
9192
9193emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4:
9194 MOVL (R9), R10
9195 MOVL R10, (AX)
9196 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
9197
9198emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7:
9199 MOVL (R9), R10
9200 MOVL -4(R9)(R8*1), R9
9201 MOVL R10, (AX)
9202 MOVL R9, -4(AX)(R8*1)
9203 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
9204
9205emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16:
9206 MOVQ (R9), R10
9207 MOVQ -8(R9)(R8*1), R9
9208 MOVQ R10, (AX)
9209 MOVQ R9, -8(AX)(R8*1)
9210 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
9211
9212emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32:
9213 MOVOU (R9), X0
9214 MOVOU -16(R9)(R8*1), X1
9215 MOVOU X0, (AX)
9216 MOVOU X1, -16(AX)(R8*1)
9217 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
9218
9219emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64:
9220 MOVOU (R9), X0
9221 MOVOU 16(R9), X1
9222 MOVOU -32(R9)(R8*1), X2
9223 MOVOU -16(R9)(R8*1), X3
9224 MOVOU X0, (AX)
9225 MOVOU X1, 16(AX)
9226 MOVOU X2, -32(AX)(R8*1)
9227 MOVOU X3, -16(AX)(R8*1)
9228
9229memmove_end_copy_match_emit_encodeBetterBlockAsm10B:
9230 MOVQ BX, AX
9231 JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B
9232
9233memmove_long_match_emit_encodeBetterBlockAsm10B:
9234 LEAQ (AX)(R8*1), BX
9235
9236 // genMemMoveLong
9237 MOVOU (R9), X0
9238 MOVOU 16(R9), X1
9239 MOVOU -32(R9)(R8*1), X2
9240 MOVOU -16(R9)(R8*1), X3
9241 MOVQ R8, R12
9242 SHRQ $0x05, R12
9243 MOVQ AX, R10
9244 ANDL $0x0000001f, R10
9245 MOVQ $0x00000040, R13
9246 SUBQ R10, R13
9247 DECQ R12
9248 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
9249 LEAQ -32(R9)(R13*1), R10
9250 LEAQ -32(AX)(R13*1), R14
9251
9252emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back:
9253 MOVOU (R10), X4
9254 MOVOU 16(R10), X5
9255 MOVOA X4, (R14)
9256 MOVOA X5, 16(R14)
9257 ADDQ $0x20, R14
9258 ADDQ $0x20, R10
9259 ADDQ $0x20, R13
9260 DECQ R12
9261 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back
9262
9263emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
9264 MOVOU -32(R9)(R13*1), X4
9265 MOVOU -16(R9)(R13*1), X5
9266 MOVOA X4, -32(AX)(R13*1)
9267 MOVOA X5, -16(AX)(R13*1)
9268 ADDQ $0x20, R13
9269 CMPQ R8, R13
9270 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
9271 MOVOU X0, (AX)
9272 MOVOU X1, 16(AX)
9273 MOVOU X2, -32(AX)(R8*1)
9274 MOVOU X3, -16(AX)(R8*1)
9275 MOVQ BX, AX
9276
9277emit_literal_done_match_emit_encodeBetterBlockAsm10B:
9278 ADDL R11, CX
9279 ADDL $0x04, R11
9280 MOVL CX, 12(SP)
9281
9282 // emitCopy
9283 CMPL R11, $0x40
9284 JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B
9285 CMPL DI, $0x00000800
9286 JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B
9287 MOVL $0x00000001, BX
9288 LEAL 16(BX), BX
9289 MOVB DI, 1(AX)
9290 SHRL $0x08, DI
9291 SHLL $0x05, DI
9292 ORL DI, BX
9293 MOVB BL, (AX)
9294 ADDQ $0x02, AX
9295 SUBL $0x08, R11
9296
9297 // emitRepeat
9298 LEAL -4(R11), R11
9299 JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
9300 MOVL R11, BX
9301 LEAL -4(R11), R11
9302 CMPL BX, $0x08
9303 JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
9304 CMPL BX, $0x0c
9305 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
9306 CMPL DI, $0x00000800
9307 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
9308
9309cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
9310 CMPL R11, $0x00000104
9311 JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
9312 LEAL -256(R11), R11
9313 MOVW $0x0019, (AX)
9314 MOVW R11, 2(AX)
9315 ADDQ $0x04, AX
9316 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9317
9318repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
9319 LEAL -4(R11), R11
9320 MOVW $0x0015, (AX)
9321 MOVB R11, 2(AX)
9322 ADDQ $0x03, AX
9323 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9324
9325repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
9326 SHLL $0x02, R11
9327 ORL $0x01, R11
9328 MOVW R11, (AX)
9329 ADDQ $0x02, AX
9330 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9331
9332repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
9333 XORQ BX, BX
9334 LEAL 1(BX)(R11*4), R11
9335 MOVB DI, 1(AX)
9336 SARL $0x08, DI
9337 SHLL $0x05, DI
9338 ORL DI, R11
9339 MOVB R11, (AX)
9340 ADDQ $0x02, AX
9341 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9342
9343long_offset_short_match_nolit_encodeBetterBlockAsm10B:
9344 MOVB $0xee, (AX)
9345 MOVW DI, 1(AX)
9346 LEAL -60(R11), R11
9347 ADDQ $0x03, AX
9348
9349 // emitRepeat
9350 MOVL R11, BX
9351 LEAL -4(R11), R11
9352 CMPL BX, $0x08
9353 JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
9354 CMPL BX, $0x0c
9355 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
9356 CMPL DI, $0x00000800
9357 JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
9358
9359cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
9360 CMPL R11, $0x00000104
9361 JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
9362 LEAL -256(R11), R11
9363 MOVW $0x0019, (AX)
9364 MOVW R11, 2(AX)
9365 ADDQ $0x04, AX
9366 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9367
9368repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
9369 LEAL -4(R11), R11
9370 MOVW $0x0015, (AX)
9371 MOVB R11, 2(AX)
9372 ADDQ $0x03, AX
9373 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9374
9375repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
9376 SHLL $0x02, R11
9377 ORL $0x01, R11
9378 MOVW R11, (AX)
9379 ADDQ $0x02, AX
9380 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9381
9382repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
9383 XORQ BX, BX
9384 LEAL 1(BX)(R11*4), R11
9385 MOVB DI, 1(AX)
9386 SARL $0x08, DI
9387 SHLL $0x05, DI
9388 ORL DI, R11
9389 MOVB R11, (AX)
9390 ADDQ $0x02, AX
9391 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9392
9393two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B:
9394 MOVL R11, BX
9395 SHLL $0x02, BX
9396 CMPL R11, $0x0c
9397 JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
9398 CMPL DI, $0x00000800
9399 JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
9400 LEAL -15(BX), BX
9401 MOVB DI, 1(AX)
9402 SHRL $0x08, DI
9403 SHLL $0x05, DI
9404 ORL DI, BX
9405 MOVB BL, (AX)
9406 ADDQ $0x02, AX
9407 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9408
9409emit_copy_three_match_nolit_encodeBetterBlockAsm10B:
9410 LEAL -2(BX), BX
9411 MOVB BL, (AX)
9412 MOVW DI, 1(AX)
9413 ADDQ $0x03, AX
9414 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9415
9416match_is_repeat_encodeBetterBlockAsm10B:
9417 MOVL 12(SP), BX
9418 CMPL BX, SI
9419 JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
9420 MOVL SI, R8
9421 MOVL SI, 12(SP)
9422 LEAQ (DX)(BX*1), R9
9423 SUBL BX, R8
9424 LEAL -1(R8), BX
9425 CMPL BX, $0x3c
9426 JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B
9427 CMPL BX, $0x00000100
9428 JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B
9429 JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B
9430
9431three_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
9432 MOVB $0xf4, (AX)
9433 MOVW BX, 1(AX)
9434 ADDQ $0x03, AX
9435 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
9436
9437two_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
9438 MOVB $0xf0, (AX)
9439 MOVB BL, 1(AX)
9440 ADDQ $0x02, AX
9441 CMPL BX, $0x40
9442 JB memmove_match_emit_repeat_encodeBetterBlockAsm10B
9443 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
9444
9445one_byte_match_emit_repeat_encodeBetterBlockAsm10B:
9446 SHLB $0x02, BL
9447 MOVB BL, (AX)
9448 ADDQ $0x01, AX
9449
9450memmove_match_emit_repeat_encodeBetterBlockAsm10B:
9451 LEAQ (AX)(R8*1), BX
9452
9453 // genMemMoveShort
9454 CMPQ R8, $0x04
9455 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4
9456 CMPQ R8, $0x08
9457 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7
9458 CMPQ R8, $0x10
9459 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16
9460 CMPQ R8, $0x20
9461 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32
9462 JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64
9463
9464emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4:
9465 MOVL (R9), R10
9466 MOVL R10, (AX)
9467 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
9468
9469emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7:
9470 MOVL (R9), R10
9471 MOVL -4(R9)(R8*1), R9
9472 MOVL R10, (AX)
9473 MOVL R9, -4(AX)(R8*1)
9474 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
9475
9476emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16:
9477 MOVQ (R9), R10
9478 MOVQ -8(R9)(R8*1), R9
9479 MOVQ R10, (AX)
9480 MOVQ R9, -8(AX)(R8*1)
9481 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
9482
9483emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32:
9484 MOVOU (R9), X0
9485 MOVOU -16(R9)(R8*1), X1
9486 MOVOU X0, (AX)
9487 MOVOU X1, -16(AX)(R8*1)
9488 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
9489
9490emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64:
9491 MOVOU (R9), X0
9492 MOVOU 16(R9), X1
9493 MOVOU -32(R9)(R8*1), X2
9494 MOVOU -16(R9)(R8*1), X3
9495 MOVOU X0, (AX)
9496 MOVOU X1, 16(AX)
9497 MOVOU X2, -32(AX)(R8*1)
9498 MOVOU X3, -16(AX)(R8*1)
9499
9500memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B:
9501 MOVQ BX, AX
9502 JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
9503
9504memmove_long_match_emit_repeat_encodeBetterBlockAsm10B:
9505 LEAQ (AX)(R8*1), BX
9506
9507 // genMemMoveLong
9508 MOVOU (R9), X0
9509 MOVOU 16(R9), X1
9510 MOVOU -32(R9)(R8*1), X2
9511 MOVOU -16(R9)(R8*1), X3
9512 MOVQ R8, R12
9513 SHRQ $0x05, R12
9514 MOVQ AX, R10
9515 ANDL $0x0000001f, R10
9516 MOVQ $0x00000040, R13
9517 SUBQ R10, R13
9518 DECQ R12
9519 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
9520 LEAQ -32(R9)(R13*1), R10
9521 LEAQ -32(AX)(R13*1), R14
9522
9523emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back:
9524 MOVOU (R10), X4
9525 MOVOU 16(R10), X5
9526 MOVOA X4, (R14)
9527 MOVOA X5, 16(R14)
9528 ADDQ $0x20, R14
9529 ADDQ $0x20, R10
9530 ADDQ $0x20, R13
9531 DECQ R12
9532 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back
9533
9534emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
9535 MOVOU -32(R9)(R13*1), X4
9536 MOVOU -16(R9)(R13*1), X5
9537 MOVOA X4, -32(AX)(R13*1)
9538 MOVOA X5, -16(AX)(R13*1)
9539 ADDQ $0x20, R13
9540 CMPQ R8, R13
9541 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
9542 MOVOU X0, (AX)
9543 MOVOU X1, 16(AX)
9544 MOVOU X2, -32(AX)(R8*1)
9545 MOVOU X3, -16(AX)(R8*1)
9546 MOVQ BX, AX
9547
9548emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B:
9549 ADDL R11, CX
9550 ADDL $0x04, R11
9551 MOVL CX, 12(SP)
9552
9553 // emitRepeat
9554 MOVL R11, BX
9555 LEAL -4(R11), R11
9556 CMPL BX, $0x08
9557 JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B
9558 CMPL BX, $0x0c
9559 JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
9560 CMPL DI, $0x00000800
9561 JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
9562
9563cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
9564 CMPL R11, $0x00000104
9565 JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B
9566 LEAL -256(R11), R11
9567 MOVW $0x0019, (AX)
9568 MOVW R11, 2(AX)
9569 ADDQ $0x04, AX
9570 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9571
9572repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B:
9573 LEAL -4(R11), R11
9574 MOVW $0x0015, (AX)
9575 MOVB R11, 2(AX)
9576 ADDQ $0x03, AX
9577 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9578
9579repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B:
9580 SHLL $0x02, R11
9581 ORL $0x01, R11
9582 MOVW R11, (AX)
9583 ADDQ $0x02, AX
9584 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
9585
9586repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
9587 XORQ BX, BX
9588 LEAL 1(BX)(R11*4), R11
9589 MOVB DI, 1(AX)
9590 SARL $0x08, DI
9591 SHLL $0x05, DI
9592 ORL DI, R11
9593 MOVB R11, (AX)
9594 ADDQ $0x02, AX
9595
9596match_nolit_emitcopy_end_encodeBetterBlockAsm10B:
9597 CMPL CX, 8(SP)
9598 JAE emit_remainder_encodeBetterBlockAsm10B
9599 CMPQ AX, (SP)
9600 JB match_nolit_dst_ok_encodeBetterBlockAsm10B
9601 MOVQ $0x00000000, ret+48(FP)
9602 RET
9603
9604match_nolit_dst_ok_encodeBetterBlockAsm10B:
9605 MOVQ $0x0000cf1bbcdcbf9b, BX
9606 MOVQ $0x9e3779b1, DI
9607 LEAQ 1(SI), SI
9608 LEAQ -2(CX), R8
9609 MOVQ (DX)(SI*1), R9
9610 MOVQ 1(DX)(SI*1), R10
9611 MOVQ (DX)(R8*1), R11
9612 MOVQ 1(DX)(R8*1), R12
9613 SHLQ $0x10, R9
9614 IMULQ BX, R9
9615 SHRQ $0x34, R9
9616 SHLQ $0x20, R10
9617 IMULQ DI, R10
9618 SHRQ $0x36, R10
9619 SHLQ $0x10, R11
9620 IMULQ BX, R11
9621 SHRQ $0x34, R11
9622 SHLQ $0x20, R12
9623 IMULQ DI, R12
9624 SHRQ $0x36, R12
9625 LEAQ 1(SI), DI
9626 LEAQ 1(R8), R13
9627 MOVL SI, 24(SP)(R9*4)
9628 MOVL R8, 24(SP)(R11*4)
9629 MOVL DI, 16408(SP)(R10*4)
9630 MOVL R13, 16408(SP)(R12*4)
9631 LEAQ 1(R8)(SI*1), DI
9632 SHRQ $0x01, DI
9633 ADDQ $0x01, SI
9634 SUBQ $0x01, R8
9635
9636index_loop_encodeBetterBlockAsm10B:
9637 CMPQ DI, R8
9638 JAE search_loop_encodeBetterBlockAsm10B
9639 MOVQ (DX)(SI*1), R9
9640 MOVQ (DX)(DI*1), R10
9641 SHLQ $0x10, R9
9642 IMULQ BX, R9
9643 SHRQ $0x34, R9
9644 SHLQ $0x10, R10
9645 IMULQ BX, R10
9646 SHRQ $0x34, R10
9647 MOVL SI, 24(SP)(R9*4)
9648 MOVL DI, 24(SP)(R10*4)
9649 ADDQ $0x02, SI
9650 ADDQ $0x02, DI
9651 JMP index_loop_encodeBetterBlockAsm10B
9652
9653emit_remainder_encodeBetterBlockAsm10B:
9654 MOVQ src_len+32(FP), CX
9655 SUBL 12(SP), CX
9656 LEAQ 3(AX)(CX*1), CX
9657 CMPQ CX, (SP)
9658 JB emit_remainder_ok_encodeBetterBlockAsm10B
9659 MOVQ $0x00000000, ret+48(FP)
9660 RET
9661
9662emit_remainder_ok_encodeBetterBlockAsm10B:
9663 MOVQ src_len+32(FP), CX
9664 MOVL 12(SP), BX
9665 CMPL BX, CX
9666 JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
9667 MOVL CX, SI
9668 MOVL CX, 12(SP)
9669 LEAQ (DX)(BX*1), CX
9670 SUBL BX, SI
9671 LEAL -1(SI), DX
9672 CMPL DX, $0x3c
9673 JB one_byte_emit_remainder_encodeBetterBlockAsm10B
9674 CMPL DX, $0x00000100
9675 JB two_bytes_emit_remainder_encodeBetterBlockAsm10B
9676 JB three_bytes_emit_remainder_encodeBetterBlockAsm10B
9677
9678three_bytes_emit_remainder_encodeBetterBlockAsm10B:
9679 MOVB $0xf4, (AX)
9680 MOVW DX, 1(AX)
9681 ADDQ $0x03, AX
9682 JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
9683
9684two_bytes_emit_remainder_encodeBetterBlockAsm10B:
9685 MOVB $0xf0, (AX)
9686 MOVB DL, 1(AX)
9687 ADDQ $0x02, AX
9688 CMPL DX, $0x40
9689 JB memmove_emit_remainder_encodeBetterBlockAsm10B
9690 JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
9691
9692one_byte_emit_remainder_encodeBetterBlockAsm10B:
9693 SHLB $0x02, DL
9694 MOVB DL, (AX)
9695 ADDQ $0x01, AX
9696
9697memmove_emit_remainder_encodeBetterBlockAsm10B:
9698 LEAQ (AX)(SI*1), DX
9699 MOVL SI, BX
9700
9701 // genMemMoveShort
9702 CMPQ BX, $0x03
9703 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2
9704 JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3
9705 CMPQ BX, $0x08
9706 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7
9707 CMPQ BX, $0x10
9708 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16
9709 CMPQ BX, $0x20
9710 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32
9711 JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64
9712
9713emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2:
9714 MOVB (CX), SI
9715 MOVB -1(CX)(BX*1), CL
9716 MOVB SI, (AX)
9717 MOVB CL, -1(AX)(BX*1)
9718 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
9719
9720emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3:
9721 MOVW (CX), SI
9722 MOVB 2(CX), CL
9723 MOVW SI, (AX)
9724 MOVB CL, 2(AX)
9725 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
9726
9727emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7:
9728 MOVL (CX), SI
9729 MOVL -4(CX)(BX*1), CX
9730 MOVL SI, (AX)
9731 MOVL CX, -4(AX)(BX*1)
9732 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
9733
9734emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16:
9735 MOVQ (CX), SI
9736 MOVQ -8(CX)(BX*1), CX
9737 MOVQ SI, (AX)
9738 MOVQ CX, -8(AX)(BX*1)
9739 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
9740
9741emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32:
9742 MOVOU (CX), X0
9743 MOVOU -16(CX)(BX*1), X1
9744 MOVOU X0, (AX)
9745 MOVOU X1, -16(AX)(BX*1)
9746 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
9747
9748emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64:
9749 MOVOU (CX), X0
9750 MOVOU 16(CX), X1
9751 MOVOU -32(CX)(BX*1), X2
9752 MOVOU -16(CX)(BX*1), X3
9753 MOVOU X0, (AX)
9754 MOVOU X1, 16(AX)
9755 MOVOU X2, -32(AX)(BX*1)
9756 MOVOU X3, -16(AX)(BX*1)
9757
9758memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B:
9759 MOVQ DX, AX
9760 JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
9761
9762memmove_long_emit_remainder_encodeBetterBlockAsm10B:
9763 LEAQ (AX)(SI*1), DX
9764 MOVL SI, BX
9765
9766 // genMemMoveLong
9767 MOVOU (CX), X0
9768 MOVOU 16(CX), X1
9769 MOVOU -32(CX)(BX*1), X2
9770 MOVOU -16(CX)(BX*1), X3
9771 MOVQ BX, DI
9772 SHRQ $0x05, DI
9773 MOVQ AX, SI
9774 ANDL $0x0000001f, SI
9775 MOVQ $0x00000040, R8
9776 SUBQ SI, R8
9777 DECQ DI
9778 JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
9779 LEAQ -32(CX)(R8*1), SI
9780 LEAQ -32(AX)(R8*1), R9
9781
9782emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back:
9783 MOVOU (SI), X4
9784 MOVOU 16(SI), X5
9785 MOVOA X4, (R9)
9786 MOVOA X5, 16(R9)
9787 ADDQ $0x20, R9
9788 ADDQ $0x20, SI
9789 ADDQ $0x20, R8
9790 DECQ DI
9791 JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back
9792
9793emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
9794 MOVOU -32(CX)(R8*1), X4
9795 MOVOU -16(CX)(R8*1), X5
9796 MOVOA X4, -32(AX)(R8*1)
9797 MOVOA X5, -16(AX)(R8*1)
9798 ADDQ $0x20, R8
9799 CMPQ BX, R8
9800 JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
9801 MOVOU X0, (AX)
9802 MOVOU X1, 16(AX)
9803 MOVOU X2, -32(AX)(BX*1)
9804 MOVOU X3, -16(AX)(BX*1)
9805 MOVQ DX, AX
9806
9807emit_literal_done_emit_remainder_encodeBetterBlockAsm10B:
9808 MOVQ dst_base+0(FP), CX
9809 SUBQ CX, AX
9810 MOVQ AX, ret+48(FP)
9811 RET
9812
9813// func encodeBetterBlockAsm8B(dst []byte, src []byte) int
9814// Requires: BMI, SSE2
9815TEXT ·encodeBetterBlockAsm8B(SB), $5144-56
9816 MOVQ dst_base+0(FP), AX
9817 MOVQ $0x00000028, CX
9818 LEAQ 24(SP), DX
9819 PXOR X0, X0
9820
9821zero_loop_encodeBetterBlockAsm8B:
9822 MOVOU X0, (DX)
9823 MOVOU X0, 16(DX)
9824 MOVOU X0, 32(DX)
9825 MOVOU X0, 48(DX)
9826 MOVOU X0, 64(DX)
9827 MOVOU X0, 80(DX)
9828 MOVOU X0, 96(DX)
9829 MOVOU X0, 112(DX)
9830 ADDQ $0x80, DX
9831 DECQ CX
9832 JNZ zero_loop_encodeBetterBlockAsm8B
9833 MOVL $0x00000000, 12(SP)
9834 MOVQ src_len+32(FP), CX
9835 LEAQ -6(CX), DX
9836 LEAQ -8(CX), BX
9837 MOVL BX, 8(SP)
9838 SHRQ $0x05, CX
9839 SUBL CX, DX
9840 LEAQ (AX)(DX*1), DX
9841 MOVQ DX, (SP)
9842 MOVL $0x00000001, CX
9843 MOVL $0x00000000, 16(SP)
9844 MOVQ src_base+24(FP), DX
9845
9846search_loop_encodeBetterBlockAsm8B:
9847 MOVL CX, BX
9848 SUBL 12(SP), BX
9849 SHRL $0x04, BX
9850 LEAL 1(CX)(BX*1), BX
9851 CMPL BX, 8(SP)
9852 JAE emit_remainder_encodeBetterBlockAsm8B
9853 MOVQ (DX)(CX*1), SI
9854 MOVL BX, 20(SP)
9855 MOVQ $0x0000cf1bbcdcbf9b, R8
9856 MOVQ $0x9e3779b1, BX
9857 MOVQ SI, R9
9858 MOVQ SI, R10
9859 SHLQ $0x10, R9
9860 IMULQ R8, R9
9861 SHRQ $0x36, R9
9862 SHLQ $0x20, R10
9863 IMULQ BX, R10
9864 SHRQ $0x38, R10
9865 MOVL 24(SP)(R9*4), BX
9866 MOVL 4120(SP)(R10*4), DI
9867 MOVL CX, 24(SP)(R9*4)
9868 MOVL CX, 4120(SP)(R10*4)
9869 MOVQ (DX)(BX*1), R9
9870 MOVQ (DX)(DI*1), R10
9871 CMPQ R9, SI
9872 JEQ candidate_match_encodeBetterBlockAsm8B
9873 CMPQ R10, SI
9874 JNE no_short_found_encodeBetterBlockAsm8B
9875 MOVL DI, BX
9876 JMP candidate_match_encodeBetterBlockAsm8B
9877
9878no_short_found_encodeBetterBlockAsm8B:
9879 CMPL R9, SI
9880 JEQ candidate_match_encodeBetterBlockAsm8B
9881 CMPL R10, SI
9882 JEQ candidateS_match_encodeBetterBlockAsm8B
9883 MOVL 20(SP), CX
9884 JMP search_loop_encodeBetterBlockAsm8B
9885
9886candidateS_match_encodeBetterBlockAsm8B:
9887 SHRQ $0x08, SI
9888 MOVQ SI, R9
9889 SHLQ $0x10, R9
9890 IMULQ R8, R9
9891 SHRQ $0x36, R9
9892 MOVL 24(SP)(R9*4), BX
9893 INCL CX
9894 MOVL CX, 24(SP)(R9*4)
9895 CMPL (DX)(BX*1), SI
9896 JEQ candidate_match_encodeBetterBlockAsm8B
9897 DECL CX
9898 MOVL DI, BX
9899
9900candidate_match_encodeBetterBlockAsm8B:
9901 MOVL 12(SP), SI
9902 TESTL BX, BX
9903 JZ match_extend_back_end_encodeBetterBlockAsm8B
9904
9905match_extend_back_loop_encodeBetterBlockAsm8B:
9906 CMPL CX, SI
9907 JBE match_extend_back_end_encodeBetterBlockAsm8B
9908 MOVB -1(DX)(BX*1), DI
9909 MOVB -1(DX)(CX*1), R8
9910 CMPB DI, R8
9911 JNE match_extend_back_end_encodeBetterBlockAsm8B
9912 LEAL -1(CX), CX
9913 DECL BX
9914 JZ match_extend_back_end_encodeBetterBlockAsm8B
9915 JMP match_extend_back_loop_encodeBetterBlockAsm8B
9916
9917match_extend_back_end_encodeBetterBlockAsm8B:
9918 MOVL CX, SI
9919 SUBL 12(SP), SI
9920 LEAQ 3(AX)(SI*1), SI
9921 CMPQ SI, (SP)
9922 JB match_dst_size_check_encodeBetterBlockAsm8B
9923 MOVQ $0x00000000, ret+48(FP)
9924 RET
9925
9926match_dst_size_check_encodeBetterBlockAsm8B:
9927 MOVL CX, SI
9928 ADDL $0x04, CX
9929 ADDL $0x04, BX
9930 MOVQ src_len+32(FP), DI
9931 SUBL CX, DI
9932 LEAQ (DX)(CX*1), R8
9933 LEAQ (DX)(BX*1), R9
9934
9935 // matchLen
9936 XORL R11, R11
9937
9938matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B:
9939 CMPL DI, $0x10
9940 JB matchlen_match8_match_nolit_encodeBetterBlockAsm8B
9941 MOVQ (R8)(R11*1), R10
9942 MOVQ 8(R8)(R11*1), R12
9943 XORQ (R9)(R11*1), R10
9944 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B
9945 XORQ 8(R9)(R11*1), R12
9946 JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B
9947 LEAL -16(DI), DI
9948 LEAL 16(R11), R11
9949 JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B
9950
9951matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B:
9952#ifdef GOAMD64_v3
9953 TZCNTQ R12, R12
9954
9955#else
9956 BSFQ R12, R12
9957
9958#endif
9959 SARQ $0x03, R12
9960 LEAL 8(R11)(R12*1), R11
9961 JMP match_nolit_end_encodeBetterBlockAsm8B
9962
9963matchlen_match8_match_nolit_encodeBetterBlockAsm8B:
9964 CMPL DI, $0x08
9965 JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B
9966 MOVQ (R8)(R11*1), R10
9967 XORQ (R9)(R11*1), R10
9968 JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B
9969 LEAL -8(DI), DI
9970 LEAL 8(R11), R11
9971 JMP matchlen_match4_match_nolit_encodeBetterBlockAsm8B
9972
9973matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B:
9974#ifdef GOAMD64_v3
9975 TZCNTQ R10, R10
9976
9977#else
9978 BSFQ R10, R10
9979
9980#endif
9981 SARQ $0x03, R10
9982 LEAL (R11)(R10*1), R11
9983 JMP match_nolit_end_encodeBetterBlockAsm8B
9984
9985matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
9986 CMPL DI, $0x04
9987 JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B
9988 MOVL (R8)(R11*1), R10
9989 CMPL (R9)(R11*1), R10
9990 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B
9991 LEAL -4(DI), DI
9992 LEAL 4(R11), R11
9993
9994matchlen_match2_match_nolit_encodeBetterBlockAsm8B:
9995 CMPL DI, $0x01
9996 JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
9997 JB match_nolit_end_encodeBetterBlockAsm8B
9998 MOVW (R8)(R11*1), R10
9999 CMPW (R9)(R11*1), R10
10000 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
10001 LEAL 2(R11), R11
10002 SUBL $0x02, DI
10003 JZ match_nolit_end_encodeBetterBlockAsm8B
10004
10005matchlen_match1_match_nolit_encodeBetterBlockAsm8B:
10006 MOVB (R8)(R11*1), R10
10007 CMPB (R9)(R11*1), R10
10008 JNE match_nolit_end_encodeBetterBlockAsm8B
10009 LEAL 1(R11), R11
10010
10011match_nolit_end_encodeBetterBlockAsm8B:
10012 MOVL CX, DI
10013 SUBL BX, DI
10014
10015 // Check if repeat
10016 CMPL 16(SP), DI
10017 JEQ match_is_repeat_encodeBetterBlockAsm8B
10018 MOVL DI, 16(SP)
10019 MOVL 12(SP), BX
10020 CMPL BX, SI
10021 JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B
10022 MOVL SI, R8
10023 MOVL SI, 12(SP)
10024 LEAQ (DX)(BX*1), R9
10025 SUBL BX, R8
10026 LEAL -1(R8), BX
10027 CMPL BX, $0x3c
10028 JB one_byte_match_emit_encodeBetterBlockAsm8B
10029 CMPL BX, $0x00000100
10030 JB two_bytes_match_emit_encodeBetterBlockAsm8B
10031 JB three_bytes_match_emit_encodeBetterBlockAsm8B
10032
10033three_bytes_match_emit_encodeBetterBlockAsm8B:
10034 MOVB $0xf4, (AX)
10035 MOVW BX, 1(AX)
10036 ADDQ $0x03, AX
10037 JMP memmove_long_match_emit_encodeBetterBlockAsm8B
10038
10039two_bytes_match_emit_encodeBetterBlockAsm8B:
10040 MOVB $0xf0, (AX)
10041 MOVB BL, 1(AX)
10042 ADDQ $0x02, AX
10043 CMPL BX, $0x40
10044 JB memmove_match_emit_encodeBetterBlockAsm8B
10045 JMP memmove_long_match_emit_encodeBetterBlockAsm8B
10046
10047one_byte_match_emit_encodeBetterBlockAsm8B:
10048 SHLB $0x02, BL
10049 MOVB BL, (AX)
10050 ADDQ $0x01, AX
10051
10052memmove_match_emit_encodeBetterBlockAsm8B:
10053 LEAQ (AX)(R8*1), BX
10054
10055 // genMemMoveShort
10056 CMPQ R8, $0x04
10057 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4
10058 CMPQ R8, $0x08
10059 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7
10060 CMPQ R8, $0x10
10061 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16
10062 CMPQ R8, $0x20
10063 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32
10064 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64
10065
10066emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4:
10067 MOVL (R9), R10
10068 MOVL R10, (AX)
10069 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
10070
10071emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7:
10072 MOVL (R9), R10
10073 MOVL -4(R9)(R8*1), R9
10074 MOVL R10, (AX)
10075 MOVL R9, -4(AX)(R8*1)
10076 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
10077
10078emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16:
10079 MOVQ (R9), R10
10080 MOVQ -8(R9)(R8*1), R9
10081 MOVQ R10, (AX)
10082 MOVQ R9, -8(AX)(R8*1)
10083 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
10084
10085emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32:
10086 MOVOU (R9), X0
10087 MOVOU -16(R9)(R8*1), X1
10088 MOVOU X0, (AX)
10089 MOVOU X1, -16(AX)(R8*1)
10090 JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
10091
10092emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64:
10093 MOVOU (R9), X0
10094 MOVOU 16(R9), X1
10095 MOVOU -32(R9)(R8*1), X2
10096 MOVOU -16(R9)(R8*1), X3
10097 MOVOU X0, (AX)
10098 MOVOU X1, 16(AX)
10099 MOVOU X2, -32(AX)(R8*1)
10100 MOVOU X3, -16(AX)(R8*1)
10101
10102memmove_end_copy_match_emit_encodeBetterBlockAsm8B:
10103 MOVQ BX, AX
10104 JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B
10105
10106memmove_long_match_emit_encodeBetterBlockAsm8B:
10107 LEAQ (AX)(R8*1), BX
10108
10109 // genMemMoveLong
10110 MOVOU (R9), X0
10111 MOVOU 16(R9), X1
10112 MOVOU -32(R9)(R8*1), X2
10113 MOVOU -16(R9)(R8*1), X3
10114 MOVQ R8, R12
10115 SHRQ $0x05, R12
10116 MOVQ AX, R10
10117 ANDL $0x0000001f, R10
10118 MOVQ $0x00000040, R13
10119 SUBQ R10, R13
10120 DECQ R12
10121 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
10122 LEAQ -32(R9)(R13*1), R10
10123 LEAQ -32(AX)(R13*1), R14
10124
10125emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back:
10126 MOVOU (R10), X4
10127 MOVOU 16(R10), X5
10128 MOVOA X4, (R14)
10129 MOVOA X5, 16(R14)
10130 ADDQ $0x20, R14
10131 ADDQ $0x20, R10
10132 ADDQ $0x20, R13
10133 DECQ R12
10134 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back
10135
10136emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
10137 MOVOU -32(R9)(R13*1), X4
10138 MOVOU -16(R9)(R13*1), X5
10139 MOVOA X4, -32(AX)(R13*1)
10140 MOVOA X5, -16(AX)(R13*1)
10141 ADDQ $0x20, R13
10142 CMPQ R8, R13
10143 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
10144 MOVOU X0, (AX)
10145 MOVOU X1, 16(AX)
10146 MOVOU X2, -32(AX)(R8*1)
10147 MOVOU X3, -16(AX)(R8*1)
10148 MOVQ BX, AX
10149
10150emit_literal_done_match_emit_encodeBetterBlockAsm8B:
10151 ADDL R11, CX
10152 ADDL $0x04, R11
10153 MOVL CX, 12(SP)
10154
10155 // emitCopy
10156 CMPL R11, $0x40
10157 JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B
10158 CMPL DI, $0x00000800
10159 JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B
10160 MOVL $0x00000001, BX
10161 LEAL 16(BX), BX
10162 MOVB DI, 1(AX)
10163 SHRL $0x08, DI
10164 SHLL $0x05, DI
10165 ORL DI, BX
10166 MOVB BL, (AX)
10167 ADDQ $0x02, AX
10168 SUBL $0x08, R11
10169
10170 // emitRepeat
10171 LEAL -4(R11), R11
10172 JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
10173 MOVL R11, BX
10174 LEAL -4(R11), R11
10175 CMPL BX, $0x08
10176 JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
10177 CMPL BX, $0x0c
10178 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
10179
10180cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
10181 CMPL R11, $0x00000104
10182 JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
10183 LEAL -256(R11), R11
10184 MOVW $0x0019, (AX)
10185 MOVW R11, 2(AX)
10186 ADDQ $0x04, AX
10187 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10188
10189repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
10190 LEAL -4(R11), R11
10191 MOVW $0x0015, (AX)
10192 MOVB R11, 2(AX)
10193 ADDQ $0x03, AX
10194 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10195
10196repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
10197 SHLL $0x02, R11
10198 ORL $0x01, R11
10199 MOVW R11, (AX)
10200 ADDQ $0x02, AX
10201 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10202 XORQ BX, BX
10203 LEAL 1(BX)(R11*4), R11
10204 MOVB DI, 1(AX)
10205 SARL $0x08, DI
10206 SHLL $0x05, DI
10207 ORL DI, R11
10208 MOVB R11, (AX)
10209 ADDQ $0x02, AX
10210 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10211
10212long_offset_short_match_nolit_encodeBetterBlockAsm8B:
10213 MOVB $0xee, (AX)
10214 MOVW DI, 1(AX)
10215 LEAL -60(R11), R11
10216 ADDQ $0x03, AX
10217
10218 // emitRepeat
10219 MOVL R11, BX
10220 LEAL -4(R11), R11
10221 CMPL BX, $0x08
10222 JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
10223 CMPL BX, $0x0c
10224 JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
10225
10226cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
10227 CMPL R11, $0x00000104
10228 JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
10229 LEAL -256(R11), R11
10230 MOVW $0x0019, (AX)
10231 MOVW R11, 2(AX)
10232 ADDQ $0x04, AX
10233 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10234
10235repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
10236 LEAL -4(R11), R11
10237 MOVW $0x0015, (AX)
10238 MOVB R11, 2(AX)
10239 ADDQ $0x03, AX
10240 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10241
10242repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
10243 SHLL $0x02, R11
10244 ORL $0x01, R11
10245 MOVW R11, (AX)
10246 ADDQ $0x02, AX
10247 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10248 XORQ BX, BX
10249 LEAL 1(BX)(R11*4), R11
10250 MOVB DI, 1(AX)
10251 SARL $0x08, DI
10252 SHLL $0x05, DI
10253 ORL DI, R11
10254 MOVB R11, (AX)
10255 ADDQ $0x02, AX
10256 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10257
10258two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B:
10259 MOVL R11, BX
10260 SHLL $0x02, BX
10261 CMPL R11, $0x0c
10262 JAE emit_copy_three_match_nolit_encodeBetterBlockAsm8B
10263 LEAL -15(BX), BX
10264 MOVB DI, 1(AX)
10265 SHRL $0x08, DI
10266 SHLL $0x05, DI
10267 ORL DI, BX
10268 MOVB BL, (AX)
10269 ADDQ $0x02, AX
10270 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10271
10272emit_copy_three_match_nolit_encodeBetterBlockAsm8B:
10273 LEAL -2(BX), BX
10274 MOVB BL, (AX)
10275 MOVW DI, 1(AX)
10276 ADDQ $0x03, AX
10277 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10278
10279match_is_repeat_encodeBetterBlockAsm8B:
10280 MOVL 12(SP), BX
10281 CMPL BX, SI
10282 JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
10283 MOVL SI, DI
10284 MOVL SI, 12(SP)
10285 LEAQ (DX)(BX*1), R8
10286 SUBL BX, DI
10287 LEAL -1(DI), BX
10288 CMPL BX, $0x3c
10289 JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B
10290 CMPL BX, $0x00000100
10291 JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B
10292 JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B
10293
10294three_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
10295 MOVB $0xf4, (AX)
10296 MOVW BX, 1(AX)
10297 ADDQ $0x03, AX
10298 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
10299
10300two_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
10301 MOVB $0xf0, (AX)
10302 MOVB BL, 1(AX)
10303 ADDQ $0x02, AX
10304 CMPL BX, $0x40
10305 JB memmove_match_emit_repeat_encodeBetterBlockAsm8B
10306 JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
10307
10308one_byte_match_emit_repeat_encodeBetterBlockAsm8B:
10309 SHLB $0x02, BL
10310 MOVB BL, (AX)
10311 ADDQ $0x01, AX
10312
10313memmove_match_emit_repeat_encodeBetterBlockAsm8B:
10314 LEAQ (AX)(DI*1), BX
10315
10316 // genMemMoveShort
10317 CMPQ DI, $0x04
10318 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4
10319 CMPQ DI, $0x08
10320 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7
10321 CMPQ DI, $0x10
10322 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16
10323 CMPQ DI, $0x20
10324 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32
10325 JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64
10326
10327emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4:
10328 MOVL (R8), R9
10329 MOVL R9, (AX)
10330 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
10331
10332emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7:
10333 MOVL (R8), R9
10334 MOVL -4(R8)(DI*1), R8
10335 MOVL R9, (AX)
10336 MOVL R8, -4(AX)(DI*1)
10337 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
10338
10339emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16:
10340 MOVQ (R8), R9
10341 MOVQ -8(R8)(DI*1), R8
10342 MOVQ R9, (AX)
10343 MOVQ R8, -8(AX)(DI*1)
10344 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
10345
10346emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32:
10347 MOVOU (R8), X0
10348 MOVOU -16(R8)(DI*1), X1
10349 MOVOU X0, (AX)
10350 MOVOU X1, -16(AX)(DI*1)
10351 JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
10352
10353emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64:
10354 MOVOU (R8), X0
10355 MOVOU 16(R8), X1
10356 MOVOU -32(R8)(DI*1), X2
10357 MOVOU -16(R8)(DI*1), X3
10358 MOVOU X0, (AX)
10359 MOVOU X1, 16(AX)
10360 MOVOU X2, -32(AX)(DI*1)
10361 MOVOU X3, -16(AX)(DI*1)
10362
10363memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B:
10364 MOVQ BX, AX
10365 JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
10366
10367memmove_long_match_emit_repeat_encodeBetterBlockAsm8B:
10368 LEAQ (AX)(DI*1), BX
10369
10370 // genMemMoveLong
10371 MOVOU (R8), X0
10372 MOVOU 16(R8), X1
10373 MOVOU -32(R8)(DI*1), X2
10374 MOVOU -16(R8)(DI*1), X3
10375 MOVQ DI, R10
10376 SHRQ $0x05, R10
10377 MOVQ AX, R9
10378 ANDL $0x0000001f, R9
10379 MOVQ $0x00000040, R12
10380 SUBQ R9, R12
10381 DECQ R10
10382 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
10383 LEAQ -32(R8)(R12*1), R9
10384 LEAQ -32(AX)(R12*1), R13
10385
10386emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back:
10387 MOVOU (R9), X4
10388 MOVOU 16(R9), X5
10389 MOVOA X4, (R13)
10390 MOVOA X5, 16(R13)
10391 ADDQ $0x20, R13
10392 ADDQ $0x20, R9
10393 ADDQ $0x20, R12
10394 DECQ R10
10395 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back
10396
10397emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
10398 MOVOU -32(R8)(R12*1), X4
10399 MOVOU -16(R8)(R12*1), X5
10400 MOVOA X4, -32(AX)(R12*1)
10401 MOVOA X5, -16(AX)(R12*1)
10402 ADDQ $0x20, R12
10403 CMPQ DI, R12
10404 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
10405 MOVOU X0, (AX)
10406 MOVOU X1, 16(AX)
10407 MOVOU X2, -32(AX)(DI*1)
10408 MOVOU X3, -16(AX)(DI*1)
10409 MOVQ BX, AX
10410
10411emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B:
10412 ADDL R11, CX
10413 ADDL $0x04, R11
10414 MOVL CX, 12(SP)
10415
10416 // emitRepeat
10417 MOVL R11, BX
10418 LEAL -4(R11), R11
10419 CMPL BX, $0x08
10420 JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B
10421 CMPL BX, $0x0c
10422 JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B
10423
10424cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B:
10425 CMPL R11, $0x00000104
10426 JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B
10427 LEAL -256(R11), R11
10428 MOVW $0x0019, (AX)
10429 MOVW R11, 2(AX)
10430 ADDQ $0x04, AX
10431 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10432
10433repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B:
10434 LEAL -4(R11), R11
10435 MOVW $0x0015, (AX)
10436 MOVB R11, 2(AX)
10437 ADDQ $0x03, AX
10438 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10439
10440repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B:
10441 SHLL $0x02, R11
10442 ORL $0x01, R11
10443 MOVW R11, (AX)
10444 ADDQ $0x02, AX
10445 JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
10446 XORQ BX, BX
10447 LEAL 1(BX)(R11*4), R11
10448 MOVB DI, 1(AX)
10449 SARL $0x08, DI
10450 SHLL $0x05, DI
10451 ORL DI, R11
10452 MOVB R11, (AX)
10453 ADDQ $0x02, AX
10454
10455match_nolit_emitcopy_end_encodeBetterBlockAsm8B:
10456 CMPL CX, 8(SP)
10457 JAE emit_remainder_encodeBetterBlockAsm8B
10458 CMPQ AX, (SP)
10459 JB match_nolit_dst_ok_encodeBetterBlockAsm8B
10460 MOVQ $0x00000000, ret+48(FP)
10461 RET
10462
10463match_nolit_dst_ok_encodeBetterBlockAsm8B:
10464 MOVQ $0x0000cf1bbcdcbf9b, BX
10465 MOVQ $0x9e3779b1, DI
10466 LEAQ 1(SI), SI
10467 LEAQ -2(CX), R8
10468 MOVQ (DX)(SI*1), R9
10469 MOVQ 1(DX)(SI*1), R10
10470 MOVQ (DX)(R8*1), R11
10471 MOVQ 1(DX)(R8*1), R12
10472 SHLQ $0x10, R9
10473 IMULQ BX, R9
10474 SHRQ $0x36, R9
10475 SHLQ $0x20, R10
10476 IMULQ DI, R10
10477 SHRQ $0x38, R10
10478 SHLQ $0x10, R11
10479 IMULQ BX, R11
10480 SHRQ $0x36, R11
10481 SHLQ $0x20, R12
10482 IMULQ DI, R12
10483 SHRQ $0x38, R12
10484 LEAQ 1(SI), DI
10485 LEAQ 1(R8), R13
10486 MOVL SI, 24(SP)(R9*4)
10487 MOVL R8, 24(SP)(R11*4)
10488 MOVL DI, 4120(SP)(R10*4)
10489 MOVL R13, 4120(SP)(R12*4)
10490 LEAQ 1(R8)(SI*1), DI
10491 SHRQ $0x01, DI
10492 ADDQ $0x01, SI
10493 SUBQ $0x01, R8
10494
10495index_loop_encodeBetterBlockAsm8B:
10496 CMPQ DI, R8
10497 JAE search_loop_encodeBetterBlockAsm8B
10498 MOVQ (DX)(SI*1), R9
10499 MOVQ (DX)(DI*1), R10
10500 SHLQ $0x10, R9
10501 IMULQ BX, R9
10502 SHRQ $0x36, R9
10503 SHLQ $0x10, R10
10504 IMULQ BX, R10
10505 SHRQ $0x36, R10
10506 MOVL SI, 24(SP)(R9*4)
10507 MOVL DI, 24(SP)(R10*4)
10508 ADDQ $0x02, SI
10509 ADDQ $0x02, DI
10510 JMP index_loop_encodeBetterBlockAsm8B
10511
10512emit_remainder_encodeBetterBlockAsm8B:
10513 MOVQ src_len+32(FP), CX
10514 SUBL 12(SP), CX
10515 LEAQ 3(AX)(CX*1), CX
10516 CMPQ CX, (SP)
10517 JB emit_remainder_ok_encodeBetterBlockAsm8B
10518 MOVQ $0x00000000, ret+48(FP)
10519 RET
10520
10521emit_remainder_ok_encodeBetterBlockAsm8B:
10522 MOVQ src_len+32(FP), CX
10523 MOVL 12(SP), BX
10524 CMPL BX, CX
10525 JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
10526 MOVL CX, SI
10527 MOVL CX, 12(SP)
10528 LEAQ (DX)(BX*1), CX
10529 SUBL BX, SI
10530 LEAL -1(SI), DX
10531 CMPL DX, $0x3c
10532 JB one_byte_emit_remainder_encodeBetterBlockAsm8B
10533 CMPL DX, $0x00000100
10534 JB two_bytes_emit_remainder_encodeBetterBlockAsm8B
10535 JB three_bytes_emit_remainder_encodeBetterBlockAsm8B
10536
10537three_bytes_emit_remainder_encodeBetterBlockAsm8B:
10538 MOVB $0xf4, (AX)
10539 MOVW DX, 1(AX)
10540 ADDQ $0x03, AX
10541 JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
10542
10543two_bytes_emit_remainder_encodeBetterBlockAsm8B:
10544 MOVB $0xf0, (AX)
10545 MOVB DL, 1(AX)
10546 ADDQ $0x02, AX
10547 CMPL DX, $0x40
10548 JB memmove_emit_remainder_encodeBetterBlockAsm8B
10549 JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
10550
10551one_byte_emit_remainder_encodeBetterBlockAsm8B:
10552 SHLB $0x02, DL
10553 MOVB DL, (AX)
10554 ADDQ $0x01, AX
10555
10556memmove_emit_remainder_encodeBetterBlockAsm8B:
10557 LEAQ (AX)(SI*1), DX
10558 MOVL SI, BX
10559
10560 // genMemMoveShort
10561 CMPQ BX, $0x03
10562 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2
10563 JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3
10564 CMPQ BX, $0x08
10565 JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7
10566 CMPQ BX, $0x10
10567 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16
10568 CMPQ BX, $0x20
10569 JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32
10570 JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64
10571
10572emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2:
10573 MOVB (CX), SI
10574 MOVB -1(CX)(BX*1), CL
10575 MOVB SI, (AX)
10576 MOVB CL, -1(AX)(BX*1)
10577 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
10578
10579emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3:
10580 MOVW (CX), SI
10581 MOVB 2(CX), CL
10582 MOVW SI, (AX)
10583 MOVB CL, 2(AX)
10584 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
10585
10586emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7:
10587 MOVL (CX), SI
10588 MOVL -4(CX)(BX*1), CX
10589 MOVL SI, (AX)
10590 MOVL CX, -4(AX)(BX*1)
10591 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
10592
10593emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16:
10594 MOVQ (CX), SI
10595 MOVQ -8(CX)(BX*1), CX
10596 MOVQ SI, (AX)
10597 MOVQ CX, -8(AX)(BX*1)
10598 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
10599
10600emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32:
10601 MOVOU (CX), X0
10602 MOVOU -16(CX)(BX*1), X1
10603 MOVOU X0, (AX)
10604 MOVOU X1, -16(AX)(BX*1)
10605 JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
10606
10607emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64:
10608 MOVOU (CX), X0
10609 MOVOU 16(CX), X1
10610 MOVOU -32(CX)(BX*1), X2
10611 MOVOU -16(CX)(BX*1), X3
10612 MOVOU X0, (AX)
10613 MOVOU X1, 16(AX)
10614 MOVOU X2, -32(AX)(BX*1)
10615 MOVOU X3, -16(AX)(BX*1)
10616
10617memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B:
10618 MOVQ DX, AX
10619 JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
10620
10621memmove_long_emit_remainder_encodeBetterBlockAsm8B:
10622 LEAQ (AX)(SI*1), DX
10623 MOVL SI, BX
10624
10625 // genMemMoveLong
10626 MOVOU (CX), X0
10627 MOVOU 16(CX), X1
10628 MOVOU -32(CX)(BX*1), X2
10629 MOVOU -16(CX)(BX*1), X3
10630 MOVQ BX, DI
10631 SHRQ $0x05, DI
10632 MOVQ AX, SI
10633 ANDL $0x0000001f, SI
10634 MOVQ $0x00000040, R8
10635 SUBQ SI, R8
10636 DECQ DI
10637 JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
10638 LEAQ -32(CX)(R8*1), SI
10639 LEAQ -32(AX)(R8*1), R9
10640
10641emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back:
10642 MOVOU (SI), X4
10643 MOVOU 16(SI), X5
10644 MOVOA X4, (R9)
10645 MOVOA X5, 16(R9)
10646 ADDQ $0x20, R9
10647 ADDQ $0x20, SI
10648 ADDQ $0x20, R8
10649 DECQ DI
10650 JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back
10651
10652emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
10653 MOVOU -32(CX)(R8*1), X4
10654 MOVOU -16(CX)(R8*1), X5
10655 MOVOA X4, -32(AX)(R8*1)
10656 MOVOA X5, -16(AX)(R8*1)
10657 ADDQ $0x20, R8
10658 CMPQ BX, R8
10659 JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
10660 MOVOU X0, (AX)
10661 MOVOU X1, 16(AX)
10662 MOVOU X2, -32(AX)(BX*1)
10663 MOVOU X3, -16(AX)(BX*1)
10664 MOVQ DX, AX
10665
10666emit_literal_done_emit_remainder_encodeBetterBlockAsm8B:
10667 MOVQ dst_base+0(FP), CX
10668 SUBQ CX, AX
10669 MOVQ AX, ret+48(FP)
10670 RET
10671
// func encodeSnappyBlockAsm(dst []byte, src []byte) int
// Requires: BMI, SSE2
//
// encodeSnappyBlockAsm compresses src into dst using Snappy-compatible
// literal/copy tags and returns the number of bytes written to dst, or 0
// if the output would not fit (callers treat 0 as "encode failed").
//
// Frame size $65560 = 24 bytes of scratch + a 65536-byte (16384 entry,
// 4 bytes each) hash table at 24(SP).
//
// Stack scratch layout (offsets from SP):
//	(SP)   - qword: limit pointer into dst; output must stay below it
//	8(SP)  - dword: sLimit, last src index worth starting a match at
//	12(SP) - dword: nextEmit, start of the pending literal run
//	16(SP) - dword: current repeat offset (initialized to 1)
//	20(SP) - dword: nextS, src index to resume the search from on a miss
//	24(SP) - hash table mapping hash -> src index
//
// Main-loop register roles: AX = dst write pointer, DX = src base,
// CX = current src index.
//
// NOTE(review): this file appears to be machine-generated (avo-style
// output); confirm against its generator before editing by hand.
TEXT ·encodeSnappyBlockAsm(SB), $65560-56
	MOVQ dst_base+0(FP), AX
	MOVQ $0x00000200, CX
	LEAQ 24(SP), DX
	PXOR X0, X0

	// Zero the 64 KiB hash table: 0x200 iterations x 128 bytes.
zero_loop_encodeSnappyBlockAsm:
	MOVOU X0, (DX)
	MOVOU X0, 16(DX)
	MOVOU X0, 32(DX)
	MOVOU X0, 48(DX)
	MOVOU X0, 64(DX)
	MOVOU X0, 80(DX)
	MOVOU X0, 96(DX)
	MOVOU X0, 112(DX)
	ADDQ $0x80, DX
	DECQ CX
	JNZ zero_loop_encodeSnappyBlockAsm

	// Initialize scratch: nextEmit=0, sLimit=len(src)-8, repeat=1,
	// and (SP) = dst limit derived from len(src) (margin shrinks the
	// usable output so tag emission below never overruns dst).
	MOVL $0x00000000, 12(SP)
	MOVQ src_len+32(FP), CX
	LEAQ -9(CX), DX
	LEAQ -8(CX), BX
	MOVL BX, 8(SP)
	SHRQ $0x05, CX
	SUBL CX, DX
	LEAQ (AX)(DX*1), DX
	MOVQ DX, (SP)
	MOVL $0x00000001, CX
	MOVL CX, 16(SP)
	MOVQ src_base+24(FP), DX

	// Main search loop. Skip step grows with the distance since the
	// last emit (BX = (s-nextEmit)>>6), so incompressible data is
	// scanned faster. Two hashes of the 8 bytes at s are probed and
	// both table slots updated; a third hash (bytes at s+2) is kept
	// in R9 for the no-repeat path below.
search_loop_encodeSnappyBlockAsm:
	MOVL CX, BX
	SUBL 12(SP), BX
	SHRL $0x06, BX
	LEAL 4(CX)(BX*1), BX
	CMPL BX, 8(SP)
	JAE emit_remainder_encodeSnappyBlockAsm
	MOVQ (DX)(CX*1), SI
	MOVL BX, 20(SP)
	// Multiplicative hash constant; hash = ((x<<16)*K)>>50.
	MOVQ $0x0000cf1bbcdcbf9b, R8
	MOVQ SI, R9
	MOVQ SI, R10
	SHRQ $0x08, R10
	SHLQ $0x10, R9
	IMULQ R8, R9
	SHRQ $0x32, R9
	SHLQ $0x10, R10
	IMULQ R8, R10
	SHRQ $0x32, R10
	MOVL 24(SP)(R9*4), BX
	MOVL 24(SP)(R10*4), DI
	MOVL CX, 24(SP)(R9*4)
	LEAL 1(CX), R9
	MOVL R9, 24(SP)(R10*4)
	MOVQ SI, R9
	SHRQ $0x10, R9
	SHLQ $0x10, R9
	IMULQ R8, R9
	SHRQ $0x32, R9
	// Check for a repeat: do 4 bytes at s+1 match src[s+1-repeat:]?
	MOVL CX, R8
	SUBL 16(SP), R8
	MOVL 1(DX)(R8*1), R10
	MOVQ SI, R8
	SHRQ $0x08, R8
	CMPL R8, R10
	JNE no_repeat_found_encodeSnappyBlockAsm
	LEAL 1(CX), SI
	MOVL 12(SP), BX
	MOVL SI, DI
	SUBL 16(SP), DI
	JZ repeat_extend_back_end_encodeSnappyBlockAsm

	// Extend the repeat match backwards toward nextEmit while the
	// preceding bytes also match.
repeat_extend_back_loop_encodeSnappyBlockAsm:
	CMPL SI, BX
	JBE repeat_extend_back_end_encodeSnappyBlockAsm
	MOVB -1(DX)(DI*1), R8
	MOVB -1(DX)(SI*1), R9
	CMPB R8, R9
	JNE repeat_extend_back_end_encodeSnappyBlockAsm
	LEAL -1(SI), SI
	DECL DI
	JNZ repeat_extend_back_loop_encodeSnappyBlockAsm

	// Emit pending literals [nextEmit, SI) before the repeat copy.
	// The literal-length tag grows from 1 to 5 bytes by size class.
repeat_extend_back_end_encodeSnappyBlockAsm:
	MOVL 12(SP), BX
	CMPL BX, SI
	JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm
	MOVL SI, DI
	MOVL SI, 12(SP)
	LEAQ (DX)(BX*1), R8
	SUBL BX, DI
	LEAL -1(DI), BX
	CMPL BX, $0x3c
	JB one_byte_repeat_emit_encodeSnappyBlockAsm
	CMPL BX, $0x00000100
	JB two_bytes_repeat_emit_encodeSnappyBlockAsm
	CMPL BX, $0x00010000
	JB three_bytes_repeat_emit_encodeSnappyBlockAsm
	CMPL BX, $0x01000000
	JB four_bytes_repeat_emit_encodeSnappyBlockAsm
	MOVB $0xfc, (AX)
	MOVL BX, 1(AX)
	ADDQ $0x05, AX
	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm

four_bytes_repeat_emit_encodeSnappyBlockAsm:
	MOVL BX, R9
	SHRL $0x10, R9
	MOVB $0xf8, (AX)
	MOVW BX, 1(AX)
	MOVB R9, 3(AX)
	ADDQ $0x04, AX
	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm

three_bytes_repeat_emit_encodeSnappyBlockAsm:
	MOVB $0xf4, (AX)
	MOVW BX, 1(AX)
	ADDQ $0x03, AX
	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm

two_bytes_repeat_emit_encodeSnappyBlockAsm:
	MOVB $0xf0, (AX)
	MOVB BL, 1(AX)
	ADDQ $0x02, AX
	CMPL BX, $0x40
	JB memmove_repeat_emit_encodeSnappyBlockAsm
	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm

one_byte_repeat_emit_encodeSnappyBlockAsm:
	SHLB $0x02, BL
	MOVB BL, (AX)
	ADDQ $0x01, AX

	// Copy DI literal bytes from R8 to AX (<= 64 bytes: overlapping
	// loads/stores pick the smallest covering width).
memmove_repeat_emit_encodeSnappyBlockAsm:
	LEAQ (AX)(DI*1), BX

	// genMemMoveShort
	CMPQ DI, $0x08
	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8
	CMPQ DI, $0x10
	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16
	CMPQ DI, $0x20
	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32
	JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64

emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8:
	MOVQ (R8), R9
	MOVQ R9, (AX)
	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm

emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16:
	MOVQ (R8), R9
	MOVQ -8(R8)(DI*1), R8
	MOVQ R9, (AX)
	MOVQ R8, -8(AX)(DI*1)
	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm

emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32:
	MOVOU (R8), X0
	MOVOU -16(R8)(DI*1), X1
	MOVOU X0, (AX)
	MOVOU X1, -16(AX)(DI*1)
	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm

emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64:
	MOVOU (R8), X0
	MOVOU 16(R8), X1
	MOVOU -32(R8)(DI*1), X2
	MOVOU -16(R8)(DI*1), X3
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(DI*1)
	MOVOU X3, -16(AX)(DI*1)

memmove_end_copy_repeat_emit_encodeSnappyBlockAsm:
	MOVQ BX, AX
	JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm

	// Long literal copy (> 64 bytes): aligned 32-byte loop with
	// unaligned head/tail fixup held in X0-X3.
memmove_long_repeat_emit_encodeSnappyBlockAsm:
	LEAQ (AX)(DI*1), BX

	// genMemMoveLong
	MOVOU (R8), X0
	MOVOU 16(R8), X1
	MOVOU -32(R8)(DI*1), X2
	MOVOU -16(R8)(DI*1), X3
	MOVQ DI, R10
	SHRQ $0x05, R10
	MOVQ AX, R9
	ANDL $0x0000001f, R9
	MOVQ $0x00000040, R11
	SUBQ R9, R11
	DECQ R10
	JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
	LEAQ -32(R8)(R11*1), R9
	LEAQ -32(AX)(R11*1), R12

emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back:
	MOVOU (R9), X4
	MOVOU 16(R9), X5
	MOVOA X4, (R12)
	MOVOA X5, 16(R12)
	ADDQ $0x20, R12
	ADDQ $0x20, R9
	ADDQ $0x20, R11
	DECQ R10
	JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back

emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
	MOVOU -32(R8)(R11*1), X4
	MOVOU -16(R8)(R11*1), X5
	MOVOA X4, -32(AX)(R11*1)
	MOVOA X5, -16(AX)(R11*1)
	ADDQ $0x20, R11
	CMPQ DI, R11
	JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(DI*1)
	MOVOU X3, -16(AX)(DI*1)
	MOVQ BX, AX

	// Literals done; extend the repeat match forward. s += 5 skips the
	// 1-byte back-step plus the 4 bytes already verified equal.
emit_literal_done_repeat_emit_encodeSnappyBlockAsm:
	ADDL $0x05, CX
	MOVL CX, BX
	SUBL 16(SP), BX
	MOVQ src_len+32(FP), DI
	SUBL CX, DI
	LEAQ (DX)(CX*1), R8
	LEAQ (DX)(BX*1), BX

	// matchLen
	XORL R10, R10

matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm:
	CMPL DI, $0x10
	JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm
	MOVQ (R8)(R10*1), R9
	MOVQ 8(R8)(R10*1), R11
	XORQ (BX)(R10*1), R9
	JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm
	XORQ 8(BX)(R10*1), R11
	JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm
	LEAL -16(DI), DI
	LEAL 16(R10), R10
	JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm

matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
	TZCNTQ R11, R11

#else
	BSFQ R11, R11

#endif
	SARQ $0x03, R11
	LEAL 8(R10)(R11*1), R10
	JMP repeat_extend_forward_end_encodeSnappyBlockAsm

matchlen_match8_repeat_extend_encodeSnappyBlockAsm:
	CMPL DI, $0x08
	JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm
	MOVQ (R8)(R10*1), R9
	XORQ (BX)(R10*1), R9
	JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm
	LEAL -8(DI), DI
	LEAL 8(R10), R10
	JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm

matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
	TZCNTQ R9, R9

#else
	BSFQ R9, R9

#endif
	SARQ $0x03, R9
	LEAL (R10)(R9*1), R10
	JMP repeat_extend_forward_end_encodeSnappyBlockAsm

matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
	CMPL DI, $0x04
	JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm
	MOVL (R8)(R10*1), R9
	CMPL (BX)(R10*1), R9
	JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm
	LEAL -4(DI), DI
	LEAL 4(R10), R10

matchlen_match2_repeat_extend_encodeSnappyBlockAsm:
	CMPL DI, $0x01
	JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
	JB repeat_extend_forward_end_encodeSnappyBlockAsm
	MOVW (R8)(R10*1), R9
	CMPW (BX)(R10*1), R9
	JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
	LEAL 2(R10), R10
	SUBL $0x02, DI
	JZ repeat_extend_forward_end_encodeSnappyBlockAsm

matchlen_match1_repeat_extend_encodeSnappyBlockAsm:
	MOVB (R8)(R10*1), R9
	CMPB (BX)(R10*1), R9
	JNE repeat_extend_forward_end_encodeSnappyBlockAsm
	LEAL 1(R10), R10

	// Emit the repeat as a Snappy copy: BX = match length, SI = offset.
	// Offsets >= 64 KiB need 5-byte copy-4 tags, chunked at 64 bytes.
repeat_extend_forward_end_encodeSnappyBlockAsm:
	ADDL R10, CX
	MOVL CX, BX
	SUBL SI, BX
	MOVL 16(SP), SI

	// emitCopy
	CMPL SI, $0x00010000
	JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm

four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm:
	CMPL BX, $0x40
	JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
	MOVB $0xff, (AX)
	MOVL SI, 1(AX)
	LEAL -64(BX), BX
	ADDQ $0x05, AX
	CMPL BX, $0x04
	JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
	JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm

four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm:
	TESTL BX, BX
	JZ repeat_end_emit_encodeSnappyBlockAsm
	XORL DI, DI
	LEAL -1(DI)(BX*4), BX
	MOVB BL, (AX)
	MOVL SI, 1(AX)
	ADDQ $0x05, AX
	JMP repeat_end_emit_encodeSnappyBlockAsm

	// Offset < 64 KiB: 3-byte copy-2 tags, with a 2-byte copy-1 tag
	// when length < 12 and offset < 2048.
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm:
	CMPL BX, $0x40
	JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm
	MOVB $0xee, (AX)
	MOVW SI, 1(AX)
	LEAL -60(BX), BX
	ADDQ $0x03, AX
	JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm

two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm:
	MOVL BX, DI
	SHLL $0x02, DI
	CMPL BX, $0x0c
	JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
	CMPL SI, $0x00000800
	JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
	LEAL -15(DI), DI
	MOVB SI, 1(AX)
	SHRL $0x08, SI
	SHLL $0x05, SI
	ORL SI, DI
	MOVB DI, (AX)
	ADDQ $0x02, AX
	JMP repeat_end_emit_encodeSnappyBlockAsm

emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm:
	LEAL -2(DI), DI
	MOVB DI, (AX)
	MOVW SI, 1(AX)
	ADDQ $0x03, AX

repeat_end_emit_encodeSnappyBlockAsm:
	MOVL CX, 12(SP)
	JMP search_loop_encodeSnappyBlockAsm

	// No repeat: probe the three hash candidates (s, s+1, s+2 via the
	// progressively shifted SI) against the table entries loaded above.
no_repeat_found_encodeSnappyBlockAsm:
	CMPL (DX)(BX*1), SI
	JEQ candidate_match_encodeSnappyBlockAsm
	SHRQ $0x08, SI
	MOVL 24(SP)(R9*4), BX
	LEAL 2(CX), R8
	CMPL (DX)(DI*1), SI
	JEQ candidate2_match_encodeSnappyBlockAsm
	MOVL R8, 24(SP)(R9*4)
	SHRQ $0x08, SI
	CMPL (DX)(BX*1), SI
	JEQ candidate3_match_encodeSnappyBlockAsm
	MOVL 20(SP), CX
	JMP search_loop_encodeSnappyBlockAsm

candidate3_match_encodeSnappyBlockAsm:
	ADDL $0x02, CX
	JMP candidate_match_encodeSnappyBlockAsm

candidate2_match_encodeSnappyBlockAsm:
	MOVL R8, 24(SP)(R9*4)
	INCL CX
	MOVL DI, BX

	// Found a match at src[BX]; extend it backwards toward nextEmit.
candidate_match_encodeSnappyBlockAsm:
	MOVL 12(SP), SI
	TESTL BX, BX
	JZ match_extend_back_end_encodeSnappyBlockAsm

match_extend_back_loop_encodeSnappyBlockAsm:
	CMPL CX, SI
	JBE match_extend_back_end_encodeSnappyBlockAsm
	MOVB -1(DX)(BX*1), DI
	MOVB -1(DX)(CX*1), R8
	CMPB DI, R8
	JNE match_extend_back_end_encodeSnappyBlockAsm
	LEAL -1(CX), CX
	DECL BX
	JZ match_extend_back_end_encodeSnappyBlockAsm
	JMP match_extend_back_loop_encodeSnappyBlockAsm

	// Bounds check: literals + 5-byte tag must fit below the dst
	// limit at (SP); otherwise bail out returning 0.
match_extend_back_end_encodeSnappyBlockAsm:
	MOVL CX, SI
	SUBL 12(SP), SI
	LEAQ 5(AX)(SI*1), SI
	CMPQ SI, (SP)
	JB match_dst_size_check_encodeSnappyBlockAsm
	MOVQ $0x00000000, ret+48(FP)
	RET

	// Emit pending literals [nextEmit, s) before the match copy; same
	// size-class tag scheme as the repeat path above.
match_dst_size_check_encodeSnappyBlockAsm:
	MOVL CX, SI
	MOVL 12(SP), DI
	CMPL DI, SI
	JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm
	MOVL SI, R8
	MOVL SI, 12(SP)
	LEAQ (DX)(DI*1), SI
	SUBL DI, R8
	LEAL -1(R8), DI
	CMPL DI, $0x3c
	JB one_byte_match_emit_encodeSnappyBlockAsm
	CMPL DI, $0x00000100
	JB two_bytes_match_emit_encodeSnappyBlockAsm
	CMPL DI, $0x00010000
	JB three_bytes_match_emit_encodeSnappyBlockAsm
	CMPL DI, $0x01000000
	JB four_bytes_match_emit_encodeSnappyBlockAsm
	MOVB $0xfc, (AX)
	MOVL DI, 1(AX)
	ADDQ $0x05, AX
	JMP memmove_long_match_emit_encodeSnappyBlockAsm

four_bytes_match_emit_encodeSnappyBlockAsm:
	MOVL DI, R9
	SHRL $0x10, R9
	MOVB $0xf8, (AX)
	MOVW DI, 1(AX)
	MOVB R9, 3(AX)
	ADDQ $0x04, AX
	JMP memmove_long_match_emit_encodeSnappyBlockAsm

three_bytes_match_emit_encodeSnappyBlockAsm:
	MOVB $0xf4, (AX)
	MOVW DI, 1(AX)
	ADDQ $0x03, AX
	JMP memmove_long_match_emit_encodeSnappyBlockAsm

two_bytes_match_emit_encodeSnappyBlockAsm:
	MOVB $0xf0, (AX)
	MOVB DI, 1(AX)
	ADDQ $0x02, AX
	CMPL DI, $0x40
	JB memmove_match_emit_encodeSnappyBlockAsm
	JMP memmove_long_match_emit_encodeSnappyBlockAsm

one_byte_match_emit_encodeSnappyBlockAsm:
	SHLB $0x02, DI
	MOVB DI, (AX)
	ADDQ $0x01, AX

memmove_match_emit_encodeSnappyBlockAsm:
	LEAQ (AX)(R8*1), DI

	// genMemMoveShort
	CMPQ R8, $0x08
	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8
	CMPQ R8, $0x10
	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16
	CMPQ R8, $0x20
	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32
	JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64

emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8:
	MOVQ (SI), R9
	MOVQ R9, (AX)
	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm

emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16:
	MOVQ (SI), R9
	MOVQ -8(SI)(R8*1), SI
	MOVQ R9, (AX)
	MOVQ SI, -8(AX)(R8*1)
	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm

emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32:
	MOVOU (SI), X0
	MOVOU -16(SI)(R8*1), X1
	MOVOU X0, (AX)
	MOVOU X1, -16(AX)(R8*1)
	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm

emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64:
	MOVOU (SI), X0
	MOVOU 16(SI), X1
	MOVOU -32(SI)(R8*1), X2
	MOVOU -16(SI)(R8*1), X3
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(R8*1)
	MOVOU X3, -16(AX)(R8*1)

memmove_end_copy_match_emit_encodeSnappyBlockAsm:
	MOVQ DI, AX
	JMP emit_literal_done_match_emit_encodeSnappyBlockAsm

memmove_long_match_emit_encodeSnappyBlockAsm:
	LEAQ (AX)(R8*1), DI

	// genMemMoveLong
	MOVOU (SI), X0
	MOVOU 16(SI), X1
	MOVOU -32(SI)(R8*1), X2
	MOVOU -16(SI)(R8*1), X3
	MOVQ R8, R10
	SHRQ $0x05, R10
	MOVQ AX, R9
	ANDL $0x0000001f, R9
	MOVQ $0x00000040, R11
	SUBQ R9, R11
	DECQ R10
	JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
	LEAQ -32(SI)(R11*1), R9
	LEAQ -32(AX)(R11*1), R12

emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back:
	MOVOU (R9), X4
	MOVOU 16(R9), X5
	MOVOA X4, (R12)
	MOVOA X5, 16(R12)
	ADDQ $0x20, R12
	ADDQ $0x20, R9
	ADDQ $0x20, R11
	DECQ R10
	JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back

emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
	MOVOU -32(SI)(R11*1), X4
	MOVOU -16(SI)(R11*1), X5
	MOVOA X4, -32(AX)(R11*1)
	MOVOA X5, -16(AX)(R11*1)
	ADDQ $0x20, R11
	CMPQ R8, R11
	JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(R8*1)
	MOVOU X3, -16(AX)(R8*1)
	MOVQ DI, AX

	// Extend the match forward past the 4 verified bytes, then emit
	// the copy. 16(SP) records the offset for later repeat detection.
emit_literal_done_match_emit_encodeSnappyBlockAsm:
match_nolit_loop_encodeSnappyBlockAsm:
	MOVL CX, SI
	SUBL BX, SI
	MOVL SI, 16(SP)
	ADDL $0x04, CX
	ADDL $0x04, BX
	MOVQ src_len+32(FP), SI
	SUBL CX, SI
	LEAQ (DX)(CX*1), DI
	LEAQ (DX)(BX*1), BX

	// matchLen
	XORL R9, R9

matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm:
	CMPL SI, $0x10
	JB matchlen_match8_match_nolit_encodeSnappyBlockAsm
	MOVQ (DI)(R9*1), R8
	MOVQ 8(DI)(R9*1), R10
	XORQ (BX)(R9*1), R8
	JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm
	XORQ 8(BX)(R9*1), R10
	JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm
	LEAL -16(SI), SI
	LEAL 16(R9), R9
	JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm

matchlen_bsf_16match_nolit_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
	TZCNTQ R10, R10

#else
	BSFQ R10, R10

#endif
	SARQ $0x03, R10
	LEAL 8(R9)(R10*1), R9
	JMP match_nolit_end_encodeSnappyBlockAsm

matchlen_match8_match_nolit_encodeSnappyBlockAsm:
	CMPL SI, $0x08
	JB matchlen_match4_match_nolit_encodeSnappyBlockAsm
	MOVQ (DI)(R9*1), R8
	XORQ (BX)(R9*1), R8
	JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm
	LEAL -8(SI), SI
	LEAL 8(R9), R9
	JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm

matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
	TZCNTQ R8, R8

#else
	BSFQ R8, R8

#endif
	SARQ $0x03, R8
	LEAL (R9)(R8*1), R9
	JMP match_nolit_end_encodeSnappyBlockAsm

matchlen_match4_match_nolit_encodeSnappyBlockAsm:
	CMPL SI, $0x04
	JB matchlen_match2_match_nolit_encodeSnappyBlockAsm
	MOVL (DI)(R9*1), R8
	CMPL (BX)(R9*1), R8
	JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm
	LEAL -4(SI), SI
	LEAL 4(R9), R9

matchlen_match2_match_nolit_encodeSnappyBlockAsm:
	CMPL SI, $0x01
	JE matchlen_match1_match_nolit_encodeSnappyBlockAsm
	JB match_nolit_end_encodeSnappyBlockAsm
	MOVW (DI)(R9*1), R8
	CMPW (BX)(R9*1), R8
	JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm
	LEAL 2(R9), R9
	SUBL $0x02, SI
	JZ match_nolit_end_encodeSnappyBlockAsm

matchlen_match1_match_nolit_encodeSnappyBlockAsm:
	MOVB (DI)(R9*1), R8
	CMPB (BX)(R9*1), R8
	JNE match_nolit_end_encodeSnappyBlockAsm
	LEAL 1(R9), R9

	// Emit the copy: R9 = match length (incl. the 4 base bytes),
	// BX = offset. Same tag scheme as repeat_as_copy above.
match_nolit_end_encodeSnappyBlockAsm:
	ADDL R9, CX
	MOVL 16(SP), BX
	ADDL $0x04, R9
	MOVL CX, 12(SP)

	// emitCopy
	CMPL BX, $0x00010000
	JB two_byte_offset_match_nolit_encodeSnappyBlockAsm

four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm:
	CMPL R9, $0x40
	JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm
	MOVB $0xff, (AX)
	MOVL BX, 1(AX)
	LEAL -64(R9), R9
	ADDQ $0x05, AX
	CMPL R9, $0x04
	JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm
	JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm

four_bytes_remain_match_nolit_encodeSnappyBlockAsm:
	TESTL R9, R9
	JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm
	XORL SI, SI
	LEAL -1(SI)(R9*4), R9
	MOVB R9, (AX)
	MOVL BX, 1(AX)
	ADDQ $0x05, AX
	JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm

two_byte_offset_match_nolit_encodeSnappyBlockAsm:
	CMPL R9, $0x40
	JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm
	MOVB $0xee, (AX)
	MOVW BX, 1(AX)
	LEAL -60(R9), R9
	ADDQ $0x03, AX
	JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm

two_byte_offset_short_match_nolit_encodeSnappyBlockAsm:
	MOVL R9, SI
	SHLL $0x02, SI
	CMPL R9, $0x0c
	JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
	CMPL BX, $0x00000800
	JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
	LEAL -15(SI), SI
	MOVB BL, 1(AX)
	SHRL $0x08, BX
	SHLL $0x05, BX
	ORL BX, SI
	MOVB SI, (AX)
	ADDQ $0x02, AX
	JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm

emit_copy_three_match_nolit_encodeSnappyBlockAsm:
	LEAL -2(SI), SI
	MOVB SI, (AX)
	MOVW BX, 1(AX)
	ADDQ $0x03, AX

	// After a copy: check output room, then rehash src[s-2:] to update
	// the table and possibly chain straight into another match.
match_nolit_emitcopy_end_encodeSnappyBlockAsm:
	CMPL CX, 8(SP)
	JAE emit_remainder_encodeSnappyBlockAsm
	MOVQ -2(DX)(CX*1), SI
	CMPQ AX, (SP)
	JB match_nolit_dst_ok_encodeSnappyBlockAsm
	MOVQ $0x00000000, ret+48(FP)
	RET

match_nolit_dst_ok_encodeSnappyBlockAsm:
	MOVQ $0x0000cf1bbcdcbf9b, R8
	MOVQ SI, DI
	SHRQ $0x10, SI
	MOVQ SI, BX
	SHLQ $0x10, DI
	IMULQ R8, DI
	SHRQ $0x32, DI
	SHLQ $0x10, BX
	IMULQ R8, BX
	SHRQ $0x32, BX
	LEAL -2(CX), R8
	LEAQ 24(SP)(BX*4), R9
	MOVL (R9), BX
	MOVL R8, 24(SP)(DI*4)
	MOVL CX, (R9)
	CMPL (DX)(BX*1), SI
	JEQ match_nolit_loop_encodeSnappyBlockAsm
	INCL CX
	JMP search_loop_encodeSnappyBlockAsm

	// Tail: emit whatever is left of src as one literal run, after a
	// final output-space check (returns 0 on overflow).
emit_remainder_encodeSnappyBlockAsm:
	MOVQ src_len+32(FP), CX
	SUBL 12(SP), CX
	LEAQ 5(AX)(CX*1), CX
	CMPQ CX, (SP)
	JB emit_remainder_ok_encodeSnappyBlockAsm
	MOVQ $0x00000000, ret+48(FP)
	RET

emit_remainder_ok_encodeSnappyBlockAsm:
	MOVQ src_len+32(FP), CX
	MOVL 12(SP), BX
	CMPL BX, CX
	JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm
	MOVL CX, SI
	MOVL CX, 12(SP)
	LEAQ (DX)(BX*1), CX
	SUBL BX, SI
	LEAL -1(SI), DX
	CMPL DX, $0x3c
	JB one_byte_emit_remainder_encodeSnappyBlockAsm
	CMPL DX, $0x00000100
	JB two_bytes_emit_remainder_encodeSnappyBlockAsm
	CMPL DX, $0x00010000
	JB three_bytes_emit_remainder_encodeSnappyBlockAsm
	CMPL DX, $0x01000000
	JB four_bytes_emit_remainder_encodeSnappyBlockAsm
	MOVB $0xfc, (AX)
	MOVL DX, 1(AX)
	ADDQ $0x05, AX
	JMP memmove_long_emit_remainder_encodeSnappyBlockAsm

four_bytes_emit_remainder_encodeSnappyBlockAsm:
	MOVL DX, BX
	SHRL $0x10, BX
	MOVB $0xf8, (AX)
	MOVW DX, 1(AX)
	MOVB BL, 3(AX)
	ADDQ $0x04, AX
	JMP memmove_long_emit_remainder_encodeSnappyBlockAsm

three_bytes_emit_remainder_encodeSnappyBlockAsm:
	MOVB $0xf4, (AX)
	MOVW DX, 1(AX)
	ADDQ $0x03, AX
	JMP memmove_long_emit_remainder_encodeSnappyBlockAsm

two_bytes_emit_remainder_encodeSnappyBlockAsm:
	MOVB $0xf0, (AX)
	MOVB DL, 1(AX)
	ADDQ $0x02, AX
	CMPL DX, $0x40
	JB memmove_emit_remainder_encodeSnappyBlockAsm
	JMP memmove_long_emit_remainder_encodeSnappyBlockAsm

one_byte_emit_remainder_encodeSnappyBlockAsm:
	SHLB $0x02, DL
	MOVB DL, (AX)
	ADDQ $0x01, AX

	// Note: src base in DX is dead from here on, so DX/CX are reused
	// as scratch for the final literal copy.
memmove_emit_remainder_encodeSnappyBlockAsm:
	LEAQ (AX)(SI*1), DX
	MOVL SI, BX

	// genMemMoveShort
	CMPQ BX, $0x03
	JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2
	JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3
	CMPQ BX, $0x08
	JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7
	CMPQ BX, $0x10
	JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16
	CMPQ BX, $0x20
	JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32
	JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64

emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2:
	MOVB (CX), SI
	MOVB -1(CX)(BX*1), CL
	MOVB SI, (AX)
	MOVB CL, -1(AX)(BX*1)
	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm

emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3:
	MOVW (CX), SI
	MOVB 2(CX), CL
	MOVW SI, (AX)
	MOVB CL, 2(AX)
	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm

emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7:
	MOVL (CX), SI
	MOVL -4(CX)(BX*1), CX
	MOVL SI, (AX)
	MOVL CX, -4(AX)(BX*1)
	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm

emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16:
	MOVQ (CX), SI
	MOVQ -8(CX)(BX*1), CX
	MOVQ SI, (AX)
	MOVQ CX, -8(AX)(BX*1)
	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm

emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32:
	MOVOU (CX), X0
	MOVOU -16(CX)(BX*1), X1
	MOVOU X0, (AX)
	MOVOU X1, -16(AX)(BX*1)
	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm

emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64:
	MOVOU (CX), X0
	MOVOU 16(CX), X1
	MOVOU -32(CX)(BX*1), X2
	MOVOU -16(CX)(BX*1), X3
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(BX*1)
	MOVOU X3, -16(AX)(BX*1)

memmove_end_copy_emit_remainder_encodeSnappyBlockAsm:
	MOVQ DX, AX
	JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm

memmove_long_emit_remainder_encodeSnappyBlockAsm:
	LEAQ (AX)(SI*1), DX
	MOVL SI, BX

	// genMemMoveLong
	MOVOU (CX), X0
	MOVOU 16(CX), X1
	MOVOU -32(CX)(BX*1), X2
	MOVOU -16(CX)(BX*1), X3
	MOVQ BX, DI
	SHRQ $0x05, DI
	MOVQ AX, SI
	ANDL $0x0000001f, SI
	MOVQ $0x00000040, R8
	SUBQ SI, R8
	DECQ DI
	JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
	LEAQ -32(CX)(R8*1), SI
	LEAQ -32(AX)(R8*1), R9

emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back:
	MOVOU (SI), X4
	MOVOU 16(SI), X5
	MOVOA X4, (R9)
	MOVOA X5, 16(R9)
	ADDQ $0x20, R9
	ADDQ $0x20, SI
	ADDQ $0x20, R8
	DECQ DI
	JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back

emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
	MOVOU -32(CX)(R8*1), X4
	MOVOU -16(CX)(R8*1), X5
	MOVOA X4, -32(AX)(R8*1)
	MOVOA X5, -16(AX)(R8*1)
	ADDQ $0x20, R8
	CMPQ BX, R8
	JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(BX*1)
	MOVOU X3, -16(AX)(BX*1)
	MOVQ DX, AX

	// Return the number of bytes written: AX - dst_base.
emit_literal_done_emit_remainder_encodeSnappyBlockAsm:
	MOVQ dst_base+0(FP), CX
	SUBQ CX, AX
	MOVQ AX, ret+48(FP)
	RET
11593
11594// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
11595// Requires: BMI, SSE2
11596TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56
11597 MOVQ dst_base+0(FP), AX
11598 MOVQ $0x00000200, CX
11599 LEAQ 24(SP), DX
11600 PXOR X0, X0
11601
11602zero_loop_encodeSnappyBlockAsm64K:
11603 MOVOU X0, (DX)
11604 MOVOU X0, 16(DX)
11605 MOVOU X0, 32(DX)
11606 MOVOU X0, 48(DX)
11607 MOVOU X0, 64(DX)
11608 MOVOU X0, 80(DX)
11609 MOVOU X0, 96(DX)
11610 MOVOU X0, 112(DX)
11611 ADDQ $0x80, DX
11612 DECQ CX
11613 JNZ zero_loop_encodeSnappyBlockAsm64K
11614 MOVL $0x00000000, 12(SP)
11615 MOVQ src_len+32(FP), CX
11616 LEAQ -9(CX), DX
11617 LEAQ -8(CX), BX
11618 MOVL BX, 8(SP)
11619 SHRQ $0x05, CX
11620 SUBL CX, DX
11621 LEAQ (AX)(DX*1), DX
11622 MOVQ DX, (SP)
11623 MOVL $0x00000001, CX
11624 MOVL CX, 16(SP)
11625 MOVQ src_base+24(FP), DX
11626
11627search_loop_encodeSnappyBlockAsm64K:
11628 MOVL CX, BX
11629 SUBL 12(SP), BX
11630 SHRL $0x06, BX
11631 LEAL 4(CX)(BX*1), BX
11632 CMPL BX, 8(SP)
11633 JAE emit_remainder_encodeSnappyBlockAsm64K
11634 MOVQ (DX)(CX*1), SI
11635 MOVL BX, 20(SP)
11636 MOVQ $0x0000cf1bbcdcbf9b, R8
11637 MOVQ SI, R9
11638 MOVQ SI, R10
11639 SHRQ $0x08, R10
11640 SHLQ $0x10, R9
11641 IMULQ R8, R9
11642 SHRQ $0x32, R9
11643 SHLQ $0x10, R10
11644 IMULQ R8, R10
11645 SHRQ $0x32, R10
11646 MOVL 24(SP)(R9*4), BX
11647 MOVL 24(SP)(R10*4), DI
11648 MOVL CX, 24(SP)(R9*4)
11649 LEAL 1(CX), R9
11650 MOVL R9, 24(SP)(R10*4)
11651 MOVQ SI, R9
11652 SHRQ $0x10, R9
11653 SHLQ $0x10, R9
11654 IMULQ R8, R9
11655 SHRQ $0x32, R9
11656 MOVL CX, R8
11657 SUBL 16(SP), R8
11658 MOVL 1(DX)(R8*1), R10
11659 MOVQ SI, R8
11660 SHRQ $0x08, R8
11661 CMPL R8, R10
11662 JNE no_repeat_found_encodeSnappyBlockAsm64K
11663 LEAL 1(CX), SI
11664 MOVL 12(SP), BX
11665 MOVL SI, DI
11666 SUBL 16(SP), DI
11667 JZ repeat_extend_back_end_encodeSnappyBlockAsm64K
11668
11669repeat_extend_back_loop_encodeSnappyBlockAsm64K:
11670 CMPL SI, BX
11671 JBE repeat_extend_back_end_encodeSnappyBlockAsm64K
11672 MOVB -1(DX)(DI*1), R8
11673 MOVB -1(DX)(SI*1), R9
11674 CMPB R8, R9
11675 JNE repeat_extend_back_end_encodeSnappyBlockAsm64K
11676 LEAL -1(SI), SI
11677 DECL DI
11678 JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K
11679
11680repeat_extend_back_end_encodeSnappyBlockAsm64K:
11681 MOVL 12(SP), BX
11682 CMPL BX, SI
11683 JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
11684 MOVL SI, DI
11685 MOVL SI, 12(SP)
11686 LEAQ (DX)(BX*1), R8
11687 SUBL BX, DI
11688 LEAL -1(DI), BX
11689 CMPL BX, $0x3c
11690 JB one_byte_repeat_emit_encodeSnappyBlockAsm64K
11691 CMPL BX, $0x00000100
11692 JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K
11693 JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K
11694
11695three_bytes_repeat_emit_encodeSnappyBlockAsm64K:
11696 MOVB $0xf4, (AX)
11697 MOVW BX, 1(AX)
11698 ADDQ $0x03, AX
11699 JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
11700
11701two_bytes_repeat_emit_encodeSnappyBlockAsm64K:
11702 MOVB $0xf0, (AX)
11703 MOVB BL, 1(AX)
11704 ADDQ $0x02, AX
11705 CMPL BX, $0x40
11706 JB memmove_repeat_emit_encodeSnappyBlockAsm64K
11707 JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
11708
11709one_byte_repeat_emit_encodeSnappyBlockAsm64K:
11710 SHLB $0x02, BL
11711 MOVB BL, (AX)
11712 ADDQ $0x01, AX
11713
11714memmove_repeat_emit_encodeSnappyBlockAsm64K:
11715 LEAQ (AX)(DI*1), BX
11716
11717 // genMemMoveShort
11718 CMPQ DI, $0x08
11719 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8
11720 CMPQ DI, $0x10
11721 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
11722 CMPQ DI, $0x20
11723 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
11724 JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
11725
11726emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8:
11727 MOVQ (R8), R9
11728 MOVQ R9, (AX)
11729 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
11730
11731emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
11732 MOVQ (R8), R9
11733 MOVQ -8(R8)(DI*1), R8
11734 MOVQ R9, (AX)
11735 MOVQ R8, -8(AX)(DI*1)
11736 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
11737
11738emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
11739 MOVOU (R8), X0
11740 MOVOU -16(R8)(DI*1), X1
11741 MOVOU X0, (AX)
11742 MOVOU X1, -16(AX)(DI*1)
11743 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
11744
11745emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
11746 MOVOU (R8), X0
11747 MOVOU 16(R8), X1
11748 MOVOU -32(R8)(DI*1), X2
11749 MOVOU -16(R8)(DI*1), X3
11750 MOVOU X0, (AX)
11751 MOVOU X1, 16(AX)
11752 MOVOU X2, -32(AX)(DI*1)
11753 MOVOU X3, -16(AX)(DI*1)
11754
11755memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K:
11756 MOVQ BX, AX
11757 JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
11758
11759memmove_long_repeat_emit_encodeSnappyBlockAsm64K:
11760 LEAQ (AX)(DI*1), BX
11761
11762 // genMemMoveLong
11763 MOVOU (R8), X0
11764 MOVOU 16(R8), X1
11765 MOVOU -32(R8)(DI*1), X2
11766 MOVOU -16(R8)(DI*1), X3
11767 MOVQ DI, R10
11768 SHRQ $0x05, R10
11769 MOVQ AX, R9
11770 ANDL $0x0000001f, R9
11771 MOVQ $0x00000040, R11
11772 SUBQ R9, R11
11773 DECQ R10
11774 JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
11775 LEAQ -32(R8)(R11*1), R9
11776 LEAQ -32(AX)(R11*1), R12
11777
11778emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
11779 MOVOU (R9), X4
11780 MOVOU 16(R9), X5
11781 MOVOA X4, (R12)
11782 MOVOA X5, 16(R12)
11783 ADDQ $0x20, R12
11784 ADDQ $0x20, R9
11785 ADDQ $0x20, R11
11786 DECQ R10
11787 JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
11788
11789emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
11790 MOVOU -32(R8)(R11*1), X4
11791 MOVOU -16(R8)(R11*1), X5
11792 MOVOA X4, -32(AX)(R11*1)
11793 MOVOA X5, -16(AX)(R11*1)
11794 ADDQ $0x20, R11
11795 CMPQ DI, R11
11796 JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
11797 MOVOU X0, (AX)
11798 MOVOU X1, 16(AX)
11799 MOVOU X2, -32(AX)(DI*1)
11800 MOVOU X3, -16(AX)(DI*1)
11801 MOVQ BX, AX
11802
11803emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K:
11804 ADDL $0x05, CX
11805 MOVL CX, BX
11806 SUBL 16(SP), BX
11807 MOVQ src_len+32(FP), DI
11808 SUBL CX, DI
11809 LEAQ (DX)(CX*1), R8
11810 LEAQ (DX)(BX*1), BX
11811
11812 // matchLen
11813 XORL R10, R10
11814
11815matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K:
11816 CMPL DI, $0x10
11817 JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K
11818 MOVQ (R8)(R10*1), R9
11819 MOVQ 8(R8)(R10*1), R11
11820 XORQ (BX)(R10*1), R9
11821 JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K
11822 XORQ 8(BX)(R10*1), R11
11823 JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K
11824 LEAL -16(DI), DI
11825 LEAL 16(R10), R10
11826 JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K
11827
11828matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K:
11829#ifdef GOAMD64_v3
11830 TZCNTQ R11, R11
11831
11832#else
11833 BSFQ R11, R11
11834
11835#endif
11836 SARQ $0x03, R11
11837 LEAL 8(R10)(R11*1), R10
11838 JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
11839
11840matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K:
11841 CMPL DI, $0x08
11842 JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
11843 MOVQ (R8)(R10*1), R9
11844 XORQ (BX)(R10*1), R9
11845 JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K
11846 LEAL -8(DI), DI
11847 LEAL 8(R10), R10
11848 JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
11849
11850matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K:
11851#ifdef GOAMD64_v3
11852 TZCNTQ R9, R9
11853
11854#else
11855 BSFQ R9, R9
11856
11857#endif
11858 SARQ $0x03, R9
11859 LEAL (R10)(R9*1), R10
11860 JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
11861
11862matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
11863 CMPL DI, $0x04
11864 JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
11865 MOVL (R8)(R10*1), R9
11866 CMPL (BX)(R10*1), R9
11867 JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
11868 LEAL -4(DI), DI
11869 LEAL 4(R10), R10
11870
11871matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K:
11872 CMPL DI, $0x01
11873 JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
11874 JB repeat_extend_forward_end_encodeSnappyBlockAsm64K
11875 MOVW (R8)(R10*1), R9
11876 CMPW (BX)(R10*1), R9
11877 JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
11878 LEAL 2(R10), R10
11879 SUBL $0x02, DI
11880 JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
11881
11882matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K:
11883 MOVB (R8)(R10*1), R9
11884 CMPB (BX)(R10*1), R9
11885 JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K
11886 LEAL 1(R10), R10
11887
11888repeat_extend_forward_end_encodeSnappyBlockAsm64K:
11889 ADDL R10, CX
11890 MOVL CX, BX
11891 SUBL SI, BX
11892 MOVL 16(SP), SI
11893
11894 // emitCopy
11895two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K:
11896 CMPL BX, $0x40
11897 JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K
11898 MOVB $0xee, (AX)
11899 MOVW SI, 1(AX)
11900 LEAL -60(BX), BX
11901 ADDQ $0x03, AX
11902 JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K
11903
11904two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K:
11905 MOVL BX, DI
11906 SHLL $0x02, DI
11907 CMPL BX, $0x0c
11908 JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
11909 CMPL SI, $0x00000800
11910 JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
11911 LEAL -15(DI), DI
11912 MOVB SI, 1(AX)
11913 SHRL $0x08, SI
11914 SHLL $0x05, SI
11915 ORL SI, DI
11916 MOVB DI, (AX)
11917 ADDQ $0x02, AX
11918 JMP repeat_end_emit_encodeSnappyBlockAsm64K
11919
11920emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K:
11921 LEAL -2(DI), DI
11922 MOVB DI, (AX)
11923 MOVW SI, 1(AX)
11924 ADDQ $0x03, AX
11925
11926repeat_end_emit_encodeSnappyBlockAsm64K:
11927 MOVL CX, 12(SP)
11928 JMP search_loop_encodeSnappyBlockAsm64K
11929
11930no_repeat_found_encodeSnappyBlockAsm64K:
11931 CMPL (DX)(BX*1), SI
11932 JEQ candidate_match_encodeSnappyBlockAsm64K
11933 SHRQ $0x08, SI
11934 MOVL 24(SP)(R9*4), BX
11935 LEAL 2(CX), R8
11936 CMPL (DX)(DI*1), SI
11937 JEQ candidate2_match_encodeSnappyBlockAsm64K
11938 MOVL R8, 24(SP)(R9*4)
11939 SHRQ $0x08, SI
11940 CMPL (DX)(BX*1), SI
11941 JEQ candidate3_match_encodeSnappyBlockAsm64K
11942 MOVL 20(SP), CX
11943 JMP search_loop_encodeSnappyBlockAsm64K
11944
11945candidate3_match_encodeSnappyBlockAsm64K:
11946 ADDL $0x02, CX
11947 JMP candidate_match_encodeSnappyBlockAsm64K
11948
11949candidate2_match_encodeSnappyBlockAsm64K:
11950 MOVL R8, 24(SP)(R9*4)
11951 INCL CX
11952 MOVL DI, BX
11953
11954candidate_match_encodeSnappyBlockAsm64K:
11955 MOVL 12(SP), SI
11956 TESTL BX, BX
11957 JZ match_extend_back_end_encodeSnappyBlockAsm64K
11958
11959match_extend_back_loop_encodeSnappyBlockAsm64K:
11960 CMPL CX, SI
11961 JBE match_extend_back_end_encodeSnappyBlockAsm64K
11962 MOVB -1(DX)(BX*1), DI
11963 MOVB -1(DX)(CX*1), R8
11964 CMPB DI, R8
11965 JNE match_extend_back_end_encodeSnappyBlockAsm64K
11966 LEAL -1(CX), CX
11967 DECL BX
11968 JZ match_extend_back_end_encodeSnappyBlockAsm64K
11969 JMP match_extend_back_loop_encodeSnappyBlockAsm64K
11970
11971match_extend_back_end_encodeSnappyBlockAsm64K:
11972 MOVL CX, SI
11973 SUBL 12(SP), SI
11974 LEAQ 3(AX)(SI*1), SI
11975 CMPQ SI, (SP)
11976 JB match_dst_size_check_encodeSnappyBlockAsm64K
11977 MOVQ $0x00000000, ret+48(FP)
11978 RET
11979
11980match_dst_size_check_encodeSnappyBlockAsm64K:
11981 MOVL CX, SI
11982 MOVL 12(SP), DI
11983 CMPL DI, SI
11984 JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K
11985 MOVL SI, R8
11986 MOVL SI, 12(SP)
11987 LEAQ (DX)(DI*1), SI
11988 SUBL DI, R8
11989 LEAL -1(R8), DI
11990 CMPL DI, $0x3c
11991 JB one_byte_match_emit_encodeSnappyBlockAsm64K
11992 CMPL DI, $0x00000100
11993 JB two_bytes_match_emit_encodeSnappyBlockAsm64K
11994 JB three_bytes_match_emit_encodeSnappyBlockAsm64K
11995
11996three_bytes_match_emit_encodeSnappyBlockAsm64K:
11997 MOVB $0xf4, (AX)
11998 MOVW DI, 1(AX)
11999 ADDQ $0x03, AX
12000 JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
12001
12002two_bytes_match_emit_encodeSnappyBlockAsm64K:
12003 MOVB $0xf0, (AX)
12004 MOVB DI, 1(AX)
12005 ADDQ $0x02, AX
12006 CMPL DI, $0x40
12007 JB memmove_match_emit_encodeSnappyBlockAsm64K
12008 JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
12009
12010one_byte_match_emit_encodeSnappyBlockAsm64K:
12011 SHLB $0x02, DI
12012 MOVB DI, (AX)
12013 ADDQ $0x01, AX
12014
12015memmove_match_emit_encodeSnappyBlockAsm64K:
12016 LEAQ (AX)(R8*1), DI
12017
12018 // genMemMoveShort
12019 CMPQ R8, $0x08
12020 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8
12021 CMPQ R8, $0x10
12022 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
12023 CMPQ R8, $0x20
12024 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
12025 JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
12026
12027emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8:
12028 MOVQ (SI), R9
12029 MOVQ R9, (AX)
12030 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
12031
12032emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
12033 MOVQ (SI), R9
12034 MOVQ -8(SI)(R8*1), SI
12035 MOVQ R9, (AX)
12036 MOVQ SI, -8(AX)(R8*1)
12037 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
12038
12039emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
12040 MOVOU (SI), X0
12041 MOVOU -16(SI)(R8*1), X1
12042 MOVOU X0, (AX)
12043 MOVOU X1, -16(AX)(R8*1)
12044 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
12045
12046emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
12047 MOVOU (SI), X0
12048 MOVOU 16(SI), X1
12049 MOVOU -32(SI)(R8*1), X2
12050 MOVOU -16(SI)(R8*1), X3
12051 MOVOU X0, (AX)
12052 MOVOU X1, 16(AX)
12053 MOVOU X2, -32(AX)(R8*1)
12054 MOVOU X3, -16(AX)(R8*1)
12055
12056memmove_end_copy_match_emit_encodeSnappyBlockAsm64K:
12057 MOVQ DI, AX
12058 JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K
12059
12060memmove_long_match_emit_encodeSnappyBlockAsm64K:
12061 LEAQ (AX)(R8*1), DI
12062
12063 // genMemMoveLong
12064 MOVOU (SI), X0
12065 MOVOU 16(SI), X1
12066 MOVOU -32(SI)(R8*1), X2
12067 MOVOU -16(SI)(R8*1), X3
12068 MOVQ R8, R10
12069 SHRQ $0x05, R10
12070 MOVQ AX, R9
12071 ANDL $0x0000001f, R9
12072 MOVQ $0x00000040, R11
12073 SUBQ R9, R11
12074 DECQ R10
12075 JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
12076 LEAQ -32(SI)(R11*1), R9
12077 LEAQ -32(AX)(R11*1), R12
12078
12079emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
12080 MOVOU (R9), X4
12081 MOVOU 16(R9), X5
12082 MOVOA X4, (R12)
12083 MOVOA X5, 16(R12)
12084 ADDQ $0x20, R12
12085 ADDQ $0x20, R9
12086 ADDQ $0x20, R11
12087 DECQ R10
12088 JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
12089
12090emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
12091 MOVOU -32(SI)(R11*1), X4
12092 MOVOU -16(SI)(R11*1), X5
12093 MOVOA X4, -32(AX)(R11*1)
12094 MOVOA X5, -16(AX)(R11*1)
12095 ADDQ $0x20, R11
12096 CMPQ R8, R11
12097 JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
12098 MOVOU X0, (AX)
12099 MOVOU X1, 16(AX)
12100 MOVOU X2, -32(AX)(R8*1)
12101 MOVOU X3, -16(AX)(R8*1)
12102 MOVQ DI, AX
12103
12104emit_literal_done_match_emit_encodeSnappyBlockAsm64K:
12105match_nolit_loop_encodeSnappyBlockAsm64K:
12106 MOVL CX, SI
12107 SUBL BX, SI
12108 MOVL SI, 16(SP)
12109 ADDL $0x04, CX
12110 ADDL $0x04, BX
12111 MOVQ src_len+32(FP), SI
12112 SUBL CX, SI
12113 LEAQ (DX)(CX*1), DI
12114 LEAQ (DX)(BX*1), BX
12115
12116 // matchLen
12117 XORL R9, R9
12118
12119matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K:
12120 CMPL SI, $0x10
12121 JB matchlen_match8_match_nolit_encodeSnappyBlockAsm64K
12122 MOVQ (DI)(R9*1), R8
12123 MOVQ 8(DI)(R9*1), R10
12124 XORQ (BX)(R9*1), R8
12125 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K
12126 XORQ 8(BX)(R9*1), R10
12127 JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K
12128 LEAL -16(SI), SI
12129 LEAL 16(R9), R9
12130 JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K
12131
12132matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K:
12133#ifdef GOAMD64_v3
12134 TZCNTQ R10, R10
12135
12136#else
12137 BSFQ R10, R10
12138
12139#endif
12140 SARQ $0x03, R10
12141 LEAL 8(R9)(R10*1), R9
12142 JMP match_nolit_end_encodeSnappyBlockAsm64K
12143
12144matchlen_match8_match_nolit_encodeSnappyBlockAsm64K:
12145 CMPL SI, $0x08
12146 JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
12147 MOVQ (DI)(R9*1), R8
12148 XORQ (BX)(R9*1), R8
12149 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K
12150 LEAL -8(SI), SI
12151 LEAL 8(R9), R9
12152 JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
12153
12154matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K:
12155#ifdef GOAMD64_v3
12156 TZCNTQ R8, R8
12157
12158#else
12159 BSFQ R8, R8
12160
12161#endif
12162 SARQ $0x03, R8
12163 LEAL (R9)(R8*1), R9
12164 JMP match_nolit_end_encodeSnappyBlockAsm64K
12165
12166matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
12167 CMPL SI, $0x04
12168 JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
12169 MOVL (DI)(R9*1), R8
12170 CMPL (BX)(R9*1), R8
12171 JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
12172 LEAL -4(SI), SI
12173 LEAL 4(R9), R9
12174
12175matchlen_match2_match_nolit_encodeSnappyBlockAsm64K:
12176 CMPL SI, $0x01
12177 JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
12178 JB match_nolit_end_encodeSnappyBlockAsm64K
12179 MOVW (DI)(R9*1), R8
12180 CMPW (BX)(R9*1), R8
12181 JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
12182 LEAL 2(R9), R9
12183 SUBL $0x02, SI
12184 JZ match_nolit_end_encodeSnappyBlockAsm64K
12185
12186matchlen_match1_match_nolit_encodeSnappyBlockAsm64K:
12187 MOVB (DI)(R9*1), R8
12188 CMPB (BX)(R9*1), R8
12189 JNE match_nolit_end_encodeSnappyBlockAsm64K
12190 LEAL 1(R9), R9
12191
12192match_nolit_end_encodeSnappyBlockAsm64K:
12193 ADDL R9, CX
12194 MOVL 16(SP), BX
12195 ADDL $0x04, R9
12196 MOVL CX, 12(SP)
12197
12198 // emitCopy
12199two_byte_offset_match_nolit_encodeSnappyBlockAsm64K:
12200 CMPL R9, $0x40
12201 JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K
12202 MOVB $0xee, (AX)
12203 MOVW BX, 1(AX)
12204 LEAL -60(R9), R9
12205 ADDQ $0x03, AX
12206 JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K
12207
12208two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K:
12209 MOVL R9, SI
12210 SHLL $0x02, SI
12211 CMPL R9, $0x0c
12212 JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
12213 CMPL BX, $0x00000800
12214 JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
12215 LEAL -15(SI), SI
12216 MOVB BL, 1(AX)
12217 SHRL $0x08, BX
12218 SHLL $0x05, BX
12219 ORL BX, SI
12220 MOVB SI, (AX)
12221 ADDQ $0x02, AX
12222 JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K
12223
12224emit_copy_three_match_nolit_encodeSnappyBlockAsm64K:
12225 LEAL -2(SI), SI
12226 MOVB SI, (AX)
12227 MOVW BX, 1(AX)
12228 ADDQ $0x03, AX
12229
12230match_nolit_emitcopy_end_encodeSnappyBlockAsm64K:
12231 CMPL CX, 8(SP)
12232 JAE emit_remainder_encodeSnappyBlockAsm64K
12233 MOVQ -2(DX)(CX*1), SI
12234 CMPQ AX, (SP)
12235 JB match_nolit_dst_ok_encodeSnappyBlockAsm64K
12236 MOVQ $0x00000000, ret+48(FP)
12237 RET
12238
12239match_nolit_dst_ok_encodeSnappyBlockAsm64K:
12240 MOVQ $0x0000cf1bbcdcbf9b, R8
12241 MOVQ SI, DI
12242 SHRQ $0x10, SI
12243 MOVQ SI, BX
12244 SHLQ $0x10, DI
12245 IMULQ R8, DI
12246 SHRQ $0x32, DI
12247 SHLQ $0x10, BX
12248 IMULQ R8, BX
12249 SHRQ $0x32, BX
12250 LEAL -2(CX), R8
12251 LEAQ 24(SP)(BX*4), R9
12252 MOVL (R9), BX
12253 MOVL R8, 24(SP)(DI*4)
12254 MOVL CX, (R9)
12255 CMPL (DX)(BX*1), SI
12256 JEQ match_nolit_loop_encodeSnappyBlockAsm64K
12257 INCL CX
12258 JMP search_loop_encodeSnappyBlockAsm64K
12259
12260emit_remainder_encodeSnappyBlockAsm64K:
12261 MOVQ src_len+32(FP), CX
12262 SUBL 12(SP), CX
12263 LEAQ 3(AX)(CX*1), CX
12264 CMPQ CX, (SP)
12265 JB emit_remainder_ok_encodeSnappyBlockAsm64K
12266 MOVQ $0x00000000, ret+48(FP)
12267 RET
12268
12269emit_remainder_ok_encodeSnappyBlockAsm64K:
12270 MOVQ src_len+32(FP), CX
12271 MOVL 12(SP), BX
12272 CMPL BX, CX
12273 JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
12274 MOVL CX, SI
12275 MOVL CX, 12(SP)
12276 LEAQ (DX)(BX*1), CX
12277 SUBL BX, SI
12278 LEAL -1(SI), DX
12279 CMPL DX, $0x3c
12280 JB one_byte_emit_remainder_encodeSnappyBlockAsm64K
12281 CMPL DX, $0x00000100
12282 JB two_bytes_emit_remainder_encodeSnappyBlockAsm64K
12283 JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K
12284
12285three_bytes_emit_remainder_encodeSnappyBlockAsm64K:
12286 MOVB $0xf4, (AX)
12287 MOVW DX, 1(AX)
12288 ADDQ $0x03, AX
12289 JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
12290
12291two_bytes_emit_remainder_encodeSnappyBlockAsm64K:
12292 MOVB $0xf0, (AX)
12293 MOVB DL, 1(AX)
12294 ADDQ $0x02, AX
12295 CMPL DX, $0x40
12296 JB memmove_emit_remainder_encodeSnappyBlockAsm64K
12297 JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
12298
12299one_byte_emit_remainder_encodeSnappyBlockAsm64K:
12300 SHLB $0x02, DL
12301 MOVB DL, (AX)
12302 ADDQ $0x01, AX
12303
12304memmove_emit_remainder_encodeSnappyBlockAsm64K:
12305 LEAQ (AX)(SI*1), DX
12306 MOVL SI, BX
12307
12308 // genMemMoveShort
12309 CMPQ BX, $0x03
12310 JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2
12311 JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3
12312 CMPQ BX, $0x08
12313 JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7
12314 CMPQ BX, $0x10
12315 JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16
12316 CMPQ BX, $0x20
12317 JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32
12318 JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64
12319
12320emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2:
12321 MOVB (CX), SI
12322 MOVB -1(CX)(BX*1), CL
12323 MOVB SI, (AX)
12324 MOVB CL, -1(AX)(BX*1)
12325 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
12326
12327emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3:
12328 MOVW (CX), SI
12329 MOVB 2(CX), CL
12330 MOVW SI, (AX)
12331 MOVB CL, 2(AX)
12332 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
12333
12334emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7:
12335 MOVL (CX), SI
12336 MOVL -4(CX)(BX*1), CX
12337 MOVL SI, (AX)
12338 MOVL CX, -4(AX)(BX*1)
12339 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
12340
12341emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16:
12342 MOVQ (CX), SI
12343 MOVQ -8(CX)(BX*1), CX
12344 MOVQ SI, (AX)
12345 MOVQ CX, -8(AX)(BX*1)
12346 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
12347
12348emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32:
12349 MOVOU (CX), X0
12350 MOVOU -16(CX)(BX*1), X1
12351 MOVOU X0, (AX)
12352 MOVOU X1, -16(AX)(BX*1)
12353 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
12354
12355emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64:
12356 MOVOU (CX), X0
12357 MOVOU 16(CX), X1
12358 MOVOU -32(CX)(BX*1), X2
12359 MOVOU -16(CX)(BX*1), X3
12360 MOVOU X0, (AX)
12361 MOVOU X1, 16(AX)
12362 MOVOU X2, -32(AX)(BX*1)
12363 MOVOU X3, -16(AX)(BX*1)
12364
12365memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K:
12366 MOVQ DX, AX
12367 JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
12368
12369memmove_long_emit_remainder_encodeSnappyBlockAsm64K:
12370 LEAQ (AX)(SI*1), DX
12371 MOVL SI, BX
12372
12373 // genMemMoveLong
12374 MOVOU (CX), X0
12375 MOVOU 16(CX), X1
12376 MOVOU -32(CX)(BX*1), X2
12377 MOVOU -16(CX)(BX*1), X3
12378 MOVQ BX, DI
12379 SHRQ $0x05, DI
12380 MOVQ AX, SI
12381 ANDL $0x0000001f, SI
12382 MOVQ $0x00000040, R8
12383 SUBQ SI, R8
12384 DECQ DI
12385 JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
12386 LEAQ -32(CX)(R8*1), SI
12387 LEAQ -32(AX)(R8*1), R9
12388
12389emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back:
12390 MOVOU (SI), X4
12391 MOVOU 16(SI), X5
12392 MOVOA X4, (R9)
12393 MOVOA X5, 16(R9)
12394 ADDQ $0x20, R9
12395 ADDQ $0x20, SI
12396 ADDQ $0x20, R8
12397 DECQ DI
12398 JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back
12399
12400emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
12401 MOVOU -32(CX)(R8*1), X4
12402 MOVOU -16(CX)(R8*1), X5
12403 MOVOA X4, -32(AX)(R8*1)
12404 MOVOA X5, -16(AX)(R8*1)
12405 ADDQ $0x20, R8
12406 CMPQ BX, R8
12407 JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
12408 MOVOU X0, (AX)
12409 MOVOU X1, 16(AX)
12410 MOVOU X2, -32(AX)(BX*1)
12411 MOVOU X3, -16(AX)(BX*1)
12412 MOVQ DX, AX
12413
12414emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K:
12415 MOVQ dst_base+0(FP), CX
12416 SUBQ CX, AX
12417 MOVQ AX, ret+48(FP)
12418 RET
12419
12420// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
12421// Requires: BMI, SSE2
12422TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56
12423 MOVQ dst_base+0(FP), AX
12424 MOVQ $0x00000080, CX
12425 LEAQ 24(SP), DX
12426 PXOR X0, X0
12427
12428zero_loop_encodeSnappyBlockAsm12B:
12429 MOVOU X0, (DX)
12430 MOVOU X0, 16(DX)
12431 MOVOU X0, 32(DX)
12432 MOVOU X0, 48(DX)
12433 MOVOU X0, 64(DX)
12434 MOVOU X0, 80(DX)
12435 MOVOU X0, 96(DX)
12436 MOVOU X0, 112(DX)
12437 ADDQ $0x80, DX
12438 DECQ CX
12439 JNZ zero_loop_encodeSnappyBlockAsm12B
12440 MOVL $0x00000000, 12(SP)
12441 MOVQ src_len+32(FP), CX
12442 LEAQ -9(CX), DX
12443 LEAQ -8(CX), BX
12444 MOVL BX, 8(SP)
12445 SHRQ $0x05, CX
12446 SUBL CX, DX
12447 LEAQ (AX)(DX*1), DX
12448 MOVQ DX, (SP)
12449 MOVL $0x00000001, CX
12450 MOVL CX, 16(SP)
12451 MOVQ src_base+24(FP), DX
12452
12453search_loop_encodeSnappyBlockAsm12B:
12454 MOVL CX, BX
12455 SUBL 12(SP), BX
12456 SHRL $0x05, BX
12457 LEAL 4(CX)(BX*1), BX
12458 CMPL BX, 8(SP)
12459 JAE emit_remainder_encodeSnappyBlockAsm12B
12460 MOVQ (DX)(CX*1), SI
12461 MOVL BX, 20(SP)
12462 MOVQ $0x000000cf1bbcdcbb, R8
12463 MOVQ SI, R9
12464 MOVQ SI, R10
12465 SHRQ $0x08, R10
12466 SHLQ $0x18, R9
12467 IMULQ R8, R9
12468 SHRQ $0x34, R9
12469 SHLQ $0x18, R10
12470 IMULQ R8, R10
12471 SHRQ $0x34, R10
12472 MOVL 24(SP)(R9*4), BX
12473 MOVL 24(SP)(R10*4), DI
12474 MOVL CX, 24(SP)(R9*4)
12475 LEAL 1(CX), R9
12476 MOVL R9, 24(SP)(R10*4)
12477 MOVQ SI, R9
12478 SHRQ $0x10, R9
12479 SHLQ $0x18, R9
12480 IMULQ R8, R9
12481 SHRQ $0x34, R9
12482 MOVL CX, R8
12483 SUBL 16(SP), R8
12484 MOVL 1(DX)(R8*1), R10
12485 MOVQ SI, R8
12486 SHRQ $0x08, R8
12487 CMPL R8, R10
12488 JNE no_repeat_found_encodeSnappyBlockAsm12B
12489 LEAL 1(CX), SI
12490 MOVL 12(SP), BX
12491 MOVL SI, DI
12492 SUBL 16(SP), DI
12493 JZ repeat_extend_back_end_encodeSnappyBlockAsm12B
12494
12495repeat_extend_back_loop_encodeSnappyBlockAsm12B:
12496 CMPL SI, BX
12497 JBE repeat_extend_back_end_encodeSnappyBlockAsm12B
12498 MOVB -1(DX)(DI*1), R8
12499 MOVB -1(DX)(SI*1), R9
12500 CMPB R8, R9
12501 JNE repeat_extend_back_end_encodeSnappyBlockAsm12B
12502 LEAL -1(SI), SI
12503 DECL DI
12504 JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B
12505
12506repeat_extend_back_end_encodeSnappyBlockAsm12B:
12507 MOVL 12(SP), BX
12508 CMPL BX, SI
12509 JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
12510 MOVL SI, DI
12511 MOVL SI, 12(SP)
12512 LEAQ (DX)(BX*1), R8
12513 SUBL BX, DI
12514 LEAL -1(DI), BX
12515 CMPL BX, $0x3c
12516 JB one_byte_repeat_emit_encodeSnappyBlockAsm12B
12517 CMPL BX, $0x00000100
12518 JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B
12519 JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B
12520
12521three_bytes_repeat_emit_encodeSnappyBlockAsm12B:
12522 MOVB $0xf4, (AX)
12523 MOVW BX, 1(AX)
12524 ADDQ $0x03, AX
12525 JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
12526
12527two_bytes_repeat_emit_encodeSnappyBlockAsm12B:
12528 MOVB $0xf0, (AX)
12529 MOVB BL, 1(AX)
12530 ADDQ $0x02, AX
12531 CMPL BX, $0x40
12532 JB memmove_repeat_emit_encodeSnappyBlockAsm12B
12533 JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
12534
12535one_byte_repeat_emit_encodeSnappyBlockAsm12B:
12536 SHLB $0x02, BL
12537 MOVB BL, (AX)
12538 ADDQ $0x01, AX
12539
12540memmove_repeat_emit_encodeSnappyBlockAsm12B:
12541 LEAQ (AX)(DI*1), BX
12542
12543 // genMemMoveShort
12544 CMPQ DI, $0x08
12545 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8
12546 CMPQ DI, $0x10
12547 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
12548 CMPQ DI, $0x20
12549 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
12550 JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
12551
12552emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8:
12553 MOVQ (R8), R9
12554 MOVQ R9, (AX)
12555 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
12556
12557emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
12558 MOVQ (R8), R9
12559 MOVQ -8(R8)(DI*1), R8
12560 MOVQ R9, (AX)
12561 MOVQ R8, -8(AX)(DI*1)
12562 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
12563
12564emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
12565 MOVOU (R8), X0
12566 MOVOU -16(R8)(DI*1), X1
12567 MOVOU X0, (AX)
12568 MOVOU X1, -16(AX)(DI*1)
12569 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
12570
12571emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
12572 MOVOU (R8), X0
12573 MOVOU 16(R8), X1
12574 MOVOU -32(R8)(DI*1), X2
12575 MOVOU -16(R8)(DI*1), X3
12576 MOVOU X0, (AX)
12577 MOVOU X1, 16(AX)
12578 MOVOU X2, -32(AX)(DI*1)
12579 MOVOU X3, -16(AX)(DI*1)
12580
12581memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B:
12582 MOVQ BX, AX
12583 JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
12584
12585memmove_long_repeat_emit_encodeSnappyBlockAsm12B:
12586 LEAQ (AX)(DI*1), BX
12587
12588 // genMemMoveLong
12589 MOVOU (R8), X0
12590 MOVOU 16(R8), X1
12591 MOVOU -32(R8)(DI*1), X2
12592 MOVOU -16(R8)(DI*1), X3
12593 MOVQ DI, R10
12594 SHRQ $0x05, R10
12595 MOVQ AX, R9
12596 ANDL $0x0000001f, R9
12597 MOVQ $0x00000040, R11
12598 SUBQ R9, R11
12599 DECQ R10
12600 JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
12601 LEAQ -32(R8)(R11*1), R9
12602 LEAQ -32(AX)(R11*1), R12
12603
12604emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
12605 MOVOU (R9), X4
12606 MOVOU 16(R9), X5
12607 MOVOA X4, (R12)
12608 MOVOA X5, 16(R12)
12609 ADDQ $0x20, R12
12610 ADDQ $0x20, R9
12611 ADDQ $0x20, R11
12612 DECQ R10
12613 JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
12614
12615emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
12616 MOVOU -32(R8)(R11*1), X4
12617 MOVOU -16(R8)(R11*1), X5
12618 MOVOA X4, -32(AX)(R11*1)
12619 MOVOA X5, -16(AX)(R11*1)
12620 ADDQ $0x20, R11
12621 CMPQ DI, R11
12622 JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
12623 MOVOU X0, (AX)
12624 MOVOU X1, 16(AX)
12625 MOVOU X2, -32(AX)(DI*1)
12626 MOVOU X3, -16(AX)(DI*1)
12627 MOVQ BX, AX
12628
12629emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B:
12630 ADDL $0x05, CX
12631 MOVL CX, BX
12632 SUBL 16(SP), BX
12633 MOVQ src_len+32(FP), DI
12634 SUBL CX, DI
12635 LEAQ (DX)(CX*1), R8
12636 LEAQ (DX)(BX*1), BX
12637
12638 // matchLen
12639 XORL R10, R10
12640
12641matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B:
12642 CMPL DI, $0x10
12643 JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B
12644 MOVQ (R8)(R10*1), R9
12645 MOVQ 8(R8)(R10*1), R11
12646 XORQ (BX)(R10*1), R9
12647 JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
12648 XORQ 8(BX)(R10*1), R11
12649 JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B
12650 LEAL -16(DI), DI
12651 LEAL 16(R10), R10
12652 JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B
12653
12654matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B:
12655#ifdef GOAMD64_v3
12656 TZCNTQ R11, R11
12657
12658#else
12659 BSFQ R11, R11
12660
12661#endif
12662 SARQ $0x03, R11
12663 LEAL 8(R10)(R11*1), R10
12664 JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
12665
12666matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B:
12667 CMPL DI, $0x08
12668 JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
12669 MOVQ (R8)(R10*1), R9
12670 XORQ (BX)(R10*1), R9
12671 JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
12672 LEAL -8(DI), DI
12673 LEAL 8(R10), R10
12674 JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
12675
12676matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B:
12677#ifdef GOAMD64_v3
12678 TZCNTQ R9, R9
12679
12680#else
12681 BSFQ R9, R9
12682
12683#endif
12684 SARQ $0x03, R9
12685 LEAL (R10)(R9*1), R10
12686 JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
12687
12688matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
12689 CMPL DI, $0x04
12690 JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
12691 MOVL (R8)(R10*1), R9
12692 CMPL (BX)(R10*1), R9
12693 JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
12694 LEAL -4(DI), DI
12695 LEAL 4(R10), R10
12696
12697matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
12698 CMPL DI, $0x01
12699 JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
12700 JB repeat_extend_forward_end_encodeSnappyBlockAsm12B
12701 MOVW (R8)(R10*1), R9
12702 CMPW (BX)(R10*1), R9
12703 JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
12704 LEAL 2(R10), R10
12705 SUBL $0x02, DI
12706 JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
12707
12708matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
12709 MOVB (R8)(R10*1), R9
12710 CMPB (BX)(R10*1), R9
12711 JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
12712 LEAL 1(R10), R10
12713
12714repeat_extend_forward_end_encodeSnappyBlockAsm12B:
12715 ADDL R10, CX
12716 MOVL CX, BX
12717 SUBL SI, BX
12718 MOVL 16(SP), SI
12719
12720 // emitCopy
12721two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B:
12722 CMPL BX, $0x40
12723 JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B
12724 MOVB $0xee, (AX)
12725 MOVW SI, 1(AX)
12726 LEAL -60(BX), BX
12727 ADDQ $0x03, AX
12728 JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B
12729
12730two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B:
12731 MOVL BX, DI
12732 SHLL $0x02, DI
12733 CMPL BX, $0x0c
12734 JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
12735 CMPL SI, $0x00000800
12736 JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
12737 LEAL -15(DI), DI
12738 MOVB SI, 1(AX)
12739 SHRL $0x08, SI
12740 SHLL $0x05, SI
12741 ORL SI, DI
12742 MOVB DI, (AX)
12743 ADDQ $0x02, AX
12744 JMP repeat_end_emit_encodeSnappyBlockAsm12B
12745
12746emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B:
12747 LEAL -2(DI), DI
12748 MOVB DI, (AX)
12749 MOVW SI, 1(AX)
12750 ADDQ $0x03, AX
12751
12752repeat_end_emit_encodeSnappyBlockAsm12B:
12753 MOVL CX, 12(SP)
12754 JMP search_loop_encodeSnappyBlockAsm12B
12755
12756no_repeat_found_encodeSnappyBlockAsm12B:
12757 CMPL (DX)(BX*1), SI
12758 JEQ candidate_match_encodeSnappyBlockAsm12B
12759 SHRQ $0x08, SI
12760 MOVL 24(SP)(R9*4), BX
12761 LEAL 2(CX), R8
12762 CMPL (DX)(DI*1), SI
12763 JEQ candidate2_match_encodeSnappyBlockAsm12B
12764 MOVL R8, 24(SP)(R9*4)
12765 SHRQ $0x08, SI
12766 CMPL (DX)(BX*1), SI
12767 JEQ candidate3_match_encodeSnappyBlockAsm12B
12768 MOVL 20(SP), CX
12769 JMP search_loop_encodeSnappyBlockAsm12B
12770
12771candidate3_match_encodeSnappyBlockAsm12B:
12772 ADDL $0x02, CX
12773 JMP candidate_match_encodeSnappyBlockAsm12B
12774
12775candidate2_match_encodeSnappyBlockAsm12B:
12776 MOVL R8, 24(SP)(R9*4)
12777 INCL CX
12778 MOVL DI, BX
12779
12780candidate_match_encodeSnappyBlockAsm12B:
12781 MOVL 12(SP), SI
12782 TESTL BX, BX
12783 JZ match_extend_back_end_encodeSnappyBlockAsm12B
12784
12785match_extend_back_loop_encodeSnappyBlockAsm12B:
12786 CMPL CX, SI
12787 JBE match_extend_back_end_encodeSnappyBlockAsm12B
12788 MOVB -1(DX)(BX*1), DI
12789 MOVB -1(DX)(CX*1), R8
12790 CMPB DI, R8
12791 JNE match_extend_back_end_encodeSnappyBlockAsm12B
12792 LEAL -1(CX), CX
12793 DECL BX
12794 JZ match_extend_back_end_encodeSnappyBlockAsm12B
12795 JMP match_extend_back_loop_encodeSnappyBlockAsm12B
12796
12797match_extend_back_end_encodeSnappyBlockAsm12B:
12798 MOVL CX, SI
12799 SUBL 12(SP), SI
12800 LEAQ 3(AX)(SI*1), SI
12801 CMPQ SI, (SP)
12802 JB match_dst_size_check_encodeSnappyBlockAsm12B
12803 MOVQ $0x00000000, ret+48(FP)
12804 RET
12805
12806match_dst_size_check_encodeSnappyBlockAsm12B:
12807 MOVL CX, SI
12808 MOVL 12(SP), DI
12809 CMPL DI, SI
12810 JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B
12811 MOVL SI, R8
12812 MOVL SI, 12(SP)
12813 LEAQ (DX)(DI*1), SI
12814 SUBL DI, R8
12815 LEAL -1(R8), DI
12816 CMPL DI, $0x3c
12817 JB one_byte_match_emit_encodeSnappyBlockAsm12B
12818 CMPL DI, $0x00000100
12819 JB two_bytes_match_emit_encodeSnappyBlockAsm12B
12820 JB three_bytes_match_emit_encodeSnappyBlockAsm12B
12821
12822three_bytes_match_emit_encodeSnappyBlockAsm12B:
12823 MOVB $0xf4, (AX)
12824 MOVW DI, 1(AX)
12825 ADDQ $0x03, AX
12826 JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
12827
12828two_bytes_match_emit_encodeSnappyBlockAsm12B:
12829 MOVB $0xf0, (AX)
12830 MOVB DI, 1(AX)
12831 ADDQ $0x02, AX
12832 CMPL DI, $0x40
12833 JB memmove_match_emit_encodeSnappyBlockAsm12B
12834 JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
12835
12836one_byte_match_emit_encodeSnappyBlockAsm12B:
12837 SHLB $0x02, DI
12838 MOVB DI, (AX)
12839 ADDQ $0x01, AX
12840
12841memmove_match_emit_encodeSnappyBlockAsm12B:
12842 LEAQ (AX)(R8*1), DI
12843
12844 // genMemMoveShort
12845 CMPQ R8, $0x08
12846 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8
12847 CMPQ R8, $0x10
12848 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
12849 CMPQ R8, $0x20
12850 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
12851 JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
12852
12853emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8:
12854 MOVQ (SI), R9
12855 MOVQ R9, (AX)
12856 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
12857
12858emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
12859 MOVQ (SI), R9
12860 MOVQ -8(SI)(R8*1), SI
12861 MOVQ R9, (AX)
12862 MOVQ SI, -8(AX)(R8*1)
12863 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
12864
12865emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
12866 MOVOU (SI), X0
12867 MOVOU -16(SI)(R8*1), X1
12868 MOVOU X0, (AX)
12869 MOVOU X1, -16(AX)(R8*1)
12870 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
12871
12872emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
12873 MOVOU (SI), X0
12874 MOVOU 16(SI), X1
12875 MOVOU -32(SI)(R8*1), X2
12876 MOVOU -16(SI)(R8*1), X3
12877 MOVOU X0, (AX)
12878 MOVOU X1, 16(AX)
12879 MOVOU X2, -32(AX)(R8*1)
12880 MOVOU X3, -16(AX)(R8*1)
12881
12882memmove_end_copy_match_emit_encodeSnappyBlockAsm12B:
12883 MOVQ DI, AX
12884 JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B
12885
12886memmove_long_match_emit_encodeSnappyBlockAsm12B:
12887 LEAQ (AX)(R8*1), DI
12888
12889 // genMemMoveLong
12890 MOVOU (SI), X0
12891 MOVOU 16(SI), X1
12892 MOVOU -32(SI)(R8*1), X2
12893 MOVOU -16(SI)(R8*1), X3
12894 MOVQ R8, R10
12895 SHRQ $0x05, R10
12896 MOVQ AX, R9
12897 ANDL $0x0000001f, R9
12898 MOVQ $0x00000040, R11
12899 SUBQ R9, R11
12900 DECQ R10
12901 JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
12902 LEAQ -32(SI)(R11*1), R9
12903 LEAQ -32(AX)(R11*1), R12
12904
12905emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
12906 MOVOU (R9), X4
12907 MOVOU 16(R9), X5
12908 MOVOA X4, (R12)
12909 MOVOA X5, 16(R12)
12910 ADDQ $0x20, R12
12911 ADDQ $0x20, R9
12912 ADDQ $0x20, R11
12913 DECQ R10
12914 JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
12915
12916emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
12917 MOVOU -32(SI)(R11*1), X4
12918 MOVOU -16(SI)(R11*1), X5
12919 MOVOA X4, -32(AX)(R11*1)
12920 MOVOA X5, -16(AX)(R11*1)
12921 ADDQ $0x20, R11
12922 CMPQ R8, R11
12923 JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
12924 MOVOU X0, (AX)
12925 MOVOU X1, 16(AX)
12926 MOVOU X2, -32(AX)(R8*1)
12927 MOVOU X3, -16(AX)(R8*1)
12928 MOVQ DI, AX
12929
12930emit_literal_done_match_emit_encodeSnappyBlockAsm12B:
12931match_nolit_loop_encodeSnappyBlockAsm12B:
12932 MOVL CX, SI
12933 SUBL BX, SI
12934 MOVL SI, 16(SP)
12935 ADDL $0x04, CX
12936 ADDL $0x04, BX
12937 MOVQ src_len+32(FP), SI
12938 SUBL CX, SI
12939 LEAQ (DX)(CX*1), DI
12940 LEAQ (DX)(BX*1), BX
12941
12942 // matchLen
12943 XORL R9, R9
12944
12945matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B:
12946 CMPL SI, $0x10
12947 JB matchlen_match8_match_nolit_encodeSnappyBlockAsm12B
12948 MOVQ (DI)(R9*1), R8
12949 MOVQ 8(DI)(R9*1), R10
12950 XORQ (BX)(R9*1), R8
12951 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
12952 XORQ 8(BX)(R9*1), R10
12953 JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B
12954 LEAL -16(SI), SI
12955 LEAL 16(R9), R9
12956 JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B
12957
12958matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B:
12959#ifdef GOAMD64_v3
12960 TZCNTQ R10, R10
12961
12962#else
12963 BSFQ R10, R10
12964
12965#endif
12966 SARQ $0x03, R10
12967 LEAL 8(R9)(R10*1), R9
12968 JMP match_nolit_end_encodeSnappyBlockAsm12B
12969
12970matchlen_match8_match_nolit_encodeSnappyBlockAsm12B:
12971 CMPL SI, $0x08
12972 JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
12973 MOVQ (DI)(R9*1), R8
12974 XORQ (BX)(R9*1), R8
12975 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
12976 LEAL -8(SI), SI
12977 LEAL 8(R9), R9
12978 JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
12979
12980matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B:
12981#ifdef GOAMD64_v3
12982 TZCNTQ R8, R8
12983
12984#else
12985 BSFQ R8, R8
12986
12987#endif
12988 SARQ $0x03, R8
12989 LEAL (R9)(R8*1), R9
12990 JMP match_nolit_end_encodeSnappyBlockAsm12B
12991
12992matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
12993 CMPL SI, $0x04
12994 JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
12995 MOVL (DI)(R9*1), R8
12996 CMPL (BX)(R9*1), R8
12997 JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
12998 LEAL -4(SI), SI
12999 LEAL 4(R9), R9
13000
13001matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
13002 CMPL SI, $0x01
13003 JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
13004 JB match_nolit_end_encodeSnappyBlockAsm12B
13005 MOVW (DI)(R9*1), R8
13006 CMPW (BX)(R9*1), R8
13007 JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
13008 LEAL 2(R9), R9
13009 SUBL $0x02, SI
13010 JZ match_nolit_end_encodeSnappyBlockAsm12B
13011
13012matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
13013 MOVB (DI)(R9*1), R8
13014 CMPB (BX)(R9*1), R8
13015 JNE match_nolit_end_encodeSnappyBlockAsm12B
13016 LEAL 1(R9), R9
13017
13018match_nolit_end_encodeSnappyBlockAsm12B:
13019 ADDL R9, CX
13020 MOVL 16(SP), BX
13021 ADDL $0x04, R9
13022 MOVL CX, 12(SP)
13023
13024 // emitCopy
13025two_byte_offset_match_nolit_encodeSnappyBlockAsm12B:
13026 CMPL R9, $0x40
13027 JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B
13028 MOVB $0xee, (AX)
13029 MOVW BX, 1(AX)
13030 LEAL -60(R9), R9
13031 ADDQ $0x03, AX
13032 JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B
13033
13034two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B:
13035 MOVL R9, SI
13036 SHLL $0x02, SI
13037 CMPL R9, $0x0c
13038 JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
13039 CMPL BX, $0x00000800
13040 JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
13041 LEAL -15(SI), SI
13042 MOVB BL, 1(AX)
13043 SHRL $0x08, BX
13044 SHLL $0x05, BX
13045 ORL BX, SI
13046 MOVB SI, (AX)
13047 ADDQ $0x02, AX
13048 JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B
13049
13050emit_copy_three_match_nolit_encodeSnappyBlockAsm12B:
13051 LEAL -2(SI), SI
13052 MOVB SI, (AX)
13053 MOVW BX, 1(AX)
13054 ADDQ $0x03, AX
13055
13056match_nolit_emitcopy_end_encodeSnappyBlockAsm12B:
13057 CMPL CX, 8(SP)
13058 JAE emit_remainder_encodeSnappyBlockAsm12B
13059 MOVQ -2(DX)(CX*1), SI
13060 CMPQ AX, (SP)
13061 JB match_nolit_dst_ok_encodeSnappyBlockAsm12B
13062 MOVQ $0x00000000, ret+48(FP)
13063 RET
13064
13065match_nolit_dst_ok_encodeSnappyBlockAsm12B:
13066 MOVQ $0x000000cf1bbcdcbb, R8
13067 MOVQ SI, DI
13068 SHRQ $0x10, SI
13069 MOVQ SI, BX
13070 SHLQ $0x18, DI
13071 IMULQ R8, DI
13072 SHRQ $0x34, DI
13073 SHLQ $0x18, BX
13074 IMULQ R8, BX
13075 SHRQ $0x34, BX
13076 LEAL -2(CX), R8
13077 LEAQ 24(SP)(BX*4), R9
13078 MOVL (R9), BX
13079 MOVL R8, 24(SP)(DI*4)
13080 MOVL CX, (R9)
13081 CMPL (DX)(BX*1), SI
13082 JEQ match_nolit_loop_encodeSnappyBlockAsm12B
13083 INCL CX
13084 JMP search_loop_encodeSnappyBlockAsm12B
13085
13086emit_remainder_encodeSnappyBlockAsm12B:
13087 MOVQ src_len+32(FP), CX
13088 SUBL 12(SP), CX
13089 LEAQ 3(AX)(CX*1), CX
13090 CMPQ CX, (SP)
13091 JB emit_remainder_ok_encodeSnappyBlockAsm12B
13092 MOVQ $0x00000000, ret+48(FP)
13093 RET
13094
13095emit_remainder_ok_encodeSnappyBlockAsm12B:
13096 MOVQ src_len+32(FP), CX
13097 MOVL 12(SP), BX
13098 CMPL BX, CX
13099 JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
13100 MOVL CX, SI
13101 MOVL CX, 12(SP)
13102 LEAQ (DX)(BX*1), CX
13103 SUBL BX, SI
13104 LEAL -1(SI), DX
13105 CMPL DX, $0x3c
13106 JB one_byte_emit_remainder_encodeSnappyBlockAsm12B
13107 CMPL DX, $0x00000100
13108 JB two_bytes_emit_remainder_encodeSnappyBlockAsm12B
13109 JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B
13110
13111three_bytes_emit_remainder_encodeSnappyBlockAsm12B:
13112 MOVB $0xf4, (AX)
13113 MOVW DX, 1(AX)
13114 ADDQ $0x03, AX
13115 JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
13116
13117two_bytes_emit_remainder_encodeSnappyBlockAsm12B:
13118 MOVB $0xf0, (AX)
13119 MOVB DL, 1(AX)
13120 ADDQ $0x02, AX
13121 CMPL DX, $0x40
13122 JB memmove_emit_remainder_encodeSnappyBlockAsm12B
13123 JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
13124
13125one_byte_emit_remainder_encodeSnappyBlockAsm12B:
13126 SHLB $0x02, DL
13127 MOVB DL, (AX)
13128 ADDQ $0x01, AX
13129
13130memmove_emit_remainder_encodeSnappyBlockAsm12B:
13131 LEAQ (AX)(SI*1), DX
13132 MOVL SI, BX
13133
13134 // genMemMoveShort
13135 CMPQ BX, $0x03
13136 JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2
13137 JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3
13138 CMPQ BX, $0x08
13139 JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7
13140 CMPQ BX, $0x10
13141 JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16
13142 CMPQ BX, $0x20
13143 JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32
13144 JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64
13145
13146emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2:
13147 MOVB (CX), SI
13148 MOVB -1(CX)(BX*1), CL
13149 MOVB SI, (AX)
13150 MOVB CL, -1(AX)(BX*1)
13151 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
13152
13153emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3:
13154 MOVW (CX), SI
13155 MOVB 2(CX), CL
13156 MOVW SI, (AX)
13157 MOVB CL, 2(AX)
13158 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
13159
13160emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7:
13161 MOVL (CX), SI
13162 MOVL -4(CX)(BX*1), CX
13163 MOVL SI, (AX)
13164 MOVL CX, -4(AX)(BX*1)
13165 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
13166
13167emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16:
13168 MOVQ (CX), SI
13169 MOVQ -8(CX)(BX*1), CX
13170 MOVQ SI, (AX)
13171 MOVQ CX, -8(AX)(BX*1)
13172 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
13173
13174emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32:
13175 MOVOU (CX), X0
13176 MOVOU -16(CX)(BX*1), X1
13177 MOVOU X0, (AX)
13178 MOVOU X1, -16(AX)(BX*1)
13179 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
13180
13181emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64:
13182 MOVOU (CX), X0
13183 MOVOU 16(CX), X1
13184 MOVOU -32(CX)(BX*1), X2
13185 MOVOU -16(CX)(BX*1), X3
13186 MOVOU X0, (AX)
13187 MOVOU X1, 16(AX)
13188 MOVOU X2, -32(AX)(BX*1)
13189 MOVOU X3, -16(AX)(BX*1)
13190
13191memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B:
13192 MOVQ DX, AX
13193 JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
13194
13195memmove_long_emit_remainder_encodeSnappyBlockAsm12B:
13196 LEAQ (AX)(SI*1), DX
13197 MOVL SI, BX
13198
13199 // genMemMoveLong
13200 MOVOU (CX), X0
13201 MOVOU 16(CX), X1
13202 MOVOU -32(CX)(BX*1), X2
13203 MOVOU -16(CX)(BX*1), X3
13204 MOVQ BX, DI
13205 SHRQ $0x05, DI
13206 MOVQ AX, SI
13207 ANDL $0x0000001f, SI
13208 MOVQ $0x00000040, R8
13209 SUBQ SI, R8
13210 DECQ DI
13211 JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
13212 LEAQ -32(CX)(R8*1), SI
13213 LEAQ -32(AX)(R8*1), R9
13214
13215emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back:
13216 MOVOU (SI), X4
13217 MOVOU 16(SI), X5
13218 MOVOA X4, (R9)
13219 MOVOA X5, 16(R9)
13220 ADDQ $0x20, R9
13221 ADDQ $0x20, SI
13222 ADDQ $0x20, R8
13223 DECQ DI
13224 JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back
13225
13226emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
13227 MOVOU -32(CX)(R8*1), X4
13228 MOVOU -16(CX)(R8*1), X5
13229 MOVOA X4, -32(AX)(R8*1)
13230 MOVOA X5, -16(AX)(R8*1)
13231 ADDQ $0x20, R8
13232 CMPQ BX, R8
13233 JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
13234 MOVOU X0, (AX)
13235 MOVOU X1, 16(AX)
13236 MOVOU X2, -32(AX)(BX*1)
13237 MOVOU X3, -16(AX)(BX*1)
13238 MOVQ DX, AX
13239
13240emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B:
13241 MOVQ dst_base+0(FP), CX
13242 SUBQ CX, AX
13243 MOVQ AX, ret+48(FP)
13244 RET
13245
13246// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
13247// Requires: BMI, SSE2
13248TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56
13249 MOVQ dst_base+0(FP), AX
13250 MOVQ $0x00000020, CX
13251 LEAQ 24(SP), DX
13252 PXOR X0, X0
13253
13254zero_loop_encodeSnappyBlockAsm10B:
13255 MOVOU X0, (DX)
13256 MOVOU X0, 16(DX)
13257 MOVOU X0, 32(DX)
13258 MOVOU X0, 48(DX)
13259 MOVOU X0, 64(DX)
13260 MOVOU X0, 80(DX)
13261 MOVOU X0, 96(DX)
13262 MOVOU X0, 112(DX)
13263 ADDQ $0x80, DX
13264 DECQ CX
13265 JNZ zero_loop_encodeSnappyBlockAsm10B
13266 MOVL $0x00000000, 12(SP)
13267 MOVQ src_len+32(FP), CX
13268 LEAQ -9(CX), DX
13269 LEAQ -8(CX), BX
13270 MOVL BX, 8(SP)
13271 SHRQ $0x05, CX
13272 SUBL CX, DX
13273 LEAQ (AX)(DX*1), DX
13274 MOVQ DX, (SP)
13275 MOVL $0x00000001, CX
13276 MOVL CX, 16(SP)
13277 MOVQ src_base+24(FP), DX
13278
13279search_loop_encodeSnappyBlockAsm10B:
13280 MOVL CX, BX
13281 SUBL 12(SP), BX
13282 SHRL $0x05, BX
13283 LEAL 4(CX)(BX*1), BX
13284 CMPL BX, 8(SP)
13285 JAE emit_remainder_encodeSnappyBlockAsm10B
13286 MOVQ (DX)(CX*1), SI
13287 MOVL BX, 20(SP)
13288 MOVQ $0x9e3779b1, R8
13289 MOVQ SI, R9
13290 MOVQ SI, R10
13291 SHRQ $0x08, R10
13292 SHLQ $0x20, R9
13293 IMULQ R8, R9
13294 SHRQ $0x36, R9
13295 SHLQ $0x20, R10
13296 IMULQ R8, R10
13297 SHRQ $0x36, R10
13298 MOVL 24(SP)(R9*4), BX
13299 MOVL 24(SP)(R10*4), DI
13300 MOVL CX, 24(SP)(R9*4)
13301 LEAL 1(CX), R9
13302 MOVL R9, 24(SP)(R10*4)
13303 MOVQ SI, R9
13304 SHRQ $0x10, R9
13305 SHLQ $0x20, R9
13306 IMULQ R8, R9
13307 SHRQ $0x36, R9
13308 MOVL CX, R8
13309 SUBL 16(SP), R8
13310 MOVL 1(DX)(R8*1), R10
13311 MOVQ SI, R8
13312 SHRQ $0x08, R8
13313 CMPL R8, R10
13314 JNE no_repeat_found_encodeSnappyBlockAsm10B
13315 LEAL 1(CX), SI
13316 MOVL 12(SP), BX
13317 MOVL SI, DI
13318 SUBL 16(SP), DI
13319 JZ repeat_extend_back_end_encodeSnappyBlockAsm10B
13320
13321repeat_extend_back_loop_encodeSnappyBlockAsm10B:
13322 CMPL SI, BX
13323 JBE repeat_extend_back_end_encodeSnappyBlockAsm10B
13324 MOVB -1(DX)(DI*1), R8
13325 MOVB -1(DX)(SI*1), R9
13326 CMPB R8, R9
13327 JNE repeat_extend_back_end_encodeSnappyBlockAsm10B
13328 LEAL -1(SI), SI
13329 DECL DI
13330 JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B
13331
13332repeat_extend_back_end_encodeSnappyBlockAsm10B:
13333 MOVL 12(SP), BX
13334 CMPL BX, SI
13335 JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
13336 MOVL SI, DI
13337 MOVL SI, 12(SP)
13338 LEAQ (DX)(BX*1), R8
13339 SUBL BX, DI
13340 LEAL -1(DI), BX
13341 CMPL BX, $0x3c
13342 JB one_byte_repeat_emit_encodeSnappyBlockAsm10B
13343 CMPL BX, $0x00000100
13344 JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B
13345 JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B
13346
13347three_bytes_repeat_emit_encodeSnappyBlockAsm10B:
13348 MOVB $0xf4, (AX)
13349 MOVW BX, 1(AX)
13350 ADDQ $0x03, AX
13351 JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
13352
13353two_bytes_repeat_emit_encodeSnappyBlockAsm10B:
13354 MOVB $0xf0, (AX)
13355 MOVB BL, 1(AX)
13356 ADDQ $0x02, AX
13357 CMPL BX, $0x40
13358 JB memmove_repeat_emit_encodeSnappyBlockAsm10B
13359 JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
13360
13361one_byte_repeat_emit_encodeSnappyBlockAsm10B:
13362 SHLB $0x02, BL
13363 MOVB BL, (AX)
13364 ADDQ $0x01, AX
13365
13366memmove_repeat_emit_encodeSnappyBlockAsm10B:
13367 LEAQ (AX)(DI*1), BX
13368
13369 // genMemMoveShort
13370 CMPQ DI, $0x08
13371 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8
13372 CMPQ DI, $0x10
13373 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
13374 CMPQ DI, $0x20
13375 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
13376 JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
13377
13378emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8:
13379 MOVQ (R8), R9
13380 MOVQ R9, (AX)
13381 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
13382
13383emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
13384 MOVQ (R8), R9
13385 MOVQ -8(R8)(DI*1), R8
13386 MOVQ R9, (AX)
13387 MOVQ R8, -8(AX)(DI*1)
13388 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
13389
13390emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
13391 MOVOU (R8), X0
13392 MOVOU -16(R8)(DI*1), X1
13393 MOVOU X0, (AX)
13394 MOVOU X1, -16(AX)(DI*1)
13395 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
13396
13397emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
13398 MOVOU (R8), X0
13399 MOVOU 16(R8), X1
13400 MOVOU -32(R8)(DI*1), X2
13401 MOVOU -16(R8)(DI*1), X3
13402 MOVOU X0, (AX)
13403 MOVOU X1, 16(AX)
13404 MOVOU X2, -32(AX)(DI*1)
13405 MOVOU X3, -16(AX)(DI*1)
13406
13407memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B:
13408 MOVQ BX, AX
13409 JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
13410
13411memmove_long_repeat_emit_encodeSnappyBlockAsm10B:
13412 LEAQ (AX)(DI*1), BX
13413
13414 // genMemMoveLong
13415 MOVOU (R8), X0
13416 MOVOU 16(R8), X1
13417 MOVOU -32(R8)(DI*1), X2
13418 MOVOU -16(R8)(DI*1), X3
13419 MOVQ DI, R10
13420 SHRQ $0x05, R10
13421 MOVQ AX, R9
13422 ANDL $0x0000001f, R9
13423 MOVQ $0x00000040, R11
13424 SUBQ R9, R11
13425 DECQ R10
13426 JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
13427 LEAQ -32(R8)(R11*1), R9
13428 LEAQ -32(AX)(R11*1), R12
13429
13430emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
13431 MOVOU (R9), X4
13432 MOVOU 16(R9), X5
13433 MOVOA X4, (R12)
13434 MOVOA X5, 16(R12)
13435 ADDQ $0x20, R12
13436 ADDQ $0x20, R9
13437 ADDQ $0x20, R11
13438 DECQ R10
13439 JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
13440
13441emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
13442 MOVOU -32(R8)(R11*1), X4
13443 MOVOU -16(R8)(R11*1), X5
13444 MOVOA X4, -32(AX)(R11*1)
13445 MOVOA X5, -16(AX)(R11*1)
13446 ADDQ $0x20, R11
13447 CMPQ DI, R11
13448 JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
13449 MOVOU X0, (AX)
13450 MOVOU X1, 16(AX)
13451 MOVOU X2, -32(AX)(DI*1)
13452 MOVOU X3, -16(AX)(DI*1)
13453 MOVQ BX, AX
13454
13455emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B:
13456 ADDL $0x05, CX
13457 MOVL CX, BX
13458 SUBL 16(SP), BX
13459 MOVQ src_len+32(FP), DI
13460 SUBL CX, DI
13461 LEAQ (DX)(CX*1), R8
13462 LEAQ (DX)(BX*1), BX
13463
13464 // matchLen
13465 XORL R10, R10
13466
13467matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B:
13468 CMPL DI, $0x10
13469 JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B
13470 MOVQ (R8)(R10*1), R9
13471 MOVQ 8(R8)(R10*1), R11
13472 XORQ (BX)(R10*1), R9
13473 JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
13474 XORQ 8(BX)(R10*1), R11
13475 JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B
13476 LEAL -16(DI), DI
13477 LEAL 16(R10), R10
13478 JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B
13479
13480matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B:
13481#ifdef GOAMD64_v3
13482 TZCNTQ R11, R11
13483
13484#else
13485 BSFQ R11, R11
13486
13487#endif
13488 SARQ $0x03, R11
13489 LEAL 8(R10)(R11*1), R10
13490 JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
13491
13492matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B:
13493 CMPL DI, $0x08
13494 JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
13495 MOVQ (R8)(R10*1), R9
13496 XORQ (BX)(R10*1), R9
13497 JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
13498 LEAL -8(DI), DI
13499 LEAL 8(R10), R10
13500 JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
13501
13502matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B:
13503#ifdef GOAMD64_v3
13504 TZCNTQ R9, R9
13505
13506#else
13507 BSFQ R9, R9
13508
13509#endif
13510 SARQ $0x03, R9
13511 LEAL (R10)(R9*1), R10
13512 JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
13513
13514matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
13515 CMPL DI, $0x04
13516 JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
13517 MOVL (R8)(R10*1), R9
13518 CMPL (BX)(R10*1), R9
13519 JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
13520 LEAL -4(DI), DI
13521 LEAL 4(R10), R10
13522
13523matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
13524 CMPL DI, $0x01
13525 JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
13526 JB repeat_extend_forward_end_encodeSnappyBlockAsm10B
13527 MOVW (R8)(R10*1), R9
13528 CMPW (BX)(R10*1), R9
13529 JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
13530 LEAL 2(R10), R10
13531 SUBL $0x02, DI
13532 JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
13533
13534matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
13535 MOVB (R8)(R10*1), R9
13536 CMPB (BX)(R10*1), R9
13537 JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
13538 LEAL 1(R10), R10
13539
13540repeat_extend_forward_end_encodeSnappyBlockAsm10B:
13541 ADDL R10, CX
13542 MOVL CX, BX
13543 SUBL SI, BX
13544 MOVL 16(SP), SI
13545
13546 // emitCopy
13547two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B:
13548 CMPL BX, $0x40
13549 JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B
13550 MOVB $0xee, (AX)
13551 MOVW SI, 1(AX)
13552 LEAL -60(BX), BX
13553 ADDQ $0x03, AX
13554 JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B
13555
13556two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B:
13557 MOVL BX, DI
13558 SHLL $0x02, DI
13559 CMPL BX, $0x0c
13560 JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
13561 CMPL SI, $0x00000800
13562 JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
13563 LEAL -15(DI), DI
13564 MOVB SI, 1(AX)
13565 SHRL $0x08, SI
13566 SHLL $0x05, SI
13567 ORL SI, DI
13568 MOVB DI, (AX)
13569 ADDQ $0x02, AX
13570 JMP repeat_end_emit_encodeSnappyBlockAsm10B
13571
13572emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B:
13573 LEAL -2(DI), DI
13574 MOVB DI, (AX)
13575 MOVW SI, 1(AX)
13576 ADDQ $0x03, AX
13577
13578repeat_end_emit_encodeSnappyBlockAsm10B:
13579 MOVL CX, 12(SP)
13580 JMP search_loop_encodeSnappyBlockAsm10B
13581
13582no_repeat_found_encodeSnappyBlockAsm10B:
13583 CMPL (DX)(BX*1), SI
13584 JEQ candidate_match_encodeSnappyBlockAsm10B
13585 SHRQ $0x08, SI
13586 MOVL 24(SP)(R9*4), BX
13587 LEAL 2(CX), R8
13588 CMPL (DX)(DI*1), SI
13589 JEQ candidate2_match_encodeSnappyBlockAsm10B
13590 MOVL R8, 24(SP)(R9*4)
13591 SHRQ $0x08, SI
13592 CMPL (DX)(BX*1), SI
13593 JEQ candidate3_match_encodeSnappyBlockAsm10B
13594 MOVL 20(SP), CX
13595 JMP search_loop_encodeSnappyBlockAsm10B
13596
13597candidate3_match_encodeSnappyBlockAsm10B:
13598 ADDL $0x02, CX
13599 JMP candidate_match_encodeSnappyBlockAsm10B
13600
13601candidate2_match_encodeSnappyBlockAsm10B:
13602 MOVL R8, 24(SP)(R9*4)
13603 INCL CX
13604 MOVL DI, BX
13605
13606candidate_match_encodeSnappyBlockAsm10B:
13607 MOVL 12(SP), SI
13608 TESTL BX, BX
13609 JZ match_extend_back_end_encodeSnappyBlockAsm10B
13610
13611match_extend_back_loop_encodeSnappyBlockAsm10B:
13612 CMPL CX, SI
13613 JBE match_extend_back_end_encodeSnappyBlockAsm10B
13614 MOVB -1(DX)(BX*1), DI
13615 MOVB -1(DX)(CX*1), R8
13616 CMPB DI, R8
13617 JNE match_extend_back_end_encodeSnappyBlockAsm10B
13618 LEAL -1(CX), CX
13619 DECL BX
13620 JZ match_extend_back_end_encodeSnappyBlockAsm10B
13621 JMP match_extend_back_loop_encodeSnappyBlockAsm10B
13622
13623match_extend_back_end_encodeSnappyBlockAsm10B:
13624 MOVL CX, SI
13625 SUBL 12(SP), SI
13626 LEAQ 3(AX)(SI*1), SI
13627 CMPQ SI, (SP)
13628 JB match_dst_size_check_encodeSnappyBlockAsm10B
13629 MOVQ $0x00000000, ret+48(FP)
13630 RET
13631
13632match_dst_size_check_encodeSnappyBlockAsm10B:
13633 MOVL CX, SI
13634 MOVL 12(SP), DI
13635 CMPL DI, SI
13636 JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B
13637 MOVL SI, R8
13638 MOVL SI, 12(SP)
13639 LEAQ (DX)(DI*1), SI
13640 SUBL DI, R8
13641 LEAL -1(R8), DI
13642 CMPL DI, $0x3c
13643 JB one_byte_match_emit_encodeSnappyBlockAsm10B
13644 CMPL DI, $0x00000100
13645 JB two_bytes_match_emit_encodeSnappyBlockAsm10B
13646 JB three_bytes_match_emit_encodeSnappyBlockAsm10B
13647
13648three_bytes_match_emit_encodeSnappyBlockAsm10B:
13649 MOVB $0xf4, (AX)
13650 MOVW DI, 1(AX)
13651 ADDQ $0x03, AX
13652 JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
13653
13654two_bytes_match_emit_encodeSnappyBlockAsm10B:
13655 MOVB $0xf0, (AX)
13656 MOVB DI, 1(AX)
13657 ADDQ $0x02, AX
13658 CMPL DI, $0x40
13659 JB memmove_match_emit_encodeSnappyBlockAsm10B
13660 JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
13661
13662one_byte_match_emit_encodeSnappyBlockAsm10B:
13663 SHLB $0x02, DI
13664 MOVB DI, (AX)
13665 ADDQ $0x01, AX
13666
13667memmove_match_emit_encodeSnappyBlockAsm10B:
13668 LEAQ (AX)(R8*1), DI
13669
13670 // genMemMoveShort
13671 CMPQ R8, $0x08
13672 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8
13673 CMPQ R8, $0x10
13674 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
13675 CMPQ R8, $0x20
13676 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
13677 JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
13678
13679emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8:
13680 MOVQ (SI), R9
13681 MOVQ R9, (AX)
13682 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
13683
13684emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
13685 MOVQ (SI), R9
13686 MOVQ -8(SI)(R8*1), SI
13687 MOVQ R9, (AX)
13688 MOVQ SI, -8(AX)(R8*1)
13689 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
13690
13691emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
13692 MOVOU (SI), X0
13693 MOVOU -16(SI)(R8*1), X1
13694 MOVOU X0, (AX)
13695 MOVOU X1, -16(AX)(R8*1)
13696 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
13697
13698emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
13699 MOVOU (SI), X0
13700 MOVOU 16(SI), X1
13701 MOVOU -32(SI)(R8*1), X2
13702 MOVOU -16(SI)(R8*1), X3
13703 MOVOU X0, (AX)
13704 MOVOU X1, 16(AX)
13705 MOVOU X2, -32(AX)(R8*1)
13706 MOVOU X3, -16(AX)(R8*1)
13707
13708memmove_end_copy_match_emit_encodeSnappyBlockAsm10B:
13709 MOVQ DI, AX
13710 JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B
13711
13712memmove_long_match_emit_encodeSnappyBlockAsm10B:
13713 LEAQ (AX)(R8*1), DI
13714
13715 // genMemMoveLong
13716 MOVOU (SI), X0
13717 MOVOU 16(SI), X1
13718 MOVOU -32(SI)(R8*1), X2
13719 MOVOU -16(SI)(R8*1), X3
13720 MOVQ R8, R10
13721 SHRQ $0x05, R10
13722 MOVQ AX, R9
13723 ANDL $0x0000001f, R9
13724 MOVQ $0x00000040, R11
13725 SUBQ R9, R11
13726 DECQ R10
13727 JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
13728 LEAQ -32(SI)(R11*1), R9
13729 LEAQ -32(AX)(R11*1), R12
13730
13731emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
13732 MOVOU (R9), X4
13733 MOVOU 16(R9), X5
13734 MOVOA X4, (R12)
13735 MOVOA X5, 16(R12)
13736 ADDQ $0x20, R12
13737 ADDQ $0x20, R9
13738 ADDQ $0x20, R11
13739 DECQ R10
13740 JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
13741
13742emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
13743 MOVOU -32(SI)(R11*1), X4
13744 MOVOU -16(SI)(R11*1), X5
13745 MOVOA X4, -32(AX)(R11*1)
13746 MOVOA X5, -16(AX)(R11*1)
13747 ADDQ $0x20, R11
13748 CMPQ R8, R11
13749 JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
13750 MOVOU X0, (AX)
13751 MOVOU X1, 16(AX)
13752 MOVOU X2, -32(AX)(R8*1)
13753 MOVOU X3, -16(AX)(R8*1)
13754 MOVQ DI, AX
13755
13756emit_literal_done_match_emit_encodeSnappyBlockAsm10B:
13757match_nolit_loop_encodeSnappyBlockAsm10B:
13758 MOVL CX, SI
13759 SUBL BX, SI
13760 MOVL SI, 16(SP)
13761 ADDL $0x04, CX
13762 ADDL $0x04, BX
13763 MOVQ src_len+32(FP), SI
13764 SUBL CX, SI
13765 LEAQ (DX)(CX*1), DI
13766 LEAQ (DX)(BX*1), BX
13767
13768 // matchLen
13769 XORL R9, R9
13770
13771matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B:
13772 CMPL SI, $0x10
13773 JB matchlen_match8_match_nolit_encodeSnappyBlockAsm10B
13774 MOVQ (DI)(R9*1), R8
13775 MOVQ 8(DI)(R9*1), R10
13776 XORQ (BX)(R9*1), R8
13777 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
13778 XORQ 8(BX)(R9*1), R10
13779 JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B
13780 LEAL -16(SI), SI
13781 LEAL 16(R9), R9
13782 JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B
13783
13784matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B:
13785#ifdef GOAMD64_v3
13786 TZCNTQ R10, R10
13787
13788#else
13789 BSFQ R10, R10
13790
13791#endif
13792 SARQ $0x03, R10
13793 LEAL 8(R9)(R10*1), R9
13794 JMP match_nolit_end_encodeSnappyBlockAsm10B
13795
13796matchlen_match8_match_nolit_encodeSnappyBlockAsm10B:
13797 CMPL SI, $0x08
13798 JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
13799 MOVQ (DI)(R9*1), R8
13800 XORQ (BX)(R9*1), R8
13801 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
13802 LEAL -8(SI), SI
13803 LEAL 8(R9), R9
13804 JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
13805
13806matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B:
13807#ifdef GOAMD64_v3
13808 TZCNTQ R8, R8
13809
13810#else
13811 BSFQ R8, R8
13812
13813#endif
13814 SARQ $0x03, R8
13815 LEAL (R9)(R8*1), R9
13816 JMP match_nolit_end_encodeSnappyBlockAsm10B
13817
13818matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
13819 CMPL SI, $0x04
13820 JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
13821 MOVL (DI)(R9*1), R8
13822 CMPL (BX)(R9*1), R8
13823 JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
13824 LEAL -4(SI), SI
13825 LEAL 4(R9), R9
13826
13827matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
13828 CMPL SI, $0x01
13829 JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
13830 JB match_nolit_end_encodeSnappyBlockAsm10B
13831 MOVW (DI)(R9*1), R8
13832 CMPW (BX)(R9*1), R8
13833 JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
13834 LEAL 2(R9), R9
13835 SUBL $0x02, SI
13836 JZ match_nolit_end_encodeSnappyBlockAsm10B
13837
13838matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
13839 MOVB (DI)(R9*1), R8
13840 CMPB (BX)(R9*1), R8
13841 JNE match_nolit_end_encodeSnappyBlockAsm10B
13842 LEAL 1(R9), R9
13843
13844match_nolit_end_encodeSnappyBlockAsm10B:
13845 ADDL R9, CX
13846 MOVL 16(SP), BX
13847 ADDL $0x04, R9
13848 MOVL CX, 12(SP)
13849
13850 // emitCopy
13851two_byte_offset_match_nolit_encodeSnappyBlockAsm10B:
13852 CMPL R9, $0x40
13853 JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B
13854 MOVB $0xee, (AX)
13855 MOVW BX, 1(AX)
13856 LEAL -60(R9), R9
13857 ADDQ $0x03, AX
13858 JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B
13859
13860two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B:
13861 MOVL R9, SI
13862 SHLL $0x02, SI
13863 CMPL R9, $0x0c
13864 JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
13865 CMPL BX, $0x00000800
13866 JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
13867 LEAL -15(SI), SI
13868 MOVB BL, 1(AX)
13869 SHRL $0x08, BX
13870 SHLL $0x05, BX
13871 ORL BX, SI
13872 MOVB SI, (AX)
13873 ADDQ $0x02, AX
13874 JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B
13875
13876emit_copy_three_match_nolit_encodeSnappyBlockAsm10B:
13877 LEAL -2(SI), SI
13878 MOVB SI, (AX)
13879 MOVW BX, 1(AX)
13880 ADDQ $0x03, AX
13881
13882match_nolit_emitcopy_end_encodeSnappyBlockAsm10B:
13883 CMPL CX, 8(SP)
13884 JAE emit_remainder_encodeSnappyBlockAsm10B
13885 MOVQ -2(DX)(CX*1), SI
13886 CMPQ AX, (SP)
13887 JB match_nolit_dst_ok_encodeSnappyBlockAsm10B
13888 MOVQ $0x00000000, ret+48(FP)
13889 RET
13890
13891match_nolit_dst_ok_encodeSnappyBlockAsm10B:
13892 MOVQ $0x9e3779b1, R8
13893 MOVQ SI, DI
13894 SHRQ $0x10, SI
13895 MOVQ SI, BX
13896 SHLQ $0x20, DI
13897 IMULQ R8, DI
13898 SHRQ $0x36, DI
13899 SHLQ $0x20, BX
13900 IMULQ R8, BX
13901 SHRQ $0x36, BX
13902 LEAL -2(CX), R8
13903 LEAQ 24(SP)(BX*4), R9
13904 MOVL (R9), BX
13905 MOVL R8, 24(SP)(DI*4)
13906 MOVL CX, (R9)
13907 CMPL (DX)(BX*1), SI
13908 JEQ match_nolit_loop_encodeSnappyBlockAsm10B
13909 INCL CX
13910 JMP search_loop_encodeSnappyBlockAsm10B
13911
13912emit_remainder_encodeSnappyBlockAsm10B:
13913 MOVQ src_len+32(FP), CX
13914 SUBL 12(SP), CX
13915 LEAQ 3(AX)(CX*1), CX
13916 CMPQ CX, (SP)
13917 JB emit_remainder_ok_encodeSnappyBlockAsm10B
13918 MOVQ $0x00000000, ret+48(FP)
13919 RET
13920
13921emit_remainder_ok_encodeSnappyBlockAsm10B:
13922 MOVQ src_len+32(FP), CX
13923 MOVL 12(SP), BX
13924 CMPL BX, CX
13925 JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
13926 MOVL CX, SI
13927 MOVL CX, 12(SP)
13928 LEAQ (DX)(BX*1), CX
13929 SUBL BX, SI
13930 LEAL -1(SI), DX
13931 CMPL DX, $0x3c
13932 JB one_byte_emit_remainder_encodeSnappyBlockAsm10B
13933 CMPL DX, $0x00000100
13934 JB two_bytes_emit_remainder_encodeSnappyBlockAsm10B
13935 JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B
13936
13937three_bytes_emit_remainder_encodeSnappyBlockAsm10B:
13938 MOVB $0xf4, (AX)
13939 MOVW DX, 1(AX)
13940 ADDQ $0x03, AX
13941 JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
13942
13943two_bytes_emit_remainder_encodeSnappyBlockAsm10B:
13944 MOVB $0xf0, (AX)
13945 MOVB DL, 1(AX)
13946 ADDQ $0x02, AX
13947 CMPL DX, $0x40
13948 JB memmove_emit_remainder_encodeSnappyBlockAsm10B
13949 JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
13950
13951one_byte_emit_remainder_encodeSnappyBlockAsm10B:
13952 SHLB $0x02, DL
13953 MOVB DL, (AX)
13954 ADDQ $0x01, AX
13955
13956memmove_emit_remainder_encodeSnappyBlockAsm10B:
13957 LEAQ (AX)(SI*1), DX
13958 MOVL SI, BX
13959
13960 // genMemMoveShort
13961 CMPQ BX, $0x03
13962 JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2
13963 JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3
13964 CMPQ BX, $0x08
13965 JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7
13966 CMPQ BX, $0x10
13967 JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16
13968 CMPQ BX, $0x20
13969 JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32
13970 JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64
13971
13972emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2:
13973 MOVB (CX), SI
13974 MOVB -1(CX)(BX*1), CL
13975 MOVB SI, (AX)
13976 MOVB CL, -1(AX)(BX*1)
13977 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
13978
13979emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3:
13980 MOVW (CX), SI
13981 MOVB 2(CX), CL
13982 MOVW SI, (AX)
13983 MOVB CL, 2(AX)
13984 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
13985
13986emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7:
13987 MOVL (CX), SI
13988 MOVL -4(CX)(BX*1), CX
13989 MOVL SI, (AX)
13990 MOVL CX, -4(AX)(BX*1)
13991 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
13992
13993emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16:
13994 MOVQ (CX), SI
13995 MOVQ -8(CX)(BX*1), CX
13996 MOVQ SI, (AX)
13997 MOVQ CX, -8(AX)(BX*1)
13998 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
13999
14000emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32:
14001 MOVOU (CX), X0
14002 MOVOU -16(CX)(BX*1), X1
14003 MOVOU X0, (AX)
14004 MOVOU X1, -16(AX)(BX*1)
14005 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
14006
14007emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64:
14008 MOVOU (CX), X0
14009 MOVOU 16(CX), X1
14010 MOVOU -32(CX)(BX*1), X2
14011 MOVOU -16(CX)(BX*1), X3
14012 MOVOU X0, (AX)
14013 MOVOU X1, 16(AX)
14014 MOVOU X2, -32(AX)(BX*1)
14015 MOVOU X3, -16(AX)(BX*1)
14016
14017memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B:
14018 MOVQ DX, AX
14019 JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
14020
14021memmove_long_emit_remainder_encodeSnappyBlockAsm10B:
14022 LEAQ (AX)(SI*1), DX
14023 MOVL SI, BX
14024
14025 // genMemMoveLong
14026 MOVOU (CX), X0
14027 MOVOU 16(CX), X1
14028 MOVOU -32(CX)(BX*1), X2
14029 MOVOU -16(CX)(BX*1), X3
14030 MOVQ BX, DI
14031 SHRQ $0x05, DI
14032 MOVQ AX, SI
14033 ANDL $0x0000001f, SI
14034 MOVQ $0x00000040, R8
14035 SUBQ SI, R8
14036 DECQ DI
14037 JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
14038 LEAQ -32(CX)(R8*1), SI
14039 LEAQ -32(AX)(R8*1), R9
14040
14041emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back:
14042 MOVOU (SI), X4
14043 MOVOU 16(SI), X5
14044 MOVOA X4, (R9)
14045 MOVOA X5, 16(R9)
14046 ADDQ $0x20, R9
14047 ADDQ $0x20, SI
14048 ADDQ $0x20, R8
14049 DECQ DI
14050 JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back
14051
14052emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
14053 MOVOU -32(CX)(R8*1), X4
14054 MOVOU -16(CX)(R8*1), X5
14055 MOVOA X4, -32(AX)(R8*1)
14056 MOVOA X5, -16(AX)(R8*1)
14057 ADDQ $0x20, R8
14058 CMPQ BX, R8
14059 JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
14060 MOVOU X0, (AX)
14061 MOVOU X1, 16(AX)
14062 MOVOU X2, -32(AX)(BX*1)
14063 MOVOU X3, -16(AX)(BX*1)
14064 MOVQ DX, AX
14065
14066emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B:
14067 MOVQ dst_base+0(FP), CX
14068 SUBQ CX, AX
14069 MOVQ AX, ret+48(FP)
14070 RET
14071
14072// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
14073// Requires: BMI, SSE2
14074TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56
14075 MOVQ dst_base+0(FP), AX
14076 MOVQ $0x00000008, CX
14077 LEAQ 24(SP), DX
14078 PXOR X0, X0
14079
14080zero_loop_encodeSnappyBlockAsm8B:
14081 MOVOU X0, (DX)
14082 MOVOU X0, 16(DX)
14083 MOVOU X0, 32(DX)
14084 MOVOU X0, 48(DX)
14085 MOVOU X0, 64(DX)
14086 MOVOU X0, 80(DX)
14087 MOVOU X0, 96(DX)
14088 MOVOU X0, 112(DX)
14089 ADDQ $0x80, DX
14090 DECQ CX
14091 JNZ zero_loop_encodeSnappyBlockAsm8B
14092 MOVL $0x00000000, 12(SP)
14093 MOVQ src_len+32(FP), CX
14094 LEAQ -9(CX), DX
14095 LEAQ -8(CX), BX
14096 MOVL BX, 8(SP)
14097 SHRQ $0x05, CX
14098 SUBL CX, DX
14099 LEAQ (AX)(DX*1), DX
14100 MOVQ DX, (SP)
14101 MOVL $0x00000001, CX
14102 MOVL CX, 16(SP)
14103 MOVQ src_base+24(FP), DX
14104
14105search_loop_encodeSnappyBlockAsm8B:
14106 MOVL CX, BX
14107 SUBL 12(SP), BX
14108 SHRL $0x04, BX
14109 LEAL 4(CX)(BX*1), BX
14110 CMPL BX, 8(SP)
14111 JAE emit_remainder_encodeSnappyBlockAsm8B
14112 MOVQ (DX)(CX*1), SI
14113 MOVL BX, 20(SP)
14114 MOVQ $0x9e3779b1, R8
14115 MOVQ SI, R9
14116 MOVQ SI, R10
14117 SHRQ $0x08, R10
14118 SHLQ $0x20, R9
14119 IMULQ R8, R9
14120 SHRQ $0x38, R9
14121 SHLQ $0x20, R10
14122 IMULQ R8, R10
14123 SHRQ $0x38, R10
14124 MOVL 24(SP)(R9*4), BX
14125 MOVL 24(SP)(R10*4), DI
14126 MOVL CX, 24(SP)(R9*4)
14127 LEAL 1(CX), R9
14128 MOVL R9, 24(SP)(R10*4)
14129 MOVQ SI, R9
14130 SHRQ $0x10, R9
14131 SHLQ $0x20, R9
14132 IMULQ R8, R9
14133 SHRQ $0x38, R9
14134 MOVL CX, R8
14135 SUBL 16(SP), R8
14136 MOVL 1(DX)(R8*1), R10
14137 MOVQ SI, R8
14138 SHRQ $0x08, R8
14139 CMPL R8, R10
14140 JNE no_repeat_found_encodeSnappyBlockAsm8B
14141 LEAL 1(CX), SI
14142 MOVL 12(SP), BX
14143 MOVL SI, DI
14144 SUBL 16(SP), DI
14145 JZ repeat_extend_back_end_encodeSnappyBlockAsm8B
14146
14147repeat_extend_back_loop_encodeSnappyBlockAsm8B:
14148 CMPL SI, BX
14149 JBE repeat_extend_back_end_encodeSnappyBlockAsm8B
14150 MOVB -1(DX)(DI*1), R8
14151 MOVB -1(DX)(SI*1), R9
14152 CMPB R8, R9
14153 JNE repeat_extend_back_end_encodeSnappyBlockAsm8B
14154 LEAL -1(SI), SI
14155 DECL DI
14156 JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B
14157
14158repeat_extend_back_end_encodeSnappyBlockAsm8B:
14159 MOVL 12(SP), BX
14160 CMPL BX, SI
14161 JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
14162 MOVL SI, DI
14163 MOVL SI, 12(SP)
14164 LEAQ (DX)(BX*1), R8
14165 SUBL BX, DI
14166 LEAL -1(DI), BX
14167 CMPL BX, $0x3c
14168 JB one_byte_repeat_emit_encodeSnappyBlockAsm8B
14169 CMPL BX, $0x00000100
14170 JB two_bytes_repeat_emit_encodeSnappyBlockAsm8B
14171 JB three_bytes_repeat_emit_encodeSnappyBlockAsm8B
14172
14173three_bytes_repeat_emit_encodeSnappyBlockAsm8B:
14174 MOVB $0xf4, (AX)
14175 MOVW BX, 1(AX)
14176 ADDQ $0x03, AX
14177 JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
14178
14179two_bytes_repeat_emit_encodeSnappyBlockAsm8B:
14180 MOVB $0xf0, (AX)
14181 MOVB BL, 1(AX)
14182 ADDQ $0x02, AX
14183 CMPL BX, $0x40
14184 JB memmove_repeat_emit_encodeSnappyBlockAsm8B
14185 JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
14186
14187one_byte_repeat_emit_encodeSnappyBlockAsm8B:
14188 SHLB $0x02, BL
14189 MOVB BL, (AX)
14190 ADDQ $0x01, AX
14191
14192memmove_repeat_emit_encodeSnappyBlockAsm8B:
14193 LEAQ (AX)(DI*1), BX
14194
14195 // genMemMoveShort
14196 CMPQ DI, $0x08
14197 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8
14198 CMPQ DI, $0x10
14199 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
14200 CMPQ DI, $0x20
14201 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
14202 JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
14203
14204emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8:
14205 MOVQ (R8), R9
14206 MOVQ R9, (AX)
14207 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
14208
14209emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
14210 MOVQ (R8), R9
14211 MOVQ -8(R8)(DI*1), R8
14212 MOVQ R9, (AX)
14213 MOVQ R8, -8(AX)(DI*1)
14214 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
14215
14216emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
14217 MOVOU (R8), X0
14218 MOVOU -16(R8)(DI*1), X1
14219 MOVOU X0, (AX)
14220 MOVOU X1, -16(AX)(DI*1)
14221 JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
14222
14223emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
14224 MOVOU (R8), X0
14225 MOVOU 16(R8), X1
14226 MOVOU -32(R8)(DI*1), X2
14227 MOVOU -16(R8)(DI*1), X3
14228 MOVOU X0, (AX)
14229 MOVOU X1, 16(AX)
14230 MOVOU X2, -32(AX)(DI*1)
14231 MOVOU X3, -16(AX)(DI*1)
14232
14233memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B:
14234 MOVQ BX, AX
14235 JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
14236
14237memmove_long_repeat_emit_encodeSnappyBlockAsm8B:
14238 LEAQ (AX)(DI*1), BX
14239
14240 // genMemMoveLong
14241 MOVOU (R8), X0
14242 MOVOU 16(R8), X1
14243 MOVOU -32(R8)(DI*1), X2
14244 MOVOU -16(R8)(DI*1), X3
14245 MOVQ DI, R10
14246 SHRQ $0x05, R10
14247 MOVQ AX, R9
14248 ANDL $0x0000001f, R9
14249 MOVQ $0x00000040, R11
14250 SUBQ R9, R11
14251 DECQ R10
14252 JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
14253 LEAQ -32(R8)(R11*1), R9
14254 LEAQ -32(AX)(R11*1), R12
14255
14256emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
14257 MOVOU (R9), X4
14258 MOVOU 16(R9), X5
14259 MOVOA X4, (R12)
14260 MOVOA X5, 16(R12)
14261 ADDQ $0x20, R12
14262 ADDQ $0x20, R9
14263 ADDQ $0x20, R11
14264 DECQ R10
14265 JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
14266
14267emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
14268 MOVOU -32(R8)(R11*1), X4
14269 MOVOU -16(R8)(R11*1), X5
14270 MOVOA X4, -32(AX)(R11*1)
14271 MOVOA X5, -16(AX)(R11*1)
14272 ADDQ $0x20, R11
14273 CMPQ DI, R11
14274 JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
14275 MOVOU X0, (AX)
14276 MOVOU X1, 16(AX)
14277 MOVOU X2, -32(AX)(DI*1)
14278 MOVOU X3, -16(AX)(DI*1)
14279 MOVQ BX, AX
14280
14281emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B:
14282 ADDL $0x05, CX
14283 MOVL CX, BX
14284 SUBL 16(SP), BX
14285 MOVQ src_len+32(FP), DI
14286 SUBL CX, DI
14287 LEAQ (DX)(CX*1), R8
14288 LEAQ (DX)(BX*1), BX
14289
14290 // matchLen
14291 XORL R10, R10
14292
14293matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B:
14294 CMPL DI, $0x10
14295 JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B
14296 MOVQ (R8)(R10*1), R9
14297 MOVQ 8(R8)(R10*1), R11
14298 XORQ (BX)(R10*1), R9
14299 JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
14300 XORQ 8(BX)(R10*1), R11
14301 JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B
14302 LEAL -16(DI), DI
14303 LEAL 16(R10), R10
14304 JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B
14305
14306matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B:
14307#ifdef GOAMD64_v3
14308 TZCNTQ R11, R11
14309
14310#else
14311 BSFQ R11, R11
14312
14313#endif
14314 SARQ $0x03, R11
14315 LEAL 8(R10)(R11*1), R10
14316 JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
14317
14318matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B:
14319 CMPL DI, $0x08
14320 JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
14321 MOVQ (R8)(R10*1), R9
14322 XORQ (BX)(R10*1), R9
14323 JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
14324 LEAL -8(DI), DI
14325 LEAL 8(R10), R10
14326 JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
14327
14328matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B:
14329#ifdef GOAMD64_v3
14330 TZCNTQ R9, R9
14331
14332#else
14333 BSFQ R9, R9
14334
14335#endif
14336 SARQ $0x03, R9
14337 LEAL (R10)(R9*1), R10
14338 JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
14339
14340matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
14341 CMPL DI, $0x04
14342 JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
14343 MOVL (R8)(R10*1), R9
14344 CMPL (BX)(R10*1), R9
14345 JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
14346 LEAL -4(DI), DI
14347 LEAL 4(R10), R10
14348
14349matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
14350 CMPL DI, $0x01
14351 JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
14352 JB repeat_extend_forward_end_encodeSnappyBlockAsm8B
14353 MOVW (R8)(R10*1), R9
14354 CMPW (BX)(R10*1), R9
14355 JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
14356 LEAL 2(R10), R10
14357 SUBL $0x02, DI
14358 JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
14359
14360matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
14361 MOVB (R8)(R10*1), R9
14362 CMPB (BX)(R10*1), R9
14363 JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
14364 LEAL 1(R10), R10
14365
14366repeat_extend_forward_end_encodeSnappyBlockAsm8B:
14367 ADDL R10, CX
14368 MOVL CX, BX
14369 SUBL SI, BX
14370 MOVL 16(SP), SI
14371
14372 // emitCopy
14373two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B:
14374 CMPL BX, $0x40
14375 JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B
14376 MOVB $0xee, (AX)
14377 MOVW SI, 1(AX)
14378 LEAL -60(BX), BX
14379 ADDQ $0x03, AX
14380 JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B
14381
14382two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B:
14383 MOVL BX, DI
14384 SHLL $0x02, DI
14385 CMPL BX, $0x0c
14386 JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B
14387 LEAL -15(DI), DI
14388 MOVB SI, 1(AX)
14389 SHRL $0x08, SI
14390 SHLL $0x05, SI
14391 ORL SI, DI
14392 MOVB DI, (AX)
14393 ADDQ $0x02, AX
14394 JMP repeat_end_emit_encodeSnappyBlockAsm8B
14395
14396emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B:
14397 LEAL -2(DI), DI
14398 MOVB DI, (AX)
14399 MOVW SI, 1(AX)
14400 ADDQ $0x03, AX
14401
14402repeat_end_emit_encodeSnappyBlockAsm8B:
14403 MOVL CX, 12(SP)
14404 JMP search_loop_encodeSnappyBlockAsm8B
14405
14406no_repeat_found_encodeSnappyBlockAsm8B:
14407 CMPL (DX)(BX*1), SI
14408 JEQ candidate_match_encodeSnappyBlockAsm8B
14409 SHRQ $0x08, SI
14410 MOVL 24(SP)(R9*4), BX
14411 LEAL 2(CX), R8
14412 CMPL (DX)(DI*1), SI
14413 JEQ candidate2_match_encodeSnappyBlockAsm8B
14414 MOVL R8, 24(SP)(R9*4)
14415 SHRQ $0x08, SI
14416 CMPL (DX)(BX*1), SI
14417 JEQ candidate3_match_encodeSnappyBlockAsm8B
14418 MOVL 20(SP), CX
14419 JMP search_loop_encodeSnappyBlockAsm8B
14420
14421candidate3_match_encodeSnappyBlockAsm8B:
14422 ADDL $0x02, CX
14423 JMP candidate_match_encodeSnappyBlockAsm8B
14424
14425candidate2_match_encodeSnappyBlockAsm8B:
14426 MOVL R8, 24(SP)(R9*4)
14427 INCL CX
14428 MOVL DI, BX
14429
14430candidate_match_encodeSnappyBlockAsm8B:
14431 MOVL 12(SP), SI
14432 TESTL BX, BX
14433 JZ match_extend_back_end_encodeSnappyBlockAsm8B
14434
14435match_extend_back_loop_encodeSnappyBlockAsm8B:
14436 CMPL CX, SI
14437 JBE match_extend_back_end_encodeSnappyBlockAsm8B
14438 MOVB -1(DX)(BX*1), DI
14439 MOVB -1(DX)(CX*1), R8
14440 CMPB DI, R8
14441 JNE match_extend_back_end_encodeSnappyBlockAsm8B
14442 LEAL -1(CX), CX
14443 DECL BX
14444 JZ match_extend_back_end_encodeSnappyBlockAsm8B
14445 JMP match_extend_back_loop_encodeSnappyBlockAsm8B
14446
14447match_extend_back_end_encodeSnappyBlockAsm8B:
14448 MOVL CX, SI
14449 SUBL 12(SP), SI
14450 LEAQ 3(AX)(SI*1), SI
14451 CMPQ SI, (SP)
14452 JB match_dst_size_check_encodeSnappyBlockAsm8B
14453 MOVQ $0x00000000, ret+48(FP)
14454 RET
14455
14456match_dst_size_check_encodeSnappyBlockAsm8B:
14457 MOVL CX, SI
14458 MOVL 12(SP), DI
14459 CMPL DI, SI
14460 JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B
14461 MOVL SI, R8
14462 MOVL SI, 12(SP)
14463 LEAQ (DX)(DI*1), SI
14464 SUBL DI, R8
14465 LEAL -1(R8), DI
14466 CMPL DI, $0x3c
14467 JB one_byte_match_emit_encodeSnappyBlockAsm8B
14468 CMPL DI, $0x00000100
14469 JB two_bytes_match_emit_encodeSnappyBlockAsm8B
14470 JB three_bytes_match_emit_encodeSnappyBlockAsm8B
14471
14472three_bytes_match_emit_encodeSnappyBlockAsm8B:
14473 MOVB $0xf4, (AX)
14474 MOVW DI, 1(AX)
14475 ADDQ $0x03, AX
14476 JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
14477
14478two_bytes_match_emit_encodeSnappyBlockAsm8B:
14479 MOVB $0xf0, (AX)
14480 MOVB DI, 1(AX)
14481 ADDQ $0x02, AX
14482 CMPL DI, $0x40
14483 JB memmove_match_emit_encodeSnappyBlockAsm8B
14484 JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
14485
14486one_byte_match_emit_encodeSnappyBlockAsm8B:
14487 SHLB $0x02, DI
14488 MOVB DI, (AX)
14489 ADDQ $0x01, AX
14490
14491memmove_match_emit_encodeSnappyBlockAsm8B:
14492 LEAQ (AX)(R8*1), DI
14493
14494 // genMemMoveShort
14495 CMPQ R8, $0x08
14496 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8
14497 CMPQ R8, $0x10
14498 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
14499 CMPQ R8, $0x20
14500 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
14501 JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
14502
14503emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8:
14504 MOVQ (SI), R9
14505 MOVQ R9, (AX)
14506 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
14507
14508emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
14509 MOVQ (SI), R9
14510 MOVQ -8(SI)(R8*1), SI
14511 MOVQ R9, (AX)
14512 MOVQ SI, -8(AX)(R8*1)
14513 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
14514
14515emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
14516 MOVOU (SI), X0
14517 MOVOU -16(SI)(R8*1), X1
14518 MOVOU X0, (AX)
14519 MOVOU X1, -16(AX)(R8*1)
14520 JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
14521
14522emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
14523 MOVOU (SI), X0
14524 MOVOU 16(SI), X1
14525 MOVOU -32(SI)(R8*1), X2
14526 MOVOU -16(SI)(R8*1), X3
14527 MOVOU X0, (AX)
14528 MOVOU X1, 16(AX)
14529 MOVOU X2, -32(AX)(R8*1)
14530 MOVOU X3, -16(AX)(R8*1)
14531
14532memmove_end_copy_match_emit_encodeSnappyBlockAsm8B:
14533 MOVQ DI, AX
14534 JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B
14535
14536memmove_long_match_emit_encodeSnappyBlockAsm8B:
14537 LEAQ (AX)(R8*1), DI
14538
14539 // genMemMoveLong
14540 MOVOU (SI), X0
14541 MOVOU 16(SI), X1
14542 MOVOU -32(SI)(R8*1), X2
14543 MOVOU -16(SI)(R8*1), X3
14544 MOVQ R8, R10
14545 SHRQ $0x05, R10
14546 MOVQ AX, R9
14547 ANDL $0x0000001f, R9
14548 MOVQ $0x00000040, R11
14549 SUBQ R9, R11
14550 DECQ R10
14551 JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
14552 LEAQ -32(SI)(R11*1), R9
14553 LEAQ -32(AX)(R11*1), R12
14554
14555emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
14556 MOVOU (R9), X4
14557 MOVOU 16(R9), X5
14558 MOVOA X4, (R12)
14559 MOVOA X5, 16(R12)
14560 ADDQ $0x20, R12
14561 ADDQ $0x20, R9
14562 ADDQ $0x20, R11
14563 DECQ R10
14564 JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
14565
14566emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
14567 MOVOU -32(SI)(R11*1), X4
14568 MOVOU -16(SI)(R11*1), X5
14569 MOVOA X4, -32(AX)(R11*1)
14570 MOVOA X5, -16(AX)(R11*1)
14571 ADDQ $0x20, R11
14572 CMPQ R8, R11
14573 JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
14574 MOVOU X0, (AX)
14575 MOVOU X1, 16(AX)
14576 MOVOU X2, -32(AX)(R8*1)
14577 MOVOU X3, -16(AX)(R8*1)
14578 MOVQ DI, AX
14579
14580emit_literal_done_match_emit_encodeSnappyBlockAsm8B:
14581match_nolit_loop_encodeSnappyBlockAsm8B:
14582 MOVL CX, SI
14583 SUBL BX, SI
14584 MOVL SI, 16(SP)
14585 ADDL $0x04, CX
14586 ADDL $0x04, BX
14587 MOVQ src_len+32(FP), SI
14588 SUBL CX, SI
14589 LEAQ (DX)(CX*1), DI
14590 LEAQ (DX)(BX*1), BX
14591
14592 // matchLen
14593 XORL R9, R9
14594
14595matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B:
14596 CMPL SI, $0x10
14597 JB matchlen_match8_match_nolit_encodeSnappyBlockAsm8B
14598 MOVQ (DI)(R9*1), R8
14599 MOVQ 8(DI)(R9*1), R10
14600 XORQ (BX)(R9*1), R8
14601 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B
14602 XORQ 8(BX)(R9*1), R10
14603 JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B
14604 LEAL -16(SI), SI
14605 LEAL 16(R9), R9
14606 JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B
14607
14608matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B:
14609#ifdef GOAMD64_v3
14610 TZCNTQ R10, R10
14611
14612#else
14613 BSFQ R10, R10
14614
14615#endif
14616 SARQ $0x03, R10
14617 LEAL 8(R9)(R10*1), R9
14618 JMP match_nolit_end_encodeSnappyBlockAsm8B
14619
14620matchlen_match8_match_nolit_encodeSnappyBlockAsm8B:
14621 CMPL SI, $0x08
14622 JB matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
14623 MOVQ (DI)(R9*1), R8
14624 XORQ (BX)(R9*1), R8
14625 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B
14626 LEAL -8(SI), SI
14627 LEAL 8(R9), R9
14628 JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
14629
14630matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B:
14631#ifdef GOAMD64_v3
14632 TZCNTQ R8, R8
14633
14634#else
14635 BSFQ R8, R8
14636
14637#endif
14638 SARQ $0x03, R8
14639 LEAL (R9)(R8*1), R9
14640 JMP match_nolit_end_encodeSnappyBlockAsm8B
14641
14642matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
14643 CMPL SI, $0x04
14644 JB matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
14645 MOVL (DI)(R9*1), R8
14646 CMPL (BX)(R9*1), R8
14647 JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
14648 LEAL -4(SI), SI
14649 LEAL 4(R9), R9
14650
14651matchlen_match2_match_nolit_encodeSnappyBlockAsm8B:
14652 CMPL SI, $0x01
14653 JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
14654 JB match_nolit_end_encodeSnappyBlockAsm8B
14655 MOVW (DI)(R9*1), R8
14656 CMPW (BX)(R9*1), R8
14657 JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
14658 LEAL 2(R9), R9
14659 SUBL $0x02, SI
14660 JZ match_nolit_end_encodeSnappyBlockAsm8B
14661
14662matchlen_match1_match_nolit_encodeSnappyBlockAsm8B:
14663 MOVB (DI)(R9*1), R8
14664 CMPB (BX)(R9*1), R8
14665 JNE match_nolit_end_encodeSnappyBlockAsm8B
14666 LEAL 1(R9), R9
14667
14668match_nolit_end_encodeSnappyBlockAsm8B:
14669 ADDL R9, CX
14670 MOVL 16(SP), BX
14671 ADDL $0x04, R9
14672 MOVL CX, 12(SP)
14673
14674 // emitCopy
14675two_byte_offset_match_nolit_encodeSnappyBlockAsm8B:
14676 CMPL R9, $0x40
14677 JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B
14678 MOVB $0xee, (AX)
14679 MOVW BX, 1(AX)
14680 LEAL -60(R9), R9
14681 ADDQ $0x03, AX
14682 JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B
14683
14684two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B:
14685 MOVL R9, SI
14686 SHLL $0x02, SI
14687 CMPL R9, $0x0c
14688 JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B
14689 LEAL -15(SI), SI
14690 MOVB BL, 1(AX)
14691 SHRL $0x08, BX
14692 SHLL $0x05, BX
14693 ORL BX, SI
14694 MOVB SI, (AX)
14695 ADDQ $0x02, AX
14696 JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B
14697
14698emit_copy_three_match_nolit_encodeSnappyBlockAsm8B:
14699 LEAL -2(SI), SI
14700 MOVB SI, (AX)
14701 MOVW BX, 1(AX)
14702 ADDQ $0x03, AX
14703
14704match_nolit_emitcopy_end_encodeSnappyBlockAsm8B:
14705 CMPL CX, 8(SP)
14706 JAE emit_remainder_encodeSnappyBlockAsm8B
14707 MOVQ -2(DX)(CX*1), SI
14708 CMPQ AX, (SP)
14709 JB match_nolit_dst_ok_encodeSnappyBlockAsm8B
14710 MOVQ $0x00000000, ret+48(FP)
14711 RET
14712
14713match_nolit_dst_ok_encodeSnappyBlockAsm8B:
14714 MOVQ $0x9e3779b1, R8
14715 MOVQ SI, DI
14716 SHRQ $0x10, SI
14717 MOVQ SI, BX
14718 SHLQ $0x20, DI
14719 IMULQ R8, DI
14720 SHRQ $0x38, DI
14721 SHLQ $0x20, BX
14722 IMULQ R8, BX
14723 SHRQ $0x38, BX
14724 LEAL -2(CX), R8
14725 LEAQ 24(SP)(BX*4), R9
14726 MOVL (R9), BX
14727 MOVL R8, 24(SP)(DI*4)
14728 MOVL CX, (R9)
14729 CMPL (DX)(BX*1), SI
14730 JEQ match_nolit_loop_encodeSnappyBlockAsm8B
14731 INCL CX
14732 JMP search_loop_encodeSnappyBlockAsm8B
14733
14734emit_remainder_encodeSnappyBlockAsm8B:
14735 MOVQ src_len+32(FP), CX
14736 SUBL 12(SP), CX
14737 LEAQ 3(AX)(CX*1), CX
14738 CMPQ CX, (SP)
14739 JB emit_remainder_ok_encodeSnappyBlockAsm8B
14740 MOVQ $0x00000000, ret+48(FP)
14741 RET
14742
14743emit_remainder_ok_encodeSnappyBlockAsm8B:
14744 MOVQ src_len+32(FP), CX
14745 MOVL 12(SP), BX
14746 CMPL BX, CX
14747 JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
14748 MOVL CX, SI
14749 MOVL CX, 12(SP)
14750 LEAQ (DX)(BX*1), CX
14751 SUBL BX, SI
14752 LEAL -1(SI), DX
14753 CMPL DX, $0x3c
14754 JB one_byte_emit_remainder_encodeSnappyBlockAsm8B
14755 CMPL DX, $0x00000100
14756 JB two_bytes_emit_remainder_encodeSnappyBlockAsm8B
14757 JB three_bytes_emit_remainder_encodeSnappyBlockAsm8B
14758
14759three_bytes_emit_remainder_encodeSnappyBlockAsm8B:
14760 MOVB $0xf4, (AX)
14761 MOVW DX, 1(AX)
14762 ADDQ $0x03, AX
14763 JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
14764
14765two_bytes_emit_remainder_encodeSnappyBlockAsm8B:
14766 MOVB $0xf0, (AX)
14767 MOVB DL, 1(AX)
14768 ADDQ $0x02, AX
14769 CMPL DX, $0x40
14770 JB memmove_emit_remainder_encodeSnappyBlockAsm8B
14771 JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
14772
14773one_byte_emit_remainder_encodeSnappyBlockAsm8B:
14774 SHLB $0x02, DL
14775 MOVB DL, (AX)
14776 ADDQ $0x01, AX
14777
14778memmove_emit_remainder_encodeSnappyBlockAsm8B:
14779 LEAQ (AX)(SI*1), DX
14780 MOVL SI, BX
14781
14782 // genMemMoveShort
14783 CMPQ BX, $0x03
14784 JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2
14785 JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3
14786 CMPQ BX, $0x08
14787 JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7
14788 CMPQ BX, $0x10
14789 JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16
14790 CMPQ BX, $0x20
14791 JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32
14792 JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64
14793
14794emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2:
14795 MOVB (CX), SI
14796 MOVB -1(CX)(BX*1), CL
14797 MOVB SI, (AX)
14798 MOVB CL, -1(AX)(BX*1)
14799 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
14800
14801emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3:
14802 MOVW (CX), SI
14803 MOVB 2(CX), CL
14804 MOVW SI, (AX)
14805 MOVB CL, 2(AX)
14806 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
14807
14808emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7:
14809 MOVL (CX), SI
14810 MOVL -4(CX)(BX*1), CX
14811 MOVL SI, (AX)
14812 MOVL CX, -4(AX)(BX*1)
14813 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
14814
14815emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16:
14816 MOVQ (CX), SI
14817 MOVQ -8(CX)(BX*1), CX
14818 MOVQ SI, (AX)
14819 MOVQ CX, -8(AX)(BX*1)
14820 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
14821
14822emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32:
14823 MOVOU (CX), X0
14824 MOVOU -16(CX)(BX*1), X1
14825 MOVOU X0, (AX)
14826 MOVOU X1, -16(AX)(BX*1)
14827 JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
14828
14829emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64:
14830 MOVOU (CX), X0
14831 MOVOU 16(CX), X1
14832 MOVOU -32(CX)(BX*1), X2
14833 MOVOU -16(CX)(BX*1), X3
14834 MOVOU X0, (AX)
14835 MOVOU X1, 16(AX)
14836 MOVOU X2, -32(AX)(BX*1)
14837 MOVOU X3, -16(AX)(BX*1)
14838
14839memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B:
14840 MOVQ DX, AX
14841 JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
14842
14843memmove_long_emit_remainder_encodeSnappyBlockAsm8B:
14844 LEAQ (AX)(SI*1), DX
14845 MOVL SI, BX
14846
14847 // genMemMoveLong
14848 MOVOU (CX), X0
14849 MOVOU 16(CX), X1
14850 MOVOU -32(CX)(BX*1), X2
14851 MOVOU -16(CX)(BX*1), X3
14852 MOVQ BX, DI
14853 SHRQ $0x05, DI
14854 MOVQ AX, SI
14855 ANDL $0x0000001f, SI
14856 MOVQ $0x00000040, R8
14857 SUBQ SI, R8
14858 DECQ DI
14859 JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
14860 LEAQ -32(CX)(R8*1), SI
14861 LEAQ -32(AX)(R8*1), R9
14862
14863emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back:
14864 MOVOU (SI), X4
14865 MOVOU 16(SI), X5
14866 MOVOA X4, (R9)
14867 MOVOA X5, 16(R9)
14868 ADDQ $0x20, R9
14869 ADDQ $0x20, SI
14870 ADDQ $0x20, R8
14871 DECQ DI
14872 JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back
14873
14874emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
14875 MOVOU -32(CX)(R8*1), X4
14876 MOVOU -16(CX)(R8*1), X5
14877 MOVOA X4, -32(AX)(R8*1)
14878 MOVOA X5, -16(AX)(R8*1)
14879 ADDQ $0x20, R8
14880 CMPQ BX, R8
14881 JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
14882 MOVOU X0, (AX)
14883 MOVOU X1, 16(AX)
14884 MOVOU X2, -32(AX)(BX*1)
14885 MOVOU X3, -16(AX)(BX*1)
14886 MOVQ DX, AX
14887
14888emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B:
14889 MOVQ dst_base+0(FP), CX
14890 SUBQ CX, AX
14891 MOVQ AX, ret+48(FP)
14892 RET
14893
14894// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
14895// Requires: BMI, SSE2
14896TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56
14897 MOVQ dst_base+0(FP), AX
14898 MOVQ $0x00001200, CX
14899 LEAQ 24(SP), DX
14900 PXOR X0, X0
14901
14902zero_loop_encodeSnappyBetterBlockAsm:
14903 MOVOU X0, (DX)
14904 MOVOU X0, 16(DX)
14905 MOVOU X0, 32(DX)
14906 MOVOU X0, 48(DX)
14907 MOVOU X0, 64(DX)
14908 MOVOU X0, 80(DX)
14909 MOVOU X0, 96(DX)
14910 MOVOU X0, 112(DX)
14911 ADDQ $0x80, DX
14912 DECQ CX
14913 JNZ zero_loop_encodeSnappyBetterBlockAsm
14914 MOVL $0x00000000, 12(SP)
14915 MOVQ src_len+32(FP), CX
14916 LEAQ -9(CX), DX
14917 LEAQ -8(CX), BX
14918 MOVL BX, 8(SP)
14919 SHRQ $0x05, CX
14920 SUBL CX, DX
14921 LEAQ (AX)(DX*1), DX
14922 MOVQ DX, (SP)
14923 MOVL $0x00000001, CX
14924 MOVL $0x00000000, 16(SP)
14925 MOVQ src_base+24(FP), DX
14926
14927search_loop_encodeSnappyBetterBlockAsm:
14928 MOVL CX, BX
14929 SUBL 12(SP), BX
14930 SHRL $0x07, BX
14931 CMPL BX, $0x63
14932 JBE check_maxskip_ok_encodeSnappyBetterBlockAsm
14933 LEAL 100(CX), BX
14934 JMP check_maxskip_cont_encodeSnappyBetterBlockAsm
14935
14936check_maxskip_ok_encodeSnappyBetterBlockAsm:
14937 LEAL 1(CX)(BX*1), BX
14938
14939check_maxskip_cont_encodeSnappyBetterBlockAsm:
14940 CMPL BX, 8(SP)
14941 JAE emit_remainder_encodeSnappyBetterBlockAsm
14942 MOVQ (DX)(CX*1), SI
14943 MOVL BX, 20(SP)
14944 MOVQ $0x00cf1bbcdcbfa563, R8
14945 MOVQ $0x9e3779b1, BX
14946 MOVQ SI, R9
14947 MOVQ SI, R10
14948 SHLQ $0x08, R9
14949 IMULQ R8, R9
14950 SHRQ $0x2f, R9
14951 SHLQ $0x20, R10
14952 IMULQ BX, R10
14953 SHRQ $0x32, R10
14954 MOVL 24(SP)(R9*4), BX
14955 MOVL 524312(SP)(R10*4), DI
14956 MOVL CX, 24(SP)(R9*4)
14957 MOVL CX, 524312(SP)(R10*4)
14958 MOVQ (DX)(BX*1), R9
14959 MOVQ (DX)(DI*1), R10
14960 CMPQ R9, SI
14961 JEQ candidate_match_encodeSnappyBetterBlockAsm
14962 CMPQ R10, SI
14963 JNE no_short_found_encodeSnappyBetterBlockAsm
14964 MOVL DI, BX
14965 JMP candidate_match_encodeSnappyBetterBlockAsm
14966
14967no_short_found_encodeSnappyBetterBlockAsm:
14968 CMPL R9, SI
14969 JEQ candidate_match_encodeSnappyBetterBlockAsm
14970 CMPL R10, SI
14971 JEQ candidateS_match_encodeSnappyBetterBlockAsm
14972 MOVL 20(SP), CX
14973 JMP search_loop_encodeSnappyBetterBlockAsm
14974
14975candidateS_match_encodeSnappyBetterBlockAsm:
14976 SHRQ $0x08, SI
14977 MOVQ SI, R9
14978 SHLQ $0x08, R9
14979 IMULQ R8, R9
14980 SHRQ $0x2f, R9
14981 MOVL 24(SP)(R9*4), BX
14982 INCL CX
14983 MOVL CX, 24(SP)(R9*4)
14984 CMPL (DX)(BX*1), SI
14985 JEQ candidate_match_encodeSnappyBetterBlockAsm
14986 DECL CX
14987 MOVL DI, BX
14988
14989candidate_match_encodeSnappyBetterBlockAsm:
14990 MOVL 12(SP), SI
14991 TESTL BX, BX
14992 JZ match_extend_back_end_encodeSnappyBetterBlockAsm
14993
14994match_extend_back_loop_encodeSnappyBetterBlockAsm:
14995 CMPL CX, SI
14996 JBE match_extend_back_end_encodeSnappyBetterBlockAsm
14997 MOVB -1(DX)(BX*1), DI
14998 MOVB -1(DX)(CX*1), R8
14999 CMPB DI, R8
15000 JNE match_extend_back_end_encodeSnappyBetterBlockAsm
15001 LEAL -1(CX), CX
15002 DECL BX
15003 JZ match_extend_back_end_encodeSnappyBetterBlockAsm
15004 JMP match_extend_back_loop_encodeSnappyBetterBlockAsm
15005
15006match_extend_back_end_encodeSnappyBetterBlockAsm:
15007 MOVL CX, SI
15008 SUBL 12(SP), SI
15009 LEAQ 5(AX)(SI*1), SI
15010 CMPQ SI, (SP)
15011 JB match_dst_size_check_encodeSnappyBetterBlockAsm
15012 MOVQ $0x00000000, ret+48(FP)
15013 RET
15014
15015match_dst_size_check_encodeSnappyBetterBlockAsm:
15016 MOVL CX, SI
15017 ADDL $0x04, CX
15018 ADDL $0x04, BX
15019 MOVQ src_len+32(FP), DI
15020 SUBL CX, DI
15021 LEAQ (DX)(CX*1), R8
15022 LEAQ (DX)(BX*1), R9
15023
15024 // matchLen
15025 XORL R11, R11
15026
15027matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm:
15028 CMPL DI, $0x10
15029 JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm
15030 MOVQ (R8)(R11*1), R10
15031 MOVQ 8(R8)(R11*1), R12
15032 XORQ (R9)(R11*1), R10
15033 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm
15034 XORQ 8(R9)(R11*1), R12
15035 JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm
15036 LEAL -16(DI), DI
15037 LEAL 16(R11), R11
15038 JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm
15039
15040matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm:
15041#ifdef GOAMD64_v3
15042 TZCNTQ R12, R12
15043
15044#else
15045 BSFQ R12, R12
15046
15047#endif
15048 SARQ $0x03, R12
15049 LEAL 8(R11)(R12*1), R11
15050 JMP match_nolit_end_encodeSnappyBetterBlockAsm
15051
15052matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm:
15053 CMPL DI, $0x08
15054 JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
15055 MOVQ (R8)(R11*1), R10
15056 XORQ (R9)(R11*1), R10
15057 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm
15058 LEAL -8(DI), DI
15059 LEAL 8(R11), R11
15060 JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
15061
15062matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm:
15063#ifdef GOAMD64_v3
15064 TZCNTQ R10, R10
15065
15066#else
15067 BSFQ R10, R10
15068
15069#endif
15070 SARQ $0x03, R10
15071 LEAL (R11)(R10*1), R11
15072 JMP match_nolit_end_encodeSnappyBetterBlockAsm
15073
15074matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
15075 CMPL DI, $0x04
15076 JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
15077 MOVL (R8)(R11*1), R10
15078 CMPL (R9)(R11*1), R10
15079 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
15080 LEAL -4(DI), DI
15081 LEAL 4(R11), R11
15082
15083matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm:
15084 CMPL DI, $0x01
15085 JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
15086 JB match_nolit_end_encodeSnappyBetterBlockAsm
15087 MOVW (R8)(R11*1), R10
15088 CMPW (R9)(R11*1), R10
15089 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
15090 LEAL 2(R11), R11
15091 SUBL $0x02, DI
15092 JZ match_nolit_end_encodeSnappyBetterBlockAsm
15093
15094matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm:
15095 MOVB (R8)(R11*1), R10
15096 CMPB (R9)(R11*1), R10
15097 JNE match_nolit_end_encodeSnappyBetterBlockAsm
15098 LEAL 1(R11), R11
15099
15100match_nolit_end_encodeSnappyBetterBlockAsm:
15101 MOVL CX, DI
15102 SUBL BX, DI
15103
15104 // Check if repeat
15105 CMPL R11, $0x01
15106 JA match_length_ok_encodeSnappyBetterBlockAsm
15107 CMPL DI, $0x0000ffff
15108 JBE match_length_ok_encodeSnappyBetterBlockAsm
15109 MOVL 20(SP), CX
15110 INCL CX
15111 JMP search_loop_encodeSnappyBetterBlockAsm
15112
15113match_length_ok_encodeSnappyBetterBlockAsm:
15114 MOVL DI, 16(SP)
15115 MOVL 12(SP), BX
15116 CMPL BX, SI
15117 JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
15118 MOVL SI, R8
15119 MOVL SI, 12(SP)
15120 LEAQ (DX)(BX*1), R9
15121 SUBL BX, R8
15122 LEAL -1(R8), BX
15123 CMPL BX, $0x3c
15124 JB one_byte_match_emit_encodeSnappyBetterBlockAsm
15125 CMPL BX, $0x00000100
15126 JB two_bytes_match_emit_encodeSnappyBetterBlockAsm
15127 CMPL BX, $0x00010000
15128 JB three_bytes_match_emit_encodeSnappyBetterBlockAsm
15129 CMPL BX, $0x01000000
15130 JB four_bytes_match_emit_encodeSnappyBetterBlockAsm
15131 MOVB $0xfc, (AX)
15132 MOVL BX, 1(AX)
15133 ADDQ $0x05, AX
15134 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
15135
15136four_bytes_match_emit_encodeSnappyBetterBlockAsm:
15137 MOVL BX, R10
15138 SHRL $0x10, R10
15139 MOVB $0xf8, (AX)
15140 MOVW BX, 1(AX)
15141 MOVB R10, 3(AX)
15142 ADDQ $0x04, AX
15143 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
15144
15145three_bytes_match_emit_encodeSnappyBetterBlockAsm:
15146 MOVB $0xf4, (AX)
15147 MOVW BX, 1(AX)
15148 ADDQ $0x03, AX
15149 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
15150
15151two_bytes_match_emit_encodeSnappyBetterBlockAsm:
15152 MOVB $0xf0, (AX)
15153 MOVB BL, 1(AX)
15154 ADDQ $0x02, AX
15155 CMPL BX, $0x40
15156 JB memmove_match_emit_encodeSnappyBetterBlockAsm
15157 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
15158
15159one_byte_match_emit_encodeSnappyBetterBlockAsm:
15160 SHLB $0x02, BL
15161 MOVB BL, (AX)
15162 ADDQ $0x01, AX
15163
15164memmove_match_emit_encodeSnappyBetterBlockAsm:
15165 LEAQ (AX)(R8*1), BX
15166
15167 // genMemMoveShort
15168 CMPQ R8, $0x08
15169 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8
15170 CMPQ R8, $0x10
15171 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16
15172 CMPQ R8, $0x20
15173 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32
15174 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64
15175
15176emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8:
15177 MOVQ (R9), R10
15178 MOVQ R10, (AX)
15179 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
15180
15181emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16:
15182 MOVQ (R9), R10
15183 MOVQ -8(R9)(R8*1), R9
15184 MOVQ R10, (AX)
15185 MOVQ R9, -8(AX)(R8*1)
15186 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
15187
15188emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32:
15189 MOVOU (R9), X0
15190 MOVOU -16(R9)(R8*1), X1
15191 MOVOU X0, (AX)
15192 MOVOU X1, -16(AX)(R8*1)
15193 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
15194
15195emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64:
15196 MOVOU (R9), X0
15197 MOVOU 16(R9), X1
15198 MOVOU -32(R9)(R8*1), X2
15199 MOVOU -16(R9)(R8*1), X3
15200 MOVOU X0, (AX)
15201 MOVOU X1, 16(AX)
15202 MOVOU X2, -32(AX)(R8*1)
15203 MOVOU X3, -16(AX)(R8*1)
15204
15205memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm:
15206 MOVQ BX, AX
15207 JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
15208
15209memmove_long_match_emit_encodeSnappyBetterBlockAsm:
15210 LEAQ (AX)(R8*1), BX
15211
15212 // genMemMoveLong
15213 MOVOU (R9), X0
15214 MOVOU 16(R9), X1
15215 MOVOU -32(R9)(R8*1), X2
15216 MOVOU -16(R9)(R8*1), X3
15217 MOVQ R8, R12
15218 SHRQ $0x05, R12
15219 MOVQ AX, R10
15220 ANDL $0x0000001f, R10
15221 MOVQ $0x00000040, R13
15222 SUBQ R10, R13
15223 DECQ R12
15224 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
15225 LEAQ -32(R9)(R13*1), R10
15226 LEAQ -32(AX)(R13*1), R14
15227
15228emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back:
15229 MOVOU (R10), X4
15230 MOVOU 16(R10), X5
15231 MOVOA X4, (R14)
15232 MOVOA X5, 16(R14)
15233 ADDQ $0x20, R14
15234 ADDQ $0x20, R10
15235 ADDQ $0x20, R13
15236 DECQ R12
15237 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back
15238
15239emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
15240 MOVOU -32(R9)(R13*1), X4
15241 MOVOU -16(R9)(R13*1), X5
15242 MOVOA X4, -32(AX)(R13*1)
15243 MOVOA X5, -16(AX)(R13*1)
15244 ADDQ $0x20, R13
15245 CMPQ R8, R13
15246 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
15247 MOVOU X0, (AX)
15248 MOVOU X1, 16(AX)
15249 MOVOU X2, -32(AX)(R8*1)
15250 MOVOU X3, -16(AX)(R8*1)
15251 MOVQ BX, AX
15252
15253emit_literal_done_match_emit_encodeSnappyBetterBlockAsm:
15254 ADDL R11, CX
15255 ADDL $0x04, R11
15256 MOVL CX, 12(SP)
15257
15258 // emitCopy
15259 CMPL DI, $0x00010000
15260 JB two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
15261
15262four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm:
15263 CMPL R11, $0x40
15264 JBE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
15265 MOVB $0xff, (AX)
15266 MOVL DI, 1(AX)
15267 LEAL -64(R11), R11
15268 ADDQ $0x05, AX
15269 CMPL R11, $0x04
15270 JB four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
15271 JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm
15272
15273four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm:
15274 TESTL R11, R11
15275 JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
15276 XORL BX, BX
15277 LEAL -1(BX)(R11*4), R11
15278 MOVB R11, (AX)
15279 MOVL DI, 1(AX)
15280 ADDQ $0x05, AX
15281 JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
15282
15283two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm:
15284 CMPL R11, $0x40
15285 JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm
15286 MOVB $0xee, (AX)
15287 MOVW DI, 1(AX)
15288 LEAL -60(R11), R11
15289 ADDQ $0x03, AX
15290 JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
15291
15292two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm:
15293 MOVL R11, BX
15294 SHLL $0x02, BX
15295 CMPL R11, $0x0c
15296 JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
15297 CMPL DI, $0x00000800
15298 JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
15299 LEAL -15(BX), BX
15300 MOVB DI, 1(AX)
15301 SHRL $0x08, DI
15302 SHLL $0x05, DI
15303 ORL DI, BX
15304 MOVB BL, (AX)
15305 ADDQ $0x02, AX
15306 JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
15307
15308emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm:
15309 LEAL -2(BX), BX
15310 MOVB BL, (AX)
15311 MOVW DI, 1(AX)
15312 ADDQ $0x03, AX
15313
15314match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm:
15315 CMPL CX, 8(SP)
15316 JAE emit_remainder_encodeSnappyBetterBlockAsm
15317 CMPQ AX, (SP)
15318 JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm
15319 MOVQ $0x00000000, ret+48(FP)
15320 RET
15321
15322match_nolit_dst_ok_encodeSnappyBetterBlockAsm:
15323 MOVQ $0x00cf1bbcdcbfa563, BX
15324 MOVQ $0x9e3779b1, DI
15325 LEAQ 1(SI), SI
15326 LEAQ -2(CX), R8
15327 MOVQ (DX)(SI*1), R9
15328 MOVQ 1(DX)(SI*1), R10
15329 MOVQ (DX)(R8*1), R11
15330 MOVQ 1(DX)(R8*1), R12
15331 SHLQ $0x08, R9
15332 IMULQ BX, R9
15333 SHRQ $0x2f, R9
15334 SHLQ $0x20, R10
15335 IMULQ DI, R10
15336 SHRQ $0x32, R10
15337 SHLQ $0x08, R11
15338 IMULQ BX, R11
15339 SHRQ $0x2f, R11
15340 SHLQ $0x20, R12
15341 IMULQ DI, R12
15342 SHRQ $0x32, R12
15343 LEAQ 1(SI), DI
15344 LEAQ 1(R8), R13
15345 MOVL SI, 24(SP)(R9*4)
15346 MOVL R8, 24(SP)(R11*4)
15347 MOVL DI, 524312(SP)(R10*4)
15348 MOVL R13, 524312(SP)(R12*4)
15349 LEAQ 1(R8)(SI*1), DI
15350 SHRQ $0x01, DI
15351 ADDQ $0x01, SI
15352 SUBQ $0x01, R8
15353
15354index_loop_encodeSnappyBetterBlockAsm:
15355 CMPQ DI, R8
15356 JAE search_loop_encodeSnappyBetterBlockAsm
15357 MOVQ (DX)(SI*1), R9
15358 MOVQ (DX)(DI*1), R10
15359 SHLQ $0x08, R9
15360 IMULQ BX, R9
15361 SHRQ $0x2f, R9
15362 SHLQ $0x08, R10
15363 IMULQ BX, R10
15364 SHRQ $0x2f, R10
15365 MOVL SI, 24(SP)(R9*4)
15366 MOVL DI, 24(SP)(R10*4)
15367 ADDQ $0x02, SI
15368 ADDQ $0x02, DI
15369 JMP index_loop_encodeSnappyBetterBlockAsm
15370
15371emit_remainder_encodeSnappyBetterBlockAsm:
15372 MOVQ src_len+32(FP), CX
15373 SUBL 12(SP), CX
15374 LEAQ 5(AX)(CX*1), CX
15375 CMPQ CX, (SP)
15376 JB emit_remainder_ok_encodeSnappyBetterBlockAsm
15377 MOVQ $0x00000000, ret+48(FP)
15378 RET
15379
15380emit_remainder_ok_encodeSnappyBetterBlockAsm:
15381 MOVQ src_len+32(FP), CX
15382 MOVL 12(SP), BX
15383 CMPL BX, CX
15384 JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
15385 MOVL CX, SI
15386 MOVL CX, 12(SP)
15387 LEAQ (DX)(BX*1), CX
15388 SUBL BX, SI
15389 LEAL -1(SI), DX
15390 CMPL DX, $0x3c
15391 JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm
15392 CMPL DX, $0x00000100
15393 JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm
15394 CMPL DX, $0x00010000
15395 JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm
15396 CMPL DX, $0x01000000
15397 JB four_bytes_emit_remainder_encodeSnappyBetterBlockAsm
15398 MOVB $0xfc, (AX)
15399 MOVL DX, 1(AX)
15400 ADDQ $0x05, AX
15401 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
15402
15403four_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
15404 MOVL DX, BX
15405 SHRL $0x10, BX
15406 MOVB $0xf8, (AX)
15407 MOVW DX, 1(AX)
15408 MOVB BL, 3(AX)
15409 ADDQ $0x04, AX
15410 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
15411
15412three_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
15413 MOVB $0xf4, (AX)
15414 MOVW DX, 1(AX)
15415 ADDQ $0x03, AX
15416 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
15417
15418two_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
15419 MOVB $0xf0, (AX)
15420 MOVB DL, 1(AX)
15421 ADDQ $0x02, AX
15422 CMPL DX, $0x40
15423 JB memmove_emit_remainder_encodeSnappyBetterBlockAsm
15424 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
15425
15426one_byte_emit_remainder_encodeSnappyBetterBlockAsm:
15427 SHLB $0x02, DL
15428 MOVB DL, (AX)
15429 ADDQ $0x01, AX
15430
15431memmove_emit_remainder_encodeSnappyBetterBlockAsm:
15432 LEAQ (AX)(SI*1), DX
15433 MOVL SI, BX
15434
15435 // genMemMoveShort
15436 CMPQ BX, $0x03
15437 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2
15438 JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3
15439 CMPQ BX, $0x08
15440 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7
15441 CMPQ BX, $0x10
15442 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16
15443 CMPQ BX, $0x20
15444 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32
15445 JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64
15446
15447emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2:
15448 MOVB (CX), SI
15449 MOVB -1(CX)(BX*1), CL
15450 MOVB SI, (AX)
15451 MOVB CL, -1(AX)(BX*1)
15452 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
15453
15454emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3:
15455 MOVW (CX), SI
15456 MOVB 2(CX), CL
15457 MOVW SI, (AX)
15458 MOVB CL, 2(AX)
15459 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
15460
15461emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7:
15462 MOVL (CX), SI
15463 MOVL -4(CX)(BX*1), CX
15464 MOVL SI, (AX)
15465 MOVL CX, -4(AX)(BX*1)
15466 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
15467
15468emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16:
15469 MOVQ (CX), SI
15470 MOVQ -8(CX)(BX*1), CX
15471 MOVQ SI, (AX)
15472 MOVQ CX, -8(AX)(BX*1)
15473 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
15474
15475emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32:
15476 MOVOU (CX), X0
15477 MOVOU -16(CX)(BX*1), X1
15478 MOVOU X0, (AX)
15479 MOVOU X1, -16(AX)(BX*1)
15480 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
15481
15482emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64:
15483 MOVOU (CX), X0
15484 MOVOU 16(CX), X1
15485 MOVOU -32(CX)(BX*1), X2
15486 MOVOU -16(CX)(BX*1), X3
15487 MOVOU X0, (AX)
15488 MOVOU X1, 16(AX)
15489 MOVOU X2, -32(AX)(BX*1)
15490 MOVOU X3, -16(AX)(BX*1)
15491
15492memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm:
15493 MOVQ DX, AX
15494 JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
15495
15496memmove_long_emit_remainder_encodeSnappyBetterBlockAsm:
15497 LEAQ (AX)(SI*1), DX
15498 MOVL SI, BX
15499
15500 // genMemMoveLong
15501 MOVOU (CX), X0
15502 MOVOU 16(CX), X1
15503 MOVOU -32(CX)(BX*1), X2
15504 MOVOU -16(CX)(BX*1), X3
15505 MOVQ BX, DI
15506 SHRQ $0x05, DI
15507 MOVQ AX, SI
15508 ANDL $0x0000001f, SI
15509 MOVQ $0x00000040, R8
15510 SUBQ SI, R8
15511 DECQ DI
15512 JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
15513 LEAQ -32(CX)(R8*1), SI
15514 LEAQ -32(AX)(R8*1), R9
15515
15516emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back:
15517 MOVOU (SI), X4
15518 MOVOU 16(SI), X5
15519 MOVOA X4, (R9)
15520 MOVOA X5, 16(R9)
15521 ADDQ $0x20, R9
15522 ADDQ $0x20, SI
15523 ADDQ $0x20, R8
15524 DECQ DI
15525 JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back
15526
15527emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
15528 MOVOU -32(CX)(R8*1), X4
15529 MOVOU -16(CX)(R8*1), X5
15530 MOVOA X4, -32(AX)(R8*1)
15531 MOVOA X5, -16(AX)(R8*1)
15532 ADDQ $0x20, R8
15533 CMPQ BX, R8
15534 JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
15535 MOVOU X0, (AX)
15536 MOVOU X1, 16(AX)
15537 MOVOU X2, -32(AX)(BX*1)
15538 MOVOU X3, -16(AX)(BX*1)
15539 MOVQ DX, AX
15540
15541emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm:
15542 MOVQ dst_base+0(FP), CX
15543 SUBQ CX, AX
15544 MOVQ AX, ret+48(FP)
15545 RET
15546
15547// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
15548// Requires: BMI, SSE2
15549TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56
15550 MOVQ dst_base+0(FP), AX
15551 MOVQ $0x00000a00, CX
15552 LEAQ 24(SP), DX
15553 PXOR X0, X0
15554
15555zero_loop_encodeSnappyBetterBlockAsm64K:
15556 MOVOU X0, (DX)
15557 MOVOU X0, 16(DX)
15558 MOVOU X0, 32(DX)
15559 MOVOU X0, 48(DX)
15560 MOVOU X0, 64(DX)
15561 MOVOU X0, 80(DX)
15562 MOVOU X0, 96(DX)
15563 MOVOU X0, 112(DX)
15564 ADDQ $0x80, DX
15565 DECQ CX
15566 JNZ zero_loop_encodeSnappyBetterBlockAsm64K
15567 MOVL $0x00000000, 12(SP)
15568 MOVQ src_len+32(FP), CX
15569 LEAQ -9(CX), DX
15570 LEAQ -8(CX), BX
15571 MOVL BX, 8(SP)
15572 SHRQ $0x05, CX
15573 SUBL CX, DX
15574 LEAQ (AX)(DX*1), DX
15575 MOVQ DX, (SP)
15576 MOVL $0x00000001, CX
15577 MOVL $0x00000000, 16(SP)
15578 MOVQ src_base+24(FP), DX
15579
15580search_loop_encodeSnappyBetterBlockAsm64K:
15581 MOVL CX, BX
15582 SUBL 12(SP), BX
15583 SHRL $0x07, BX
15584 LEAL 1(CX)(BX*1), BX
15585 CMPL BX, 8(SP)
15586 JAE emit_remainder_encodeSnappyBetterBlockAsm64K
15587 MOVQ (DX)(CX*1), SI
15588 MOVL BX, 20(SP)
15589 MOVQ $0x00cf1bbcdcbfa563, R8
15590 MOVQ $0x9e3779b1, BX
15591 MOVQ SI, R9
15592 MOVQ SI, R10
15593 SHLQ $0x08, R9
15594 IMULQ R8, R9
15595 SHRQ $0x30, R9
15596 SHLQ $0x20, R10
15597 IMULQ BX, R10
15598 SHRQ $0x32, R10
15599 MOVL 24(SP)(R9*4), BX
15600 MOVL 262168(SP)(R10*4), DI
15601 MOVL CX, 24(SP)(R9*4)
15602 MOVL CX, 262168(SP)(R10*4)
15603 MOVQ (DX)(BX*1), R9
15604 MOVQ (DX)(DI*1), R10
15605 CMPQ R9, SI
15606 JEQ candidate_match_encodeSnappyBetterBlockAsm64K
15607 CMPQ R10, SI
15608 JNE no_short_found_encodeSnappyBetterBlockAsm64K
15609 MOVL DI, BX
15610 JMP candidate_match_encodeSnappyBetterBlockAsm64K
15611
15612no_short_found_encodeSnappyBetterBlockAsm64K:
15613 CMPL R9, SI
15614 JEQ candidate_match_encodeSnappyBetterBlockAsm64K
15615 CMPL R10, SI
15616 JEQ candidateS_match_encodeSnappyBetterBlockAsm64K
15617 MOVL 20(SP), CX
15618 JMP search_loop_encodeSnappyBetterBlockAsm64K
15619
15620candidateS_match_encodeSnappyBetterBlockAsm64K:
15621 SHRQ $0x08, SI
15622 MOVQ SI, R9
15623 SHLQ $0x08, R9
15624 IMULQ R8, R9
15625 SHRQ $0x30, R9
15626 MOVL 24(SP)(R9*4), BX
15627 INCL CX
15628 MOVL CX, 24(SP)(R9*4)
15629 CMPL (DX)(BX*1), SI
15630 JEQ candidate_match_encodeSnappyBetterBlockAsm64K
15631 DECL CX
15632 MOVL DI, BX
15633
15634candidate_match_encodeSnappyBetterBlockAsm64K:
15635 MOVL 12(SP), SI
15636 TESTL BX, BX
15637 JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
15638
15639match_extend_back_loop_encodeSnappyBetterBlockAsm64K:
15640 CMPL CX, SI
15641 JBE match_extend_back_end_encodeSnappyBetterBlockAsm64K
15642 MOVB -1(DX)(BX*1), DI
15643 MOVB -1(DX)(CX*1), R8
15644 CMPB DI, R8
15645 JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K
15646 LEAL -1(CX), CX
15647 DECL BX
15648 JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
15649 JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K
15650
15651match_extend_back_end_encodeSnappyBetterBlockAsm64K:
15652 MOVL CX, SI
15653 SUBL 12(SP), SI
15654 LEAQ 3(AX)(SI*1), SI
15655 CMPQ SI, (SP)
15656 JB match_dst_size_check_encodeSnappyBetterBlockAsm64K
15657 MOVQ $0x00000000, ret+48(FP)
15658 RET
15659
15660match_dst_size_check_encodeSnappyBetterBlockAsm64K:
15661 MOVL CX, SI
15662 ADDL $0x04, CX
15663 ADDL $0x04, BX
15664 MOVQ src_len+32(FP), DI
15665 SUBL CX, DI
15666 LEAQ (DX)(CX*1), R8
15667 LEAQ (DX)(BX*1), R9
15668
15669 // matchLen
15670 XORL R11, R11
15671
15672matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K:
15673 CMPL DI, $0x10
15674 JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K
15675 MOVQ (R8)(R11*1), R10
15676 MOVQ 8(R8)(R11*1), R12
15677 XORQ (R9)(R11*1), R10
15678 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K
15679 XORQ 8(R9)(R11*1), R12
15680 JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K
15681 LEAL -16(DI), DI
15682 LEAL 16(R11), R11
15683 JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K
15684
15685matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K:
15686#ifdef GOAMD64_v3
15687 TZCNTQ R12, R12
15688
15689#else
15690 BSFQ R12, R12
15691
15692#endif
15693 SARQ $0x03, R12
15694 LEAL 8(R11)(R12*1), R11
15695 JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
15696
15697matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K:
15698 CMPL DI, $0x08
15699 JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
15700 MOVQ (R8)(R11*1), R10
15701 XORQ (R9)(R11*1), R10
15702 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K
15703 LEAL -8(DI), DI
15704 LEAL 8(R11), R11
15705 JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
15706
15707matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K:
15708#ifdef GOAMD64_v3
15709 TZCNTQ R10, R10
15710
15711#else
15712 BSFQ R10, R10
15713
15714#endif
15715 SARQ $0x03, R10
15716 LEAL (R11)(R10*1), R11
15717 JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
15718
15719matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
15720 CMPL DI, $0x04
15721 JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
15722 MOVL (R8)(R11*1), R10
15723 CMPL (R9)(R11*1), R10
15724 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
15725 LEAL -4(DI), DI
15726 LEAL 4(R11), R11
15727
15728matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K:
15729 CMPL DI, $0x01
15730 JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
15731 JB match_nolit_end_encodeSnappyBetterBlockAsm64K
15732 MOVW (R8)(R11*1), R10
15733 CMPW (R9)(R11*1), R10
15734 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
15735 LEAL 2(R11), R11
15736 SUBL $0x02, DI
15737 JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
15738
15739matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K:
15740 MOVB (R8)(R11*1), R10
15741 CMPB (R9)(R11*1), R10
15742 JNE match_nolit_end_encodeSnappyBetterBlockAsm64K
15743 LEAL 1(R11), R11
15744
15745match_nolit_end_encodeSnappyBetterBlockAsm64K:
15746 MOVL CX, DI
15747 SUBL BX, DI
15748
15749 // Check if repeat
15750 MOVL DI, 16(SP)
15751 MOVL 12(SP), BX
15752 CMPL BX, SI
15753 JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
15754 MOVL SI, R8
15755 MOVL SI, 12(SP)
15756 LEAQ (DX)(BX*1), R9
15757 SUBL BX, R8
15758 LEAL -1(R8), BX
15759 CMPL BX, $0x3c
15760 JB one_byte_match_emit_encodeSnappyBetterBlockAsm64K
15761 CMPL BX, $0x00000100
15762 JB two_bytes_match_emit_encodeSnappyBetterBlockAsm64K
15763 JB three_bytes_match_emit_encodeSnappyBetterBlockAsm64K
15764
15765three_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
15766 MOVB $0xf4, (AX)
15767 MOVW BX, 1(AX)
15768 ADDQ $0x03, AX
15769 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
15770
15771two_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
15772 MOVB $0xf0, (AX)
15773 MOVB BL, 1(AX)
15774 ADDQ $0x02, AX
15775 CMPL BX, $0x40
15776 JB memmove_match_emit_encodeSnappyBetterBlockAsm64K
15777 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
15778
15779one_byte_match_emit_encodeSnappyBetterBlockAsm64K:
15780 SHLB $0x02, BL
15781 MOVB BL, (AX)
15782 ADDQ $0x01, AX
15783
15784memmove_match_emit_encodeSnappyBetterBlockAsm64K:
15785 LEAQ (AX)(R8*1), BX
15786
15787 // genMemMoveShort
15788 CMPQ R8, $0x08
15789 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8
15790 CMPQ R8, $0x10
15791 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
15792 CMPQ R8, $0x20
15793 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
15794 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
15795
15796emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8:
15797 MOVQ (R9), R10
15798 MOVQ R10, (AX)
15799 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
15800
15801emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
15802 MOVQ (R9), R10
15803 MOVQ -8(R9)(R8*1), R9
15804 MOVQ R10, (AX)
15805 MOVQ R9, -8(AX)(R8*1)
15806 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
15807
15808emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
15809 MOVOU (R9), X0
15810 MOVOU -16(R9)(R8*1), X1
15811 MOVOU X0, (AX)
15812 MOVOU X1, -16(AX)(R8*1)
15813 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
15814
15815emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
15816 MOVOU (R9), X0
15817 MOVOU 16(R9), X1
15818 MOVOU -32(R9)(R8*1), X2
15819 MOVOU -16(R9)(R8*1), X3
15820 MOVOU X0, (AX)
15821 MOVOU X1, 16(AX)
15822 MOVOU X2, -32(AX)(R8*1)
15823 MOVOU X3, -16(AX)(R8*1)
15824
15825memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K:
15826 MOVQ BX, AX
15827 JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
15828
15829memmove_long_match_emit_encodeSnappyBetterBlockAsm64K:
15830 LEAQ (AX)(R8*1), BX
15831
15832 // genMemMoveLong
15833 MOVOU (R9), X0
15834 MOVOU 16(R9), X1
15835 MOVOU -32(R9)(R8*1), X2
15836 MOVOU -16(R9)(R8*1), X3
15837 MOVQ R8, R12
15838 SHRQ $0x05, R12
15839 MOVQ AX, R10
15840 ANDL $0x0000001f, R10
15841 MOVQ $0x00000040, R13
15842 SUBQ R10, R13
15843 DECQ R12
15844 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
15845 LEAQ -32(R9)(R13*1), R10
15846 LEAQ -32(AX)(R13*1), R14
15847
15848emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
15849 MOVOU (R10), X4
15850 MOVOU 16(R10), X5
15851 MOVOA X4, (R14)
15852 MOVOA X5, 16(R14)
15853 ADDQ $0x20, R14
15854 ADDQ $0x20, R10
15855 ADDQ $0x20, R13
15856 DECQ R12
15857 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
15858
15859emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
15860 MOVOU -32(R9)(R13*1), X4
15861 MOVOU -16(R9)(R13*1), X5
15862 MOVOA X4, -32(AX)(R13*1)
15863 MOVOA X5, -16(AX)(R13*1)
15864 ADDQ $0x20, R13
15865 CMPQ R8, R13
15866 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
15867 MOVOU X0, (AX)
15868 MOVOU X1, 16(AX)
15869 MOVOU X2, -32(AX)(R8*1)
15870 MOVOU X3, -16(AX)(R8*1)
15871 MOVQ BX, AX
15872
15873emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K:
15874 ADDL R11, CX
15875 ADDL $0x04, R11
15876 MOVL CX, 12(SP)
15877
15878 // emitCopy
15879two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K:
15880 CMPL R11, $0x40
15881 JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K
15882 MOVB $0xee, (AX)
15883 MOVW DI, 1(AX)
15884 LEAL -60(R11), R11
15885 ADDQ $0x03, AX
15886 JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K
15887
15888two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K:
15889 MOVL R11, BX
15890 SHLL $0x02, BX
15891 CMPL R11, $0x0c
15892 JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
15893 CMPL DI, $0x00000800
15894 JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
15895 LEAL -15(BX), BX
15896 MOVB DI, 1(AX)
15897 SHRL $0x08, DI
15898 SHLL $0x05, DI
15899 ORL DI, BX
15900 MOVB BL, (AX)
15901 ADDQ $0x02, AX
15902 JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K
15903
15904emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K:
15905 LEAL -2(BX), BX
15906 MOVB BL, (AX)
15907 MOVW DI, 1(AX)
15908 ADDQ $0x03, AX
15909
15910match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K:
15911 CMPL CX, 8(SP)
15912 JAE emit_remainder_encodeSnappyBetterBlockAsm64K
15913 CMPQ AX, (SP)
15914 JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K
15915 MOVQ $0x00000000, ret+48(FP)
15916 RET
15917
15918match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K:
15919 MOVQ $0x00cf1bbcdcbfa563, BX
15920 MOVQ $0x9e3779b1, DI
15921 LEAQ 1(SI), SI
15922 LEAQ -2(CX), R8
15923 MOVQ (DX)(SI*1), R9
15924 MOVQ 1(DX)(SI*1), R10
15925 MOVQ (DX)(R8*1), R11
15926 MOVQ 1(DX)(R8*1), R12
15927 SHLQ $0x08, R9
15928 IMULQ BX, R9
15929 SHRQ $0x30, R9
15930 SHLQ $0x20, R10
15931 IMULQ DI, R10
15932 SHRQ $0x32, R10
15933 SHLQ $0x08, R11
15934 IMULQ BX, R11
15935 SHRQ $0x30, R11
15936 SHLQ $0x20, R12
15937 IMULQ DI, R12
15938 SHRQ $0x32, R12
15939 LEAQ 1(SI), DI
15940 LEAQ 1(R8), R13
15941 MOVL SI, 24(SP)(R9*4)
15942 MOVL R8, 24(SP)(R11*4)
15943 MOVL DI, 262168(SP)(R10*4)
15944 MOVL R13, 262168(SP)(R12*4)
15945 LEAQ 1(R8)(SI*1), DI
15946 SHRQ $0x01, DI
15947 ADDQ $0x01, SI
15948 SUBQ $0x01, R8
15949
15950index_loop_encodeSnappyBetterBlockAsm64K:
15951 CMPQ DI, R8
15952 JAE search_loop_encodeSnappyBetterBlockAsm64K
15953 MOVQ (DX)(SI*1), R9
15954 MOVQ (DX)(DI*1), R10
15955 SHLQ $0x08, R9
15956 IMULQ BX, R9
15957 SHRQ $0x30, R9
15958 SHLQ $0x08, R10
15959 IMULQ BX, R10
15960 SHRQ $0x30, R10
15961 MOVL SI, 24(SP)(R9*4)
15962 MOVL DI, 24(SP)(R10*4)
15963 ADDQ $0x02, SI
15964 ADDQ $0x02, DI
15965 JMP index_loop_encodeSnappyBetterBlockAsm64K
15966
15967emit_remainder_encodeSnappyBetterBlockAsm64K:
15968 MOVQ src_len+32(FP), CX
15969 SUBL 12(SP), CX
15970 LEAQ 3(AX)(CX*1), CX
15971 CMPQ CX, (SP)
15972 JB emit_remainder_ok_encodeSnappyBetterBlockAsm64K
15973 MOVQ $0x00000000, ret+48(FP)
15974 RET
15975
15976emit_remainder_ok_encodeSnappyBetterBlockAsm64K:
15977 MOVQ src_len+32(FP), CX
15978 MOVL 12(SP), BX
15979 CMPL BX, CX
15980 JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
15981 MOVL CX, SI
15982 MOVL CX, 12(SP)
15983 LEAQ (DX)(BX*1), CX
15984 SUBL BX, SI
15985 LEAL -1(SI), DX
15986 CMPL DX, $0x3c
15987 JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K
15988 CMPL DX, $0x00000100
15989 JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K
15990 JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K
15991
15992three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
15993 MOVB $0xf4, (AX)
15994 MOVW DX, 1(AX)
15995 ADDQ $0x03, AX
15996 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
15997
15998two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
15999 MOVB $0xf0, (AX)
16000 MOVB DL, 1(AX)
16001 ADDQ $0x02, AX
16002 CMPL DX, $0x40
16003 JB memmove_emit_remainder_encodeSnappyBetterBlockAsm64K
16004 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
16005
16006one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K:
16007 SHLB $0x02, DL
16008 MOVB DL, (AX)
16009 ADDQ $0x01, AX
16010
16011memmove_emit_remainder_encodeSnappyBetterBlockAsm64K:
16012 LEAQ (AX)(SI*1), DX
16013 MOVL SI, BX
16014
16015 // genMemMoveShort
16016 CMPQ BX, $0x03
16017 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2
16018 JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3
16019 CMPQ BX, $0x08
16020 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7
16021 CMPQ BX, $0x10
16022 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
16023 CMPQ BX, $0x20
16024 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
16025 JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
16026
16027emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2:
16028 MOVB (CX), SI
16029 MOVB -1(CX)(BX*1), CL
16030 MOVB SI, (AX)
16031 MOVB CL, -1(AX)(BX*1)
16032 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
16033
16034emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3:
16035 MOVW (CX), SI
16036 MOVB 2(CX), CL
16037 MOVW SI, (AX)
16038 MOVB CL, 2(AX)
16039 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
16040
16041emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7:
16042 MOVL (CX), SI
16043 MOVL -4(CX)(BX*1), CX
16044 MOVL SI, (AX)
16045 MOVL CX, -4(AX)(BX*1)
16046 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
16047
16048emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
16049 MOVQ (CX), SI
16050 MOVQ -8(CX)(BX*1), CX
16051 MOVQ SI, (AX)
16052 MOVQ CX, -8(AX)(BX*1)
16053 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
16054
16055emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
16056 MOVOU (CX), X0
16057 MOVOU -16(CX)(BX*1), X1
16058 MOVOU X0, (AX)
16059 MOVOU X1, -16(AX)(BX*1)
16060 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
16061
16062emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
16063 MOVOU (CX), X0
16064 MOVOU 16(CX), X1
16065 MOVOU -32(CX)(BX*1), X2
16066 MOVOU -16(CX)(BX*1), X3
16067 MOVOU X0, (AX)
16068 MOVOU X1, 16(AX)
16069 MOVOU X2, -32(AX)(BX*1)
16070 MOVOU X3, -16(AX)(BX*1)
16071
16072memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K:
16073 MOVQ DX, AX
16074 JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
16075
16076memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K:
16077 LEAQ (AX)(SI*1), DX
16078 MOVL SI, BX
16079
16080 // genMemMoveLong
16081 MOVOU (CX), X0
16082 MOVOU 16(CX), X1
16083 MOVOU -32(CX)(BX*1), X2
16084 MOVOU -16(CX)(BX*1), X3
16085 MOVQ BX, DI
16086 SHRQ $0x05, DI
16087 MOVQ AX, SI
16088 ANDL $0x0000001f, SI
16089 MOVQ $0x00000040, R8
16090 SUBQ SI, R8
16091 DECQ DI
16092 JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
16093 LEAQ -32(CX)(R8*1), SI
16094 LEAQ -32(AX)(R8*1), R9
16095
16096emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
16097 MOVOU (SI), X4
16098 MOVOU 16(SI), X5
16099 MOVOA X4, (R9)
16100 MOVOA X5, 16(R9)
16101 ADDQ $0x20, R9
16102 ADDQ $0x20, SI
16103 ADDQ $0x20, R8
16104 DECQ DI
16105 JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
16106
16107emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
16108 MOVOU -32(CX)(R8*1), X4
16109 MOVOU -16(CX)(R8*1), X5
16110 MOVOA X4, -32(AX)(R8*1)
16111 MOVOA X5, -16(AX)(R8*1)
16112 ADDQ $0x20, R8
16113 CMPQ BX, R8
16114 JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
16115 MOVOU X0, (AX)
16116 MOVOU X1, 16(AX)
16117 MOVOU X2, -32(AX)(BX*1)
16118 MOVOU X3, -16(AX)(BX*1)
16119 MOVQ DX, AX
16120
16121emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K:
16122 MOVQ dst_base+0(FP), CX
16123 SUBQ CX, AX
16124 MOVQ AX, ret+48(FP)
16125 RET
16126
16127// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
16128// Requires: BMI, SSE2
16129TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56
16130 MOVQ dst_base+0(FP), AX
16131 MOVQ $0x00000280, CX
16132 LEAQ 24(SP), DX
16133 PXOR X0, X0
16134
16135zero_loop_encodeSnappyBetterBlockAsm12B:
16136 MOVOU X0, (DX)
16137 MOVOU X0, 16(DX)
16138 MOVOU X0, 32(DX)
16139 MOVOU X0, 48(DX)
16140 MOVOU X0, 64(DX)
16141 MOVOU X0, 80(DX)
16142 MOVOU X0, 96(DX)
16143 MOVOU X0, 112(DX)
16144 ADDQ $0x80, DX
16145 DECQ CX
16146 JNZ zero_loop_encodeSnappyBetterBlockAsm12B
16147 MOVL $0x00000000, 12(SP)
16148 MOVQ src_len+32(FP), CX
16149 LEAQ -9(CX), DX
16150 LEAQ -8(CX), BX
16151 MOVL BX, 8(SP)
16152 SHRQ $0x05, CX
16153 SUBL CX, DX
16154 LEAQ (AX)(DX*1), DX
16155 MOVQ DX, (SP)
16156 MOVL $0x00000001, CX
16157 MOVL $0x00000000, 16(SP)
16158 MOVQ src_base+24(FP), DX
16159
16160search_loop_encodeSnappyBetterBlockAsm12B:
16161 MOVL CX, BX
16162 SUBL 12(SP), BX
16163 SHRL $0x06, BX
16164 LEAL 1(CX)(BX*1), BX
16165 CMPL BX, 8(SP)
16166 JAE emit_remainder_encodeSnappyBetterBlockAsm12B
16167 MOVQ (DX)(CX*1), SI
16168 MOVL BX, 20(SP)
16169 MOVQ $0x0000cf1bbcdcbf9b, R8
16170 MOVQ $0x9e3779b1, BX
16171 MOVQ SI, R9
16172 MOVQ SI, R10
16173 SHLQ $0x10, R9
16174 IMULQ R8, R9
16175 SHRQ $0x32, R9
16176 SHLQ $0x20, R10
16177 IMULQ BX, R10
16178 SHRQ $0x34, R10
16179 MOVL 24(SP)(R9*4), BX
16180 MOVL 65560(SP)(R10*4), DI
16181 MOVL CX, 24(SP)(R9*4)
16182 MOVL CX, 65560(SP)(R10*4)
16183 MOVQ (DX)(BX*1), R9
16184 MOVQ (DX)(DI*1), R10
16185 CMPQ R9, SI
16186 JEQ candidate_match_encodeSnappyBetterBlockAsm12B
16187 CMPQ R10, SI
16188 JNE no_short_found_encodeSnappyBetterBlockAsm12B
16189 MOVL DI, BX
16190 JMP candidate_match_encodeSnappyBetterBlockAsm12B
16191
16192no_short_found_encodeSnappyBetterBlockAsm12B:
16193 CMPL R9, SI
16194 JEQ candidate_match_encodeSnappyBetterBlockAsm12B
16195 CMPL R10, SI
16196 JEQ candidateS_match_encodeSnappyBetterBlockAsm12B
16197 MOVL 20(SP), CX
16198 JMP search_loop_encodeSnappyBetterBlockAsm12B
16199
16200candidateS_match_encodeSnappyBetterBlockAsm12B:
16201 SHRQ $0x08, SI
16202 MOVQ SI, R9
16203 SHLQ $0x10, R9
16204 IMULQ R8, R9
16205 SHRQ $0x32, R9
16206 MOVL 24(SP)(R9*4), BX
16207 INCL CX
16208 MOVL CX, 24(SP)(R9*4)
16209 CMPL (DX)(BX*1), SI
16210 JEQ candidate_match_encodeSnappyBetterBlockAsm12B
16211 DECL CX
16212 MOVL DI, BX
16213
16214candidate_match_encodeSnappyBetterBlockAsm12B:
16215 MOVL 12(SP), SI
16216 TESTL BX, BX
16217 JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
16218
16219match_extend_back_loop_encodeSnappyBetterBlockAsm12B:
16220 CMPL CX, SI
16221 JBE match_extend_back_end_encodeSnappyBetterBlockAsm12B
16222 MOVB -1(DX)(BX*1), DI
16223 MOVB -1(DX)(CX*1), R8
16224 CMPB DI, R8
16225 JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B
16226 LEAL -1(CX), CX
16227 DECL BX
16228 JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
16229 JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B
16230
16231match_extend_back_end_encodeSnappyBetterBlockAsm12B:
16232 MOVL CX, SI
16233 SUBL 12(SP), SI
16234 LEAQ 3(AX)(SI*1), SI
16235 CMPQ SI, (SP)
16236 JB match_dst_size_check_encodeSnappyBetterBlockAsm12B
16237 MOVQ $0x00000000, ret+48(FP)
16238 RET
16239
16240match_dst_size_check_encodeSnappyBetterBlockAsm12B:
16241 MOVL CX, SI
16242 ADDL $0x04, CX
16243 ADDL $0x04, BX
16244 MOVQ src_len+32(FP), DI
16245 SUBL CX, DI
16246 LEAQ (DX)(CX*1), R8
16247 LEAQ (DX)(BX*1), R9
16248
16249 // matchLen
16250 XORL R11, R11
16251
16252matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B:
16253 CMPL DI, $0x10
16254 JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B
16255 MOVQ (R8)(R11*1), R10
16256 MOVQ 8(R8)(R11*1), R12
16257 XORQ (R9)(R11*1), R10
16258 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B
16259 XORQ 8(R9)(R11*1), R12
16260 JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B
16261 LEAL -16(DI), DI
16262 LEAL 16(R11), R11
16263 JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B
16264
16265matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B:
16266#ifdef GOAMD64_v3
16267 TZCNTQ R12, R12
16268
16269#else
16270 BSFQ R12, R12
16271
16272#endif
16273 SARQ $0x03, R12
16274 LEAL 8(R11)(R12*1), R11
16275 JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
16276
16277matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B:
16278 CMPL DI, $0x08
16279 JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
16280 MOVQ (R8)(R11*1), R10
16281 XORQ (R9)(R11*1), R10
16282 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B
16283 LEAL -8(DI), DI
16284 LEAL 8(R11), R11
16285 JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
16286
16287matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B:
16288#ifdef GOAMD64_v3
16289 TZCNTQ R10, R10
16290
16291#else
16292 BSFQ R10, R10
16293
16294#endif
16295 SARQ $0x03, R10
16296 LEAL (R11)(R10*1), R11
16297 JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
16298
16299matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
16300 CMPL DI, $0x04
16301 JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
16302 MOVL (R8)(R11*1), R10
16303 CMPL (R9)(R11*1), R10
16304 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
16305 LEAL -4(DI), DI
16306 LEAL 4(R11), R11
16307
16308matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B:
16309 CMPL DI, $0x01
16310 JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
16311 JB match_nolit_end_encodeSnappyBetterBlockAsm12B
16312 MOVW (R8)(R11*1), R10
16313 CMPW (R9)(R11*1), R10
16314 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
16315 LEAL 2(R11), R11
16316 SUBL $0x02, DI
16317 JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
16318
16319matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B:
16320 MOVB (R8)(R11*1), R10
16321 CMPB (R9)(R11*1), R10
16322 JNE match_nolit_end_encodeSnappyBetterBlockAsm12B
16323 LEAL 1(R11), R11
16324
16325match_nolit_end_encodeSnappyBetterBlockAsm12B:
16326 MOVL CX, DI
16327 SUBL BX, DI
16328
16329 // Check if repeat
16330 MOVL DI, 16(SP)
16331 MOVL 12(SP), BX
16332 CMPL BX, SI
16333 JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
16334 MOVL SI, R8
16335 MOVL SI, 12(SP)
16336 LEAQ (DX)(BX*1), R9
16337 SUBL BX, R8
16338 LEAL -1(R8), BX
16339 CMPL BX, $0x3c
16340 JB one_byte_match_emit_encodeSnappyBetterBlockAsm12B
16341 CMPL BX, $0x00000100
16342 JB two_bytes_match_emit_encodeSnappyBetterBlockAsm12B
16343 JB three_bytes_match_emit_encodeSnappyBetterBlockAsm12B
16344
16345three_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
16346 MOVB $0xf4, (AX)
16347 MOVW BX, 1(AX)
16348 ADDQ $0x03, AX
16349 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
16350
16351two_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
16352 MOVB $0xf0, (AX)
16353 MOVB BL, 1(AX)
16354 ADDQ $0x02, AX
16355 CMPL BX, $0x40
16356 JB memmove_match_emit_encodeSnappyBetterBlockAsm12B
16357 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
16358
16359one_byte_match_emit_encodeSnappyBetterBlockAsm12B:
16360 SHLB $0x02, BL
16361 MOVB BL, (AX)
16362 ADDQ $0x01, AX
16363
16364memmove_match_emit_encodeSnappyBetterBlockAsm12B:
16365 LEAQ (AX)(R8*1), BX
16366
16367 // genMemMoveShort
16368 CMPQ R8, $0x08
16369 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8
16370 CMPQ R8, $0x10
16371 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
16372 CMPQ R8, $0x20
16373 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
16374 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
16375
16376emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8:
16377 MOVQ (R9), R10
16378 MOVQ R10, (AX)
16379 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
16380
16381emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
16382 MOVQ (R9), R10
16383 MOVQ -8(R9)(R8*1), R9
16384 MOVQ R10, (AX)
16385 MOVQ R9, -8(AX)(R8*1)
16386 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
16387
16388emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
16389 MOVOU (R9), X0
16390 MOVOU -16(R9)(R8*1), X1
16391 MOVOU X0, (AX)
16392 MOVOU X1, -16(AX)(R8*1)
16393 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
16394
16395emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
16396 MOVOU (R9), X0
16397 MOVOU 16(R9), X1
16398 MOVOU -32(R9)(R8*1), X2
16399 MOVOU -16(R9)(R8*1), X3
16400 MOVOU X0, (AX)
16401 MOVOU X1, 16(AX)
16402 MOVOU X2, -32(AX)(R8*1)
16403 MOVOU X3, -16(AX)(R8*1)
16404
16405memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B:
16406 MOVQ BX, AX
16407 JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
16408
16409memmove_long_match_emit_encodeSnappyBetterBlockAsm12B:
16410 LEAQ (AX)(R8*1), BX
16411
16412 // genMemMoveLong
16413 MOVOU (R9), X0
16414 MOVOU 16(R9), X1
16415 MOVOU -32(R9)(R8*1), X2
16416 MOVOU -16(R9)(R8*1), X3
16417 MOVQ R8, R12
16418 SHRQ $0x05, R12
16419 MOVQ AX, R10
16420 ANDL $0x0000001f, R10
16421 MOVQ $0x00000040, R13
16422 SUBQ R10, R13
16423 DECQ R12
16424 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
16425 LEAQ -32(R9)(R13*1), R10
16426 LEAQ -32(AX)(R13*1), R14
16427
16428emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
16429 MOVOU (R10), X4
16430 MOVOU 16(R10), X5
16431 MOVOA X4, (R14)
16432 MOVOA X5, 16(R14)
16433 ADDQ $0x20, R14
16434 ADDQ $0x20, R10
16435 ADDQ $0x20, R13
16436 DECQ R12
16437 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
16438
16439emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
16440 MOVOU -32(R9)(R13*1), X4
16441 MOVOU -16(R9)(R13*1), X5
16442 MOVOA X4, -32(AX)(R13*1)
16443 MOVOA X5, -16(AX)(R13*1)
16444 ADDQ $0x20, R13
16445 CMPQ R8, R13
16446 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
16447 MOVOU X0, (AX)
16448 MOVOU X1, 16(AX)
16449 MOVOU X2, -32(AX)(R8*1)
16450 MOVOU X3, -16(AX)(R8*1)
16451 MOVQ BX, AX
16452
16453emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B:
16454 ADDL R11, CX
16455 ADDL $0x04, R11
16456 MOVL CX, 12(SP)
16457
16458 // emitCopy
16459two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B:
16460 CMPL R11, $0x40
16461 JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B
16462 MOVB $0xee, (AX)
16463 MOVW DI, 1(AX)
16464 LEAL -60(R11), R11
16465 ADDQ $0x03, AX
16466 JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B
16467
16468two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B:
16469 MOVL R11, BX
16470 SHLL $0x02, BX
16471 CMPL R11, $0x0c
16472 JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
16473 CMPL DI, $0x00000800
16474 JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
16475 LEAL -15(BX), BX
16476 MOVB DI, 1(AX)
16477 SHRL $0x08, DI
16478 SHLL $0x05, DI
16479 ORL DI, BX
16480 MOVB BL, (AX)
16481 ADDQ $0x02, AX
16482 JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B
16483
16484emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B:
16485 LEAL -2(BX), BX
16486 MOVB BL, (AX)
16487 MOVW DI, 1(AX)
16488 ADDQ $0x03, AX
16489
16490match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B:
16491 CMPL CX, 8(SP)
16492 JAE emit_remainder_encodeSnappyBetterBlockAsm12B
16493 CMPQ AX, (SP)
16494 JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B
16495 MOVQ $0x00000000, ret+48(FP)
16496 RET
16497
16498match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B:
16499 MOVQ $0x0000cf1bbcdcbf9b, BX
16500 MOVQ $0x9e3779b1, DI
16501 LEAQ 1(SI), SI
16502 LEAQ -2(CX), R8
16503 MOVQ (DX)(SI*1), R9
16504 MOVQ 1(DX)(SI*1), R10
16505 MOVQ (DX)(R8*1), R11
16506 MOVQ 1(DX)(R8*1), R12
16507 SHLQ $0x10, R9
16508 IMULQ BX, R9
16509 SHRQ $0x32, R9
16510 SHLQ $0x20, R10
16511 IMULQ DI, R10
16512 SHRQ $0x34, R10
16513 SHLQ $0x10, R11
16514 IMULQ BX, R11
16515 SHRQ $0x32, R11
16516 SHLQ $0x20, R12
16517 IMULQ DI, R12
16518 SHRQ $0x34, R12
16519 LEAQ 1(SI), DI
16520 LEAQ 1(R8), R13
16521 MOVL SI, 24(SP)(R9*4)
16522 MOVL R8, 24(SP)(R11*4)
16523 MOVL DI, 65560(SP)(R10*4)
16524 MOVL R13, 65560(SP)(R12*4)
16525 LEAQ 1(R8)(SI*1), DI
16526 SHRQ $0x01, DI
16527 ADDQ $0x01, SI
16528 SUBQ $0x01, R8
16529
16530index_loop_encodeSnappyBetterBlockAsm12B:
16531 CMPQ DI, R8
16532 JAE search_loop_encodeSnappyBetterBlockAsm12B
16533 MOVQ (DX)(SI*1), R9
16534 MOVQ (DX)(DI*1), R10
16535 SHLQ $0x10, R9
16536 IMULQ BX, R9
16537 SHRQ $0x32, R9
16538 SHLQ $0x10, R10
16539 IMULQ BX, R10
16540 SHRQ $0x32, R10
16541 MOVL SI, 24(SP)(R9*4)
16542 MOVL DI, 24(SP)(R10*4)
16543 ADDQ $0x02, SI
16544 ADDQ $0x02, DI
16545 JMP index_loop_encodeSnappyBetterBlockAsm12B
16546
16547emit_remainder_encodeSnappyBetterBlockAsm12B:
16548 MOVQ src_len+32(FP), CX
16549 SUBL 12(SP), CX
16550 LEAQ 3(AX)(CX*1), CX
16551 CMPQ CX, (SP)
16552 JB emit_remainder_ok_encodeSnappyBetterBlockAsm12B
16553 MOVQ $0x00000000, ret+48(FP)
16554 RET
16555
16556emit_remainder_ok_encodeSnappyBetterBlockAsm12B:
16557 MOVQ src_len+32(FP), CX
16558 MOVL 12(SP), BX
16559 CMPL BX, CX
16560 JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
16561 MOVL CX, SI
16562 MOVL CX, 12(SP)
16563 LEAQ (DX)(BX*1), CX
16564 SUBL BX, SI
16565 LEAL -1(SI), DX
16566 CMPL DX, $0x3c
16567 JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B
16568 CMPL DX, $0x00000100
16569 JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B
16570 JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B
16571
16572three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
16573 MOVB $0xf4, (AX)
16574 MOVW DX, 1(AX)
16575 ADDQ $0x03, AX
16576 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
16577
16578two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
16579 MOVB $0xf0, (AX)
16580 MOVB DL, 1(AX)
16581 ADDQ $0x02, AX
16582 CMPL DX, $0x40
16583 JB memmove_emit_remainder_encodeSnappyBetterBlockAsm12B
16584 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
16585
16586one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B:
16587 SHLB $0x02, DL
16588 MOVB DL, (AX)
16589 ADDQ $0x01, AX
16590
16591memmove_emit_remainder_encodeSnappyBetterBlockAsm12B:
16592 LEAQ (AX)(SI*1), DX
16593 MOVL SI, BX
16594
16595 // genMemMoveShort
16596 CMPQ BX, $0x03
16597 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2
16598 JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3
16599 CMPQ BX, $0x08
16600 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7
16601 CMPQ BX, $0x10
16602 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
16603 CMPQ BX, $0x20
16604 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
16605 JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
16606
16607emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2:
16608 MOVB (CX), SI
16609 MOVB -1(CX)(BX*1), CL
16610 MOVB SI, (AX)
16611 MOVB CL, -1(AX)(BX*1)
16612 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
16613
16614emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3:
16615 MOVW (CX), SI
16616 MOVB 2(CX), CL
16617 MOVW SI, (AX)
16618 MOVB CL, 2(AX)
16619 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
16620
16621emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7:
16622 MOVL (CX), SI
16623 MOVL -4(CX)(BX*1), CX
16624 MOVL SI, (AX)
16625 MOVL CX, -4(AX)(BX*1)
16626 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
16627
16628emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
16629 MOVQ (CX), SI
16630 MOVQ -8(CX)(BX*1), CX
16631 MOVQ SI, (AX)
16632 MOVQ CX, -8(AX)(BX*1)
16633 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
16634
16635emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
16636 MOVOU (CX), X0
16637 MOVOU -16(CX)(BX*1), X1
16638 MOVOU X0, (AX)
16639 MOVOU X1, -16(AX)(BX*1)
16640 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
16641
16642emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
16643 MOVOU (CX), X0
16644 MOVOU 16(CX), X1
16645 MOVOU -32(CX)(BX*1), X2
16646 MOVOU -16(CX)(BX*1), X3
16647 MOVOU X0, (AX)
16648 MOVOU X1, 16(AX)
16649 MOVOU X2, -32(AX)(BX*1)
16650 MOVOU X3, -16(AX)(BX*1)
16651
16652memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B:
16653 MOVQ DX, AX
16654 JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
16655
16656memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B:
16657 LEAQ (AX)(SI*1), DX
16658 MOVL SI, BX
16659
16660 // genMemMoveLong
16661 MOVOU (CX), X0
16662 MOVOU 16(CX), X1
16663 MOVOU -32(CX)(BX*1), X2
16664 MOVOU -16(CX)(BX*1), X3
16665 MOVQ BX, DI
16666 SHRQ $0x05, DI
16667 MOVQ AX, SI
16668 ANDL $0x0000001f, SI
16669 MOVQ $0x00000040, R8
16670 SUBQ SI, R8
16671 DECQ DI
16672 JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
16673 LEAQ -32(CX)(R8*1), SI
16674 LEAQ -32(AX)(R8*1), R9
16675
16676emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
16677 MOVOU (SI), X4
16678 MOVOU 16(SI), X5
16679 MOVOA X4, (R9)
16680 MOVOA X5, 16(R9)
16681 ADDQ $0x20, R9
16682 ADDQ $0x20, SI
16683 ADDQ $0x20, R8
16684 DECQ DI
16685 JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
16686
16687emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
16688 MOVOU -32(CX)(R8*1), X4
16689 MOVOU -16(CX)(R8*1), X5
16690 MOVOA X4, -32(AX)(R8*1)
16691 MOVOA X5, -16(AX)(R8*1)
16692 ADDQ $0x20, R8
16693 CMPQ BX, R8
16694 JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
16695 MOVOU X0, (AX)
16696 MOVOU X1, 16(AX)
16697 MOVOU X2, -32(AX)(BX*1)
16698 MOVOU X3, -16(AX)(BX*1)
16699 MOVQ DX, AX
16700
16701emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B:
16702 MOVQ dst_base+0(FP), CX
16703 SUBQ CX, AX
16704 MOVQ AX, ret+48(FP)
16705 RET
16706
16707// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
16708// Requires: BMI, SSE2
16709TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56
16710 MOVQ dst_base+0(FP), AX
16711 MOVQ $0x000000a0, CX
16712 LEAQ 24(SP), DX
16713 PXOR X0, X0
16714
16715zero_loop_encodeSnappyBetterBlockAsm10B:
16716 MOVOU X0, (DX)
16717 MOVOU X0, 16(DX)
16718 MOVOU X0, 32(DX)
16719 MOVOU X0, 48(DX)
16720 MOVOU X0, 64(DX)
16721 MOVOU X0, 80(DX)
16722 MOVOU X0, 96(DX)
16723 MOVOU X0, 112(DX)
16724 ADDQ $0x80, DX
16725 DECQ CX
16726 JNZ zero_loop_encodeSnappyBetterBlockAsm10B
16727 MOVL $0x00000000, 12(SP)
16728 MOVQ src_len+32(FP), CX
16729 LEAQ -9(CX), DX
16730 LEAQ -8(CX), BX
16731 MOVL BX, 8(SP)
16732 SHRQ $0x05, CX
16733 SUBL CX, DX
16734 LEAQ (AX)(DX*1), DX
16735 MOVQ DX, (SP)
16736 MOVL $0x00000001, CX
16737 MOVL $0x00000000, 16(SP)
16738 MOVQ src_base+24(FP), DX
16739
16740search_loop_encodeSnappyBetterBlockAsm10B:
16741 MOVL CX, BX
16742 SUBL 12(SP), BX
16743 SHRL $0x05, BX
16744 LEAL 1(CX)(BX*1), BX
16745 CMPL BX, 8(SP)
16746 JAE emit_remainder_encodeSnappyBetterBlockAsm10B
16747 MOVQ (DX)(CX*1), SI
16748 MOVL BX, 20(SP)
16749 MOVQ $0x0000cf1bbcdcbf9b, R8
16750 MOVQ $0x9e3779b1, BX
16751 MOVQ SI, R9
16752 MOVQ SI, R10
16753 SHLQ $0x10, R9
16754 IMULQ R8, R9
16755 SHRQ $0x34, R9
16756 SHLQ $0x20, R10
16757 IMULQ BX, R10
16758 SHRQ $0x36, R10
16759 MOVL 24(SP)(R9*4), BX
16760 MOVL 16408(SP)(R10*4), DI
16761 MOVL CX, 24(SP)(R9*4)
16762 MOVL CX, 16408(SP)(R10*4)
16763 MOVQ (DX)(BX*1), R9
16764 MOVQ (DX)(DI*1), R10
16765 CMPQ R9, SI
16766 JEQ candidate_match_encodeSnappyBetterBlockAsm10B
16767 CMPQ R10, SI
16768 JNE no_short_found_encodeSnappyBetterBlockAsm10B
16769 MOVL DI, BX
16770 JMP candidate_match_encodeSnappyBetterBlockAsm10B
16771
16772no_short_found_encodeSnappyBetterBlockAsm10B:
16773 CMPL R9, SI
16774 JEQ candidate_match_encodeSnappyBetterBlockAsm10B
16775 CMPL R10, SI
16776 JEQ candidateS_match_encodeSnappyBetterBlockAsm10B
16777 MOVL 20(SP), CX
16778 JMP search_loop_encodeSnappyBetterBlockAsm10B
16779
16780candidateS_match_encodeSnappyBetterBlockAsm10B:
16781 SHRQ $0x08, SI
16782 MOVQ SI, R9
16783 SHLQ $0x10, R9
16784 IMULQ R8, R9
16785 SHRQ $0x34, R9
16786 MOVL 24(SP)(R9*4), BX
16787 INCL CX
16788 MOVL CX, 24(SP)(R9*4)
16789 CMPL (DX)(BX*1), SI
16790 JEQ candidate_match_encodeSnappyBetterBlockAsm10B
16791 DECL CX
16792 MOVL DI, BX
16793
16794candidate_match_encodeSnappyBetterBlockAsm10B:
16795 MOVL 12(SP), SI
16796 TESTL BX, BX
16797 JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
16798
16799match_extend_back_loop_encodeSnappyBetterBlockAsm10B:
16800 CMPL CX, SI
16801 JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B
16802 MOVB -1(DX)(BX*1), DI
16803 MOVB -1(DX)(CX*1), R8
16804 CMPB DI, R8
16805 JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B
16806 LEAL -1(CX), CX
16807 DECL BX
16808 JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
16809 JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B
16810
16811match_extend_back_end_encodeSnappyBetterBlockAsm10B:
16812 MOVL CX, SI
16813 SUBL 12(SP), SI
16814 LEAQ 3(AX)(SI*1), SI
16815 CMPQ SI, (SP)
16816 JB match_dst_size_check_encodeSnappyBetterBlockAsm10B
16817 MOVQ $0x00000000, ret+48(FP)
16818 RET
16819
16820match_dst_size_check_encodeSnappyBetterBlockAsm10B:
16821 MOVL CX, SI
16822 ADDL $0x04, CX
16823 ADDL $0x04, BX
16824 MOVQ src_len+32(FP), DI
16825 SUBL CX, DI
16826 LEAQ (DX)(CX*1), R8
16827 LEAQ (DX)(BX*1), R9
16828
16829 // matchLen
16830 XORL R11, R11
16831
16832matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B:
16833 CMPL DI, $0x10
16834 JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B
16835 MOVQ (R8)(R11*1), R10
16836 MOVQ 8(R8)(R11*1), R12
16837 XORQ (R9)(R11*1), R10
16838 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B
16839 XORQ 8(R9)(R11*1), R12
16840 JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B
16841 LEAL -16(DI), DI
16842 LEAL 16(R11), R11
16843 JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B
16844
16845matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B:
16846#ifdef GOAMD64_v3
16847 TZCNTQ R12, R12
16848
16849#else
16850 BSFQ R12, R12
16851
16852#endif
16853 SARQ $0x03, R12
16854 LEAL 8(R11)(R12*1), R11
16855 JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
16856
16857matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B:
16858 CMPL DI, $0x08
16859 JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
16860 MOVQ (R8)(R11*1), R10
16861 XORQ (R9)(R11*1), R10
16862 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B
16863 LEAL -8(DI), DI
16864 LEAL 8(R11), R11
16865 JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
16866
16867matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B:
16868#ifdef GOAMD64_v3
16869 TZCNTQ R10, R10
16870
16871#else
16872 BSFQ R10, R10
16873
16874#endif
16875 SARQ $0x03, R10
16876 LEAL (R11)(R10*1), R11
16877 JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
16878
16879matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
16880 CMPL DI, $0x04
16881 JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
16882 MOVL (R8)(R11*1), R10
16883 CMPL (R9)(R11*1), R10
16884 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
16885 LEAL -4(DI), DI
16886 LEAL 4(R11), R11
16887
16888matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B:
16889 CMPL DI, $0x01
16890 JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
16891 JB match_nolit_end_encodeSnappyBetterBlockAsm10B
16892 MOVW (R8)(R11*1), R10
16893 CMPW (R9)(R11*1), R10
16894 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
16895 LEAL 2(R11), R11
16896 SUBL $0x02, DI
16897 JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
16898
16899matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B:
16900 MOVB (R8)(R11*1), R10
16901 CMPB (R9)(R11*1), R10
16902 JNE match_nolit_end_encodeSnappyBetterBlockAsm10B
16903 LEAL 1(R11), R11
16904
16905match_nolit_end_encodeSnappyBetterBlockAsm10B:
16906 MOVL CX, DI
16907 SUBL BX, DI
16908
16909 // Check if repeat
16910 MOVL DI, 16(SP)
16911 MOVL 12(SP), BX
16912 CMPL BX, SI
16913 JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
16914 MOVL SI, R8
16915 MOVL SI, 12(SP)
16916 LEAQ (DX)(BX*1), R9
16917 SUBL BX, R8
16918 LEAL -1(R8), BX
16919 CMPL BX, $0x3c
16920 JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B
16921 CMPL BX, $0x00000100
16922 JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B
16923 JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B
16924
16925three_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
16926 MOVB $0xf4, (AX)
16927 MOVW BX, 1(AX)
16928 ADDQ $0x03, AX
16929 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
16930
16931two_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
16932 MOVB $0xf0, (AX)
16933 MOVB BL, 1(AX)
16934 ADDQ $0x02, AX
16935 CMPL BX, $0x40
16936 JB memmove_match_emit_encodeSnappyBetterBlockAsm10B
16937 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
16938
16939one_byte_match_emit_encodeSnappyBetterBlockAsm10B:
16940 SHLB $0x02, BL
16941 MOVB BL, (AX)
16942 ADDQ $0x01, AX
16943
16944memmove_match_emit_encodeSnappyBetterBlockAsm10B:
16945 LEAQ (AX)(R8*1), BX
16946
16947 // genMemMoveShort
16948 CMPQ R8, $0x08
16949 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8
16950 CMPQ R8, $0x10
16951 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
16952 CMPQ R8, $0x20
16953 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
16954 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
16955
16956emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8:
16957 MOVQ (R9), R10
16958 MOVQ R10, (AX)
16959 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
16960
16961emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
16962 MOVQ (R9), R10
16963 MOVQ -8(R9)(R8*1), R9
16964 MOVQ R10, (AX)
16965 MOVQ R9, -8(AX)(R8*1)
16966 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
16967
16968emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
16969 MOVOU (R9), X0
16970 MOVOU -16(R9)(R8*1), X1
16971 MOVOU X0, (AX)
16972 MOVOU X1, -16(AX)(R8*1)
16973 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
16974
16975emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
16976 MOVOU (R9), X0
16977 MOVOU 16(R9), X1
16978 MOVOU -32(R9)(R8*1), X2
16979 MOVOU -16(R9)(R8*1), X3
16980 MOVOU X0, (AX)
16981 MOVOU X1, 16(AX)
16982 MOVOU X2, -32(AX)(R8*1)
16983 MOVOU X3, -16(AX)(R8*1)
16984
16985memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B:
16986 MOVQ BX, AX
16987 JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
16988
16989memmove_long_match_emit_encodeSnappyBetterBlockAsm10B:
16990 LEAQ (AX)(R8*1), BX
16991
16992 // genMemMoveLong
16993 MOVOU (R9), X0
16994 MOVOU 16(R9), X1
16995 MOVOU -32(R9)(R8*1), X2
16996 MOVOU -16(R9)(R8*1), X3
16997 MOVQ R8, R12
16998 SHRQ $0x05, R12
16999 MOVQ AX, R10
17000 ANDL $0x0000001f, R10
17001 MOVQ $0x00000040, R13
17002 SUBQ R10, R13
17003 DECQ R12
17004 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
17005 LEAQ -32(R9)(R13*1), R10
17006 LEAQ -32(AX)(R13*1), R14
17007
17008emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
17009 MOVOU (R10), X4
17010 MOVOU 16(R10), X5
17011 MOVOA X4, (R14)
17012 MOVOA X5, 16(R14)
17013 ADDQ $0x20, R14
17014 ADDQ $0x20, R10
17015 ADDQ $0x20, R13
17016 DECQ R12
17017 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
17018
17019emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
17020 MOVOU -32(R9)(R13*1), X4
17021 MOVOU -16(R9)(R13*1), X5
17022 MOVOA X4, -32(AX)(R13*1)
17023 MOVOA X5, -16(AX)(R13*1)
17024 ADDQ $0x20, R13
17025 CMPQ R8, R13
17026 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
17027 MOVOU X0, (AX)
17028 MOVOU X1, 16(AX)
17029 MOVOU X2, -32(AX)(R8*1)
17030 MOVOU X3, -16(AX)(R8*1)
17031 MOVQ BX, AX
17032
17033emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B:
17034 ADDL R11, CX
17035 ADDL $0x04, R11
17036 MOVL CX, 12(SP)
17037
17038 // emitCopy
17039two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B:
17040 CMPL R11, $0x40
17041 JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B
17042 MOVB $0xee, (AX)
17043 MOVW DI, 1(AX)
17044 LEAL -60(R11), R11
17045 ADDQ $0x03, AX
17046 JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B
17047
17048two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B:
17049 MOVL R11, BX
17050 SHLL $0x02, BX
17051 CMPL R11, $0x0c
17052 JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
17053 CMPL DI, $0x00000800
17054 JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
17055 LEAL -15(BX), BX
17056 MOVB DI, 1(AX)
17057 SHRL $0x08, DI
17058 SHLL $0x05, DI
17059 ORL DI, BX
17060 MOVB BL, (AX)
17061 ADDQ $0x02, AX
17062 JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B
17063
17064emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B:
17065 LEAL -2(BX), BX
17066 MOVB BL, (AX)
17067 MOVW DI, 1(AX)
17068 ADDQ $0x03, AX
17069
17070match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B:
17071 CMPL CX, 8(SP)
17072 JAE emit_remainder_encodeSnappyBetterBlockAsm10B
17073 CMPQ AX, (SP)
17074 JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B
17075 MOVQ $0x00000000, ret+48(FP)
17076 RET
17077
17078match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B:
17079 MOVQ $0x0000cf1bbcdcbf9b, BX
17080 MOVQ $0x9e3779b1, DI
17081 LEAQ 1(SI), SI
17082 LEAQ -2(CX), R8
17083 MOVQ (DX)(SI*1), R9
17084 MOVQ 1(DX)(SI*1), R10
17085 MOVQ (DX)(R8*1), R11
17086 MOVQ 1(DX)(R8*1), R12
17087 SHLQ $0x10, R9
17088 IMULQ BX, R9
17089 SHRQ $0x34, R9
17090 SHLQ $0x20, R10
17091 IMULQ DI, R10
17092 SHRQ $0x36, R10
17093 SHLQ $0x10, R11
17094 IMULQ BX, R11
17095 SHRQ $0x34, R11
17096 SHLQ $0x20, R12
17097 IMULQ DI, R12
17098 SHRQ $0x36, R12
17099 LEAQ 1(SI), DI
17100 LEAQ 1(R8), R13
17101 MOVL SI, 24(SP)(R9*4)
17102 MOVL R8, 24(SP)(R11*4)
17103 MOVL DI, 16408(SP)(R10*4)
17104 MOVL R13, 16408(SP)(R12*4)
17105 LEAQ 1(R8)(SI*1), DI
17106 SHRQ $0x01, DI
17107 ADDQ $0x01, SI
17108 SUBQ $0x01, R8
17109
17110index_loop_encodeSnappyBetterBlockAsm10B:
17111 CMPQ DI, R8
17112 JAE search_loop_encodeSnappyBetterBlockAsm10B
17113 MOVQ (DX)(SI*1), R9
17114 MOVQ (DX)(DI*1), R10
17115 SHLQ $0x10, R9
17116 IMULQ BX, R9
17117 SHRQ $0x34, R9
17118 SHLQ $0x10, R10
17119 IMULQ BX, R10
17120 SHRQ $0x34, R10
17121 MOVL SI, 24(SP)(R9*4)
17122 MOVL DI, 24(SP)(R10*4)
17123 ADDQ $0x02, SI
17124 ADDQ $0x02, DI
17125 JMP index_loop_encodeSnappyBetterBlockAsm10B
17126
17127emit_remainder_encodeSnappyBetterBlockAsm10B:
17128 MOVQ src_len+32(FP), CX
17129 SUBL 12(SP), CX
17130 LEAQ 3(AX)(CX*1), CX
17131 CMPQ CX, (SP)
17132 JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B
17133 MOVQ $0x00000000, ret+48(FP)
17134 RET
17135
17136emit_remainder_ok_encodeSnappyBetterBlockAsm10B:
17137 MOVQ src_len+32(FP), CX
17138 MOVL 12(SP), BX
17139 CMPL BX, CX
17140 JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
17141 MOVL CX, SI
17142 MOVL CX, 12(SP)
17143 LEAQ (DX)(BX*1), CX
17144 SUBL BX, SI
17145 LEAL -1(SI), DX
17146 CMPL DX, $0x3c
17147 JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B
17148 CMPL DX, $0x00000100
17149 JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B
17150 JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B
17151
17152three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
17153 MOVB $0xf4, (AX)
17154 MOVW DX, 1(AX)
17155 ADDQ $0x03, AX
17156 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
17157
17158two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
17159 MOVB $0xf0, (AX)
17160 MOVB DL, 1(AX)
17161 ADDQ $0x02, AX
17162 CMPL DX, $0x40
17163 JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B
17164 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
17165
17166one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B:
17167 SHLB $0x02, DL
17168 MOVB DL, (AX)
17169 ADDQ $0x01, AX
17170
17171memmove_emit_remainder_encodeSnappyBetterBlockAsm10B:
17172 LEAQ (AX)(SI*1), DX
17173 MOVL SI, BX
17174
17175 // genMemMoveShort
17176 CMPQ BX, $0x03
17177 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2
17178 JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3
17179 CMPQ BX, $0x08
17180 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7
17181 CMPQ BX, $0x10
17182 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
17183 CMPQ BX, $0x20
17184 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
17185 JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
17186
17187emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2:
17188 MOVB (CX), SI
17189 MOVB -1(CX)(BX*1), CL
17190 MOVB SI, (AX)
17191 MOVB CL, -1(AX)(BX*1)
17192 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
17193
17194emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3:
17195 MOVW (CX), SI
17196 MOVB 2(CX), CL
17197 MOVW SI, (AX)
17198 MOVB CL, 2(AX)
17199 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
17200
17201emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7:
17202 MOVL (CX), SI
17203 MOVL -4(CX)(BX*1), CX
17204 MOVL SI, (AX)
17205 MOVL CX, -4(AX)(BX*1)
17206 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
17207
17208emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
17209 MOVQ (CX), SI
17210 MOVQ -8(CX)(BX*1), CX
17211 MOVQ SI, (AX)
17212 MOVQ CX, -8(AX)(BX*1)
17213 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
17214
17215emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
17216 MOVOU (CX), X0
17217 MOVOU -16(CX)(BX*1), X1
17218 MOVOU X0, (AX)
17219 MOVOU X1, -16(AX)(BX*1)
17220 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
17221
17222emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
17223 MOVOU (CX), X0
17224 MOVOU 16(CX), X1
17225 MOVOU -32(CX)(BX*1), X2
17226 MOVOU -16(CX)(BX*1), X3
17227 MOVOU X0, (AX)
17228 MOVOU X1, 16(AX)
17229 MOVOU X2, -32(AX)(BX*1)
17230 MOVOU X3, -16(AX)(BX*1)
17231
17232memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B:
17233 MOVQ DX, AX
17234 JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
17235
17236memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B:
17237 LEAQ (AX)(SI*1), DX
17238 MOVL SI, BX
17239
17240 // genMemMoveLong
17241 MOVOU (CX), X0
17242 MOVOU 16(CX), X1
17243 MOVOU -32(CX)(BX*1), X2
17244 MOVOU -16(CX)(BX*1), X3
17245 MOVQ BX, DI
17246 SHRQ $0x05, DI
17247 MOVQ AX, SI
17248 ANDL $0x0000001f, SI
17249 MOVQ $0x00000040, R8
17250 SUBQ SI, R8
17251 DECQ DI
17252 JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
17253 LEAQ -32(CX)(R8*1), SI
17254 LEAQ -32(AX)(R8*1), R9
17255
17256emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
17257 MOVOU (SI), X4
17258 MOVOU 16(SI), X5
17259 MOVOA X4, (R9)
17260 MOVOA X5, 16(R9)
17261 ADDQ $0x20, R9
17262 ADDQ $0x20, SI
17263 ADDQ $0x20, R8
17264 DECQ DI
17265 JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
17266
17267emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
17268 MOVOU -32(CX)(R8*1), X4
17269 MOVOU -16(CX)(R8*1), X5
17270 MOVOA X4, -32(AX)(R8*1)
17271 MOVOA X5, -16(AX)(R8*1)
17272 ADDQ $0x20, R8
17273 CMPQ BX, R8
17274 JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
17275 MOVOU X0, (AX)
17276 MOVOU X1, 16(AX)
17277 MOVOU X2, -32(AX)(BX*1)
17278 MOVOU X3, -16(AX)(BX*1)
17279 MOVQ DX, AX
17280
17281emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B:
17282 MOVQ dst_base+0(FP), CX
17283 SUBQ CX, AX
17284 MOVQ AX, ret+48(FP)
17285 RET
17286
17287// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
17288// Requires: BMI, SSE2
17289TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56
17290 MOVQ dst_base+0(FP), AX
17291 MOVQ $0x00000028, CX
17292 LEAQ 24(SP), DX
17293 PXOR X0, X0
17294
17295zero_loop_encodeSnappyBetterBlockAsm8B:
17296 MOVOU X0, (DX)
17297 MOVOU X0, 16(DX)
17298 MOVOU X0, 32(DX)
17299 MOVOU X0, 48(DX)
17300 MOVOU X0, 64(DX)
17301 MOVOU X0, 80(DX)
17302 MOVOU X0, 96(DX)
17303 MOVOU X0, 112(DX)
17304 ADDQ $0x80, DX
17305 DECQ CX
17306 JNZ zero_loop_encodeSnappyBetterBlockAsm8B
17307 MOVL $0x00000000, 12(SP)
17308 MOVQ src_len+32(FP), CX
17309 LEAQ -9(CX), DX
17310 LEAQ -8(CX), BX
17311 MOVL BX, 8(SP)
17312 SHRQ $0x05, CX
17313 SUBL CX, DX
17314 LEAQ (AX)(DX*1), DX
17315 MOVQ DX, (SP)
17316 MOVL $0x00000001, CX
17317 MOVL $0x00000000, 16(SP)
17318 MOVQ src_base+24(FP), DX
17319
17320search_loop_encodeSnappyBetterBlockAsm8B:
17321 MOVL CX, BX
17322 SUBL 12(SP), BX
17323 SHRL $0x04, BX
17324 LEAL 1(CX)(BX*1), BX
17325 CMPL BX, 8(SP)
17326 JAE emit_remainder_encodeSnappyBetterBlockAsm8B
17327 MOVQ (DX)(CX*1), SI
17328 MOVL BX, 20(SP)
17329 MOVQ $0x0000cf1bbcdcbf9b, R8
17330 MOVQ $0x9e3779b1, BX
17331 MOVQ SI, R9
17332 MOVQ SI, R10
17333 SHLQ $0x10, R9
17334 IMULQ R8, R9
17335 SHRQ $0x36, R9
17336 SHLQ $0x20, R10
17337 IMULQ BX, R10
17338 SHRQ $0x38, R10
17339 MOVL 24(SP)(R9*4), BX
17340 MOVL 4120(SP)(R10*4), DI
17341 MOVL CX, 24(SP)(R9*4)
17342 MOVL CX, 4120(SP)(R10*4)
17343 MOVQ (DX)(BX*1), R9
17344 MOVQ (DX)(DI*1), R10
17345 CMPQ R9, SI
17346 JEQ candidate_match_encodeSnappyBetterBlockAsm8B
17347 CMPQ R10, SI
17348 JNE no_short_found_encodeSnappyBetterBlockAsm8B
17349 MOVL DI, BX
17350 JMP candidate_match_encodeSnappyBetterBlockAsm8B
17351
17352no_short_found_encodeSnappyBetterBlockAsm8B:
17353 CMPL R9, SI
17354 JEQ candidate_match_encodeSnappyBetterBlockAsm8B
17355 CMPL R10, SI
17356 JEQ candidateS_match_encodeSnappyBetterBlockAsm8B
17357 MOVL 20(SP), CX
17358 JMP search_loop_encodeSnappyBetterBlockAsm8B
17359
17360candidateS_match_encodeSnappyBetterBlockAsm8B:
17361 SHRQ $0x08, SI
17362 MOVQ SI, R9
17363 SHLQ $0x10, R9
17364 IMULQ R8, R9
17365 SHRQ $0x36, R9
17366 MOVL 24(SP)(R9*4), BX
17367 INCL CX
17368 MOVL CX, 24(SP)(R9*4)
17369 CMPL (DX)(BX*1), SI
17370 JEQ candidate_match_encodeSnappyBetterBlockAsm8B
17371 DECL CX
17372 MOVL DI, BX
17373
17374candidate_match_encodeSnappyBetterBlockAsm8B:
17375 MOVL 12(SP), SI
17376 TESTL BX, BX
17377 JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
17378
17379match_extend_back_loop_encodeSnappyBetterBlockAsm8B:
17380 CMPL CX, SI
17381 JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B
17382 MOVB -1(DX)(BX*1), DI
17383 MOVB -1(DX)(CX*1), R8
17384 CMPB DI, R8
17385 JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B
17386 LEAL -1(CX), CX
17387 DECL BX
17388 JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
17389 JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B
17390
17391match_extend_back_end_encodeSnappyBetterBlockAsm8B:
17392 MOVL CX, SI
17393 SUBL 12(SP), SI
17394 LEAQ 3(AX)(SI*1), SI
17395 CMPQ SI, (SP)
17396 JB match_dst_size_check_encodeSnappyBetterBlockAsm8B
17397 MOVQ $0x00000000, ret+48(FP)
17398 RET
17399
17400match_dst_size_check_encodeSnappyBetterBlockAsm8B:
17401 MOVL CX, SI
17402 ADDL $0x04, CX
17403 ADDL $0x04, BX
17404 MOVQ src_len+32(FP), DI
17405 SUBL CX, DI
17406 LEAQ (DX)(CX*1), R8
17407 LEAQ (DX)(BX*1), R9
17408
17409 // matchLen
17410 XORL R11, R11
17411
17412matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B:
17413 CMPL DI, $0x10
17414 JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B
17415 MOVQ (R8)(R11*1), R10
17416 MOVQ 8(R8)(R11*1), R12
17417 XORQ (R9)(R11*1), R10
17418 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B
17419 XORQ 8(R9)(R11*1), R12
17420 JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B
17421 LEAL -16(DI), DI
17422 LEAL 16(R11), R11
17423 JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B
17424
17425matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B:
17426#ifdef GOAMD64_v3
17427 TZCNTQ R12, R12
17428
17429#else
17430 BSFQ R12, R12
17431
17432#endif
17433 SARQ $0x03, R12
17434 LEAL 8(R11)(R12*1), R11
17435 JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
17436
17437matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B:
17438 CMPL DI, $0x08
17439 JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
17440 MOVQ (R8)(R11*1), R10
17441 XORQ (R9)(R11*1), R10
17442 JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B
17443 LEAL -8(DI), DI
17444 LEAL 8(R11), R11
17445 JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
17446
17447matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B:
17448#ifdef GOAMD64_v3
17449 TZCNTQ R10, R10
17450
17451#else
17452 BSFQ R10, R10
17453
17454#endif
17455 SARQ $0x03, R10
17456 LEAL (R11)(R10*1), R11
17457 JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
17458
17459matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
17460 CMPL DI, $0x04
17461 JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
17462 MOVL (R8)(R11*1), R10
17463 CMPL (R9)(R11*1), R10
17464 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
17465 LEAL -4(DI), DI
17466 LEAL 4(R11), R11
17467
17468matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B:
17469 CMPL DI, $0x01
17470 JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
17471 JB match_nolit_end_encodeSnappyBetterBlockAsm8B
17472 MOVW (R8)(R11*1), R10
17473 CMPW (R9)(R11*1), R10
17474 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
17475 LEAL 2(R11), R11
17476 SUBL $0x02, DI
17477 JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
17478
17479matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B:
17480 MOVB (R8)(R11*1), R10
17481 CMPB (R9)(R11*1), R10
17482 JNE match_nolit_end_encodeSnappyBetterBlockAsm8B
17483 LEAL 1(R11), R11
17484
17485match_nolit_end_encodeSnappyBetterBlockAsm8B:
17486 MOVL CX, DI
17487 SUBL BX, DI
17488
17489 // Check if repeat
17490 MOVL DI, 16(SP)
17491 MOVL 12(SP), BX
17492 CMPL BX, SI
17493 JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
17494 MOVL SI, R8
17495 MOVL SI, 12(SP)
17496 LEAQ (DX)(BX*1), R9
17497 SUBL BX, R8
17498 LEAL -1(R8), BX
17499 CMPL BX, $0x3c
17500 JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B
17501 CMPL BX, $0x00000100
17502 JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B
17503 JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B
17504
17505three_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
17506 MOVB $0xf4, (AX)
17507 MOVW BX, 1(AX)
17508 ADDQ $0x03, AX
17509 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
17510
17511two_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
17512 MOVB $0xf0, (AX)
17513 MOVB BL, 1(AX)
17514 ADDQ $0x02, AX
17515 CMPL BX, $0x40
17516 JB memmove_match_emit_encodeSnappyBetterBlockAsm8B
17517 JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
17518
17519one_byte_match_emit_encodeSnappyBetterBlockAsm8B:
17520 SHLB $0x02, BL
17521 MOVB BL, (AX)
17522 ADDQ $0x01, AX
17523
17524memmove_match_emit_encodeSnappyBetterBlockAsm8B:
17525 LEAQ (AX)(R8*1), BX
17526
17527 // genMemMoveShort
17528 CMPQ R8, $0x08
17529 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8
17530 CMPQ R8, $0x10
17531 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
17532 CMPQ R8, $0x20
17533 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
17534 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
17535
17536emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8:
17537 MOVQ (R9), R10
17538 MOVQ R10, (AX)
17539 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
17540
17541emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
17542 MOVQ (R9), R10
17543 MOVQ -8(R9)(R8*1), R9
17544 MOVQ R10, (AX)
17545 MOVQ R9, -8(AX)(R8*1)
17546 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
17547
17548emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
17549 MOVOU (R9), X0
17550 MOVOU -16(R9)(R8*1), X1
17551 MOVOU X0, (AX)
17552 MOVOU X1, -16(AX)(R8*1)
17553 JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
17554
17555emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
17556 MOVOU (R9), X0
17557 MOVOU 16(R9), X1
17558 MOVOU -32(R9)(R8*1), X2
17559 MOVOU -16(R9)(R8*1), X3
17560 MOVOU X0, (AX)
17561 MOVOU X1, 16(AX)
17562 MOVOU X2, -32(AX)(R8*1)
17563 MOVOU X3, -16(AX)(R8*1)
17564
17565memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B:
17566 MOVQ BX, AX
17567 JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
17568
17569memmove_long_match_emit_encodeSnappyBetterBlockAsm8B:
17570 LEAQ (AX)(R8*1), BX
17571
17572 // genMemMoveLong
17573 MOVOU (R9), X0
17574 MOVOU 16(R9), X1
17575 MOVOU -32(R9)(R8*1), X2
17576 MOVOU -16(R9)(R8*1), X3
17577 MOVQ R8, R12
17578 SHRQ $0x05, R12
17579 MOVQ AX, R10
17580 ANDL $0x0000001f, R10
17581 MOVQ $0x00000040, R13
17582 SUBQ R10, R13
17583 DECQ R12
17584 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
17585 LEAQ -32(R9)(R13*1), R10
17586 LEAQ -32(AX)(R13*1), R14
17587
17588emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
17589 MOVOU (R10), X4
17590 MOVOU 16(R10), X5
17591 MOVOA X4, (R14)
17592 MOVOA X5, 16(R14)
17593 ADDQ $0x20, R14
17594 ADDQ $0x20, R10
17595 ADDQ $0x20, R13
17596 DECQ R12
17597 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
17598
17599emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
17600 MOVOU -32(R9)(R13*1), X4
17601 MOVOU -16(R9)(R13*1), X5
17602 MOVOA X4, -32(AX)(R13*1)
17603 MOVOA X5, -16(AX)(R13*1)
17604 ADDQ $0x20, R13
17605 CMPQ R8, R13
17606 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
17607 MOVOU X0, (AX)
17608 MOVOU X1, 16(AX)
17609 MOVOU X2, -32(AX)(R8*1)
17610 MOVOU X3, -16(AX)(R8*1)
17611 MOVQ BX, AX
17612
17613emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B:
17614 ADDL R11, CX
17615 ADDL $0x04, R11
17616 MOVL CX, 12(SP)
17617
17618 // emitCopy
17619two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B:
17620 CMPL R11, $0x40
17621 JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B
17622 MOVB $0xee, (AX)
17623 MOVW DI, 1(AX)
17624 LEAL -60(R11), R11
17625 ADDQ $0x03, AX
17626 JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B
17627
17628two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B:
17629 MOVL R11, BX
17630 SHLL $0x02, BX
17631 CMPL R11, $0x0c
17632 JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B
17633 LEAL -15(BX), BX
17634 MOVB DI, 1(AX)
17635 SHRL $0x08, DI
17636 SHLL $0x05, DI
17637 ORL DI, BX
17638 MOVB BL, (AX)
17639 ADDQ $0x02, AX
17640 JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B
17641
17642emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B:
17643 LEAL -2(BX), BX
17644 MOVB BL, (AX)
17645 MOVW DI, 1(AX)
17646 ADDQ $0x03, AX
17647
17648match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B:
17649 CMPL CX, 8(SP)
17650 JAE emit_remainder_encodeSnappyBetterBlockAsm8B
17651 CMPQ AX, (SP)
17652 JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B
17653 MOVQ $0x00000000, ret+48(FP)
17654 RET
17655
17656match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B:
17657 MOVQ $0x0000cf1bbcdcbf9b, BX
17658 MOVQ $0x9e3779b1, DI
17659 LEAQ 1(SI), SI
17660 LEAQ -2(CX), R8
17661 MOVQ (DX)(SI*1), R9
17662 MOVQ 1(DX)(SI*1), R10
17663 MOVQ (DX)(R8*1), R11
17664 MOVQ 1(DX)(R8*1), R12
17665 SHLQ $0x10, R9
17666 IMULQ BX, R9
17667 SHRQ $0x36, R9
17668 SHLQ $0x20, R10
17669 IMULQ DI, R10
17670 SHRQ $0x38, R10
17671 SHLQ $0x10, R11
17672 IMULQ BX, R11
17673 SHRQ $0x36, R11
17674 SHLQ $0x20, R12
17675 IMULQ DI, R12
17676 SHRQ $0x38, R12
17677 LEAQ 1(SI), DI
17678 LEAQ 1(R8), R13
17679 MOVL SI, 24(SP)(R9*4)
17680 MOVL R8, 24(SP)(R11*4)
17681 MOVL DI, 4120(SP)(R10*4)
17682 MOVL R13, 4120(SP)(R12*4)
17683 LEAQ 1(R8)(SI*1), DI
17684 SHRQ $0x01, DI
17685 ADDQ $0x01, SI
17686 SUBQ $0x01, R8
17687
17688index_loop_encodeSnappyBetterBlockAsm8B:
17689 CMPQ DI, R8
17690 JAE search_loop_encodeSnappyBetterBlockAsm8B
17691 MOVQ (DX)(SI*1), R9
17692 MOVQ (DX)(DI*1), R10
17693 SHLQ $0x10, R9
17694 IMULQ BX, R9
17695 SHRQ $0x36, R9
17696 SHLQ $0x10, R10
17697 IMULQ BX, R10
17698 SHRQ $0x36, R10
17699 MOVL SI, 24(SP)(R9*4)
17700 MOVL DI, 24(SP)(R10*4)
17701 ADDQ $0x02, SI
17702 ADDQ $0x02, DI
17703 JMP index_loop_encodeSnappyBetterBlockAsm8B
17704
17705emit_remainder_encodeSnappyBetterBlockAsm8B:
17706 MOVQ src_len+32(FP), CX
17707 SUBL 12(SP), CX
17708 LEAQ 3(AX)(CX*1), CX
17709 CMPQ CX, (SP)
17710 JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B
17711 MOVQ $0x00000000, ret+48(FP)
17712 RET
17713
17714emit_remainder_ok_encodeSnappyBetterBlockAsm8B:
17715 MOVQ src_len+32(FP), CX
17716 MOVL 12(SP), BX
17717 CMPL BX, CX
17718 JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
17719 MOVL CX, SI
17720 MOVL CX, 12(SP)
17721 LEAQ (DX)(BX*1), CX
17722 SUBL BX, SI
17723 LEAL -1(SI), DX
17724 CMPL DX, $0x3c
17725 JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B
17726 CMPL DX, $0x00000100
17727 JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B
17728 JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B
17729
17730three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
17731 MOVB $0xf4, (AX)
17732 MOVW DX, 1(AX)
17733 ADDQ $0x03, AX
17734 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
17735
17736two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
17737 MOVB $0xf0, (AX)
17738 MOVB DL, 1(AX)
17739 ADDQ $0x02, AX
17740 CMPL DX, $0x40
17741 JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B
17742 JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
17743
17744one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B:
17745 SHLB $0x02, DL
17746 MOVB DL, (AX)
17747 ADDQ $0x01, AX
17748
17749memmove_emit_remainder_encodeSnappyBetterBlockAsm8B:
17750 LEAQ (AX)(SI*1), DX
17751 MOVL SI, BX
17752
17753 // genMemMoveShort
17754 CMPQ BX, $0x03
17755 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2
17756 JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3
17757 CMPQ BX, $0x08
17758 JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7
17759 CMPQ BX, $0x10
17760 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
17761 CMPQ BX, $0x20
17762 JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
17763 JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
17764
17765emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2:
17766 MOVB (CX), SI
17767 MOVB -1(CX)(BX*1), CL
17768 MOVB SI, (AX)
17769 MOVB CL, -1(AX)(BX*1)
17770 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
17771
17772emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3:
17773 MOVW (CX), SI
17774 MOVB 2(CX), CL
17775 MOVW SI, (AX)
17776 MOVB CL, 2(AX)
17777 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
17778
17779emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7:
17780 MOVL (CX), SI
17781 MOVL -4(CX)(BX*1), CX
17782 MOVL SI, (AX)
17783 MOVL CX, -4(AX)(BX*1)
17784 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
17785
17786emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
17787 MOVQ (CX), SI
17788 MOVQ -8(CX)(BX*1), CX
17789 MOVQ SI, (AX)
17790 MOVQ CX, -8(AX)(BX*1)
17791 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
17792
17793emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
17794 MOVOU (CX), X0
17795 MOVOU -16(CX)(BX*1), X1
17796 MOVOU X0, (AX)
17797 MOVOU X1, -16(AX)(BX*1)
17798 JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
17799
17800emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
17801 MOVOU (CX), X0
17802 MOVOU 16(CX), X1
17803 MOVOU -32(CX)(BX*1), X2
17804 MOVOU -16(CX)(BX*1), X3
17805 MOVOU X0, (AX)
17806 MOVOU X1, 16(AX)
17807 MOVOU X2, -32(AX)(BX*1)
17808 MOVOU X3, -16(AX)(BX*1)
17809
17810memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B:
17811 MOVQ DX, AX
17812 JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
17813
17814memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B:
17815 LEAQ (AX)(SI*1), DX
17816 MOVL SI, BX
17817
17818 // genMemMoveLong
17819 MOVOU (CX), X0
17820 MOVOU 16(CX), X1
17821 MOVOU -32(CX)(BX*1), X2
17822 MOVOU -16(CX)(BX*1), X3
17823 MOVQ BX, DI
17824 SHRQ $0x05, DI
17825 MOVQ AX, SI
17826 ANDL $0x0000001f, SI
17827 MOVQ $0x00000040, R8
17828 SUBQ SI, R8
17829 DECQ DI
17830 JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
17831 LEAQ -32(CX)(R8*1), SI
17832 LEAQ -32(AX)(R8*1), R9
17833
17834emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
17835 MOVOU (SI), X4
17836 MOVOU 16(SI), X5
17837 MOVOA X4, (R9)
17838 MOVOA X5, 16(R9)
17839 ADDQ $0x20, R9
17840 ADDQ $0x20, SI
17841 ADDQ $0x20, R8
17842 DECQ DI
17843 JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
17844
17845emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
17846 MOVOU -32(CX)(R8*1), X4
17847 MOVOU -16(CX)(R8*1), X5
17848 MOVOA X4, -32(AX)(R8*1)
17849 MOVOA X5, -16(AX)(R8*1)
17850 ADDQ $0x20, R8
17851 CMPQ BX, R8
17852 JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
17853 MOVOU X0, (AX)
17854 MOVOU X1, 16(AX)
17855 MOVOU X2, -32(AX)(BX*1)
17856 MOVOU X3, -16(AX)(BX*1)
17857 MOVQ DX, AX
17858
17859emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B:
17860 MOVQ dst_base+0(FP), CX
17861 SUBQ CX, AX
17862 MOVQ AX, ret+48(FP)
17863 RET
17864
// func calcBlockSize(src []byte) int
// Requires: BMI, SSE2
//
// calcBlockSize computes how many bytes the block encoding of src would
// occupy, WITHOUT producing any output: every place the real encoder would
// write tag/literal bytes, this routine only adds the byte count to AX.
// The result is stored to ret+24(FP). If the running size would reach the
// precomputed budget kept at (SP), the routine gives up and returns 0.
//
// Register / stack usage (established by the code below):
//	AX     - accumulated would-be output size (returned)
//	CX     - current position `s` in src
//	DX     - src base pointer (after init)
//	(SP)   - size budget: srcLen - 9 - srcLen/32; exceeding it returns 0
//	8(SP)  - sLimit: srcLen - 8 (stop scanning for matches past this)
//	12(SP) - nextEmit: start of the pending literal run not yet counted
//	16(SP) - current repeat offset (initialized to 1)
//	20(SP) - nextS: position to resume the scan at when candidates fail
//	24(SP) - hash table: 8192 4-byte entries (frame = 24 + 0x8000 = 32792)
TEXT ·calcBlockSize(SB), $32792-32
	XORQ  AX, AX
	MOVQ  $0x00000100, CX
	LEAQ  24(SP), DX
	PXOR  X0, X0

	// Clear the 32 KiB hash table: 256 iterations x 128 bytes of SSE stores.
zero_loop_calcBlockSize:
	MOVOU X0, (DX)
	MOVOU X0, 16(DX)
	MOVOU X0, 32(DX)
	MOVOU X0, 48(DX)
	MOVOU X0, 64(DX)
	MOVOU X0, 80(DX)
	MOVOU X0, 96(DX)
	MOVOU X0, 112(DX)
	ADDQ  $0x80, DX
	DECQ  CX
	JNZ   zero_loop_calcBlockSize
	MOVL  $0x00000000, 12(SP)
	MOVQ  src_len+8(FP), CX
	LEAQ  -9(CX), DX
	LEAQ  -8(CX), BX
	MOVL  BX, 8(SP)
	SHRQ  $0x05, CX
	SUBL  CX, DX
	// AX is still 0 here, so this just moves the budget into DX unchanged.
	LEAQ  (AX)(DX*1), DX
	MOVQ  DX, (SP)
	MOVL  $0x00000001, CX
	MOVL  CX, 16(SP)
	MOVQ  src_base+0(FP), DX

	// Main scan loop. Skip heuristic: step grows by (s - nextEmit)/32,
	// so incompressible data is skipped over increasingly fast.
search_loop_calcBlockSize:
	MOVL  CX, BX
	SUBL  12(SP), BX
	SHRL  $0x05, BX
	LEAL  4(CX)(BX*1), BX
	CMPL  BX, 8(SP)
	JAE   emit_remainder_calcBlockSize
	MOVQ  (DX)(CX*1), SI
	MOVL  BX, 20(SP)
	// Hash the 8-byte load: SHLQ $0x10 drops the top 16 bits, so only 6
	// bytes participate; (x<<16 * mult)>>51 yields a 13-bit table index.
	MOVQ  $0x0000cf1bbcdcbf9b, R8
	MOVQ  SI, R9
	MOVQ  SI, R10
	SHRQ  $0x08, R10
	SHLQ  $0x10, R9
	IMULQ R8, R9
	SHRQ  $0x33, R9
	SHLQ  $0x10, R10
	IMULQ R8, R10
	SHRQ  $0x33, R10
	// Two candidate positions (at s and s+1) are looked up and the table
	// is updated for both before the candidates are tested.
	MOVL  24(SP)(R9*4), BX
	MOVL  24(SP)(R10*4), DI
	MOVL  CX, 24(SP)(R9*4)
	LEAL  1(CX), R9
	MOVL  R9, 24(SP)(R10*4)
	MOVQ  SI, R9
	SHRQ  $0x10, R9
	SHLQ  $0x10, R9
	IMULQ R8, R9
	SHRQ  $0x33, R9
	// Check for a continuation of the current repeat offset at s+1.
	MOVL  CX, R8
	SUBL  16(SP), R8
	MOVL  1(DX)(R8*1), R10
	MOVQ  SI, R8
	SHRQ  $0x08, R8
	CMPL  R8, R10
	JNE   no_repeat_found_calcBlockSize
	LEAL  1(CX), SI
	MOVL  12(SP), BX
	MOVL  SI, DI
	SUBL  16(SP), DI
	JZ    repeat_extend_back_end_calcBlockSize

	// Extend the repeat match backwards while preceding bytes also match.
repeat_extend_back_loop_calcBlockSize:
	CMPL SI, BX
	JBE  repeat_extend_back_end_calcBlockSize
	MOVB -1(DX)(DI*1), R8
	MOVB -1(DX)(SI*1), R9
	CMPB R8, R9
	JNE  repeat_extend_back_end_calcBlockSize
	LEAL -1(SI), SI
	DECL DI
	JNZ  repeat_extend_back_loop_calcBlockSize

	// Account for the pending literal run before the repeat: 1-5 tag bytes
	// (by length class) plus the literal bytes themselves.
repeat_extend_back_end_calcBlockSize:
	MOVL 12(SP), BX
	CMPL BX, SI
	JEQ  emit_literal_done_repeat_emit_calcBlockSize
	MOVL SI, DI
	MOVL SI, 12(SP)
	LEAQ (DX)(BX*1), R8
	SUBL BX, DI
	LEAL -1(DI), BX
	CMPL BX, $0x3c
	JB   one_byte_repeat_emit_calcBlockSize
	CMPL BX, $0x00000100
	JB   two_bytes_repeat_emit_calcBlockSize
	CMPL BX, $0x00010000
	JB   three_bytes_repeat_emit_calcBlockSize
	CMPL BX, $0x01000000
	JB   four_bytes_repeat_emit_calcBlockSize
	ADDQ $0x05, AX
	JMP  memmove_long_repeat_emit_calcBlockSize

four_bytes_repeat_emit_calcBlockSize:
	ADDQ $0x04, AX
	JMP  memmove_long_repeat_emit_calcBlockSize

three_bytes_repeat_emit_calcBlockSize:
	ADDQ $0x03, AX
	JMP  memmove_long_repeat_emit_calcBlockSize

two_bytes_repeat_emit_calcBlockSize:
	ADDQ $0x02, AX
	CMPL BX, $0x40
	JB   memmove_repeat_emit_calcBlockSize
	JMP  memmove_long_repeat_emit_calcBlockSize

one_byte_repeat_emit_calcBlockSize:
	ADDQ $0x01, AX

	// No data is copied in this size-only variant; just add the literal length.
memmove_repeat_emit_calcBlockSize:
	LEAQ (AX)(DI*1), AX
	JMP  emit_literal_done_repeat_emit_calcBlockSize

memmove_long_repeat_emit_calcBlockSize:
	LEAQ (AX)(DI*1), AX

emit_literal_done_repeat_emit_calcBlockSize:
	ADDL $0x05, CX
	MOVL CX, BX
	SUBL 16(SP), BX
	MOVQ src_len+8(FP), DI
	SUBL CX, DI
	LEAQ (DX)(CX*1), R8
	LEAQ (DX)(BX*1), BX

	// matchLen: compare 16 bytes per iteration; BSF/TZCNT finds the first
	// differing byte once a 8-byte XOR is non-zero. R10 = match length.
	XORL R10, R10

matchlen_loopback_16_repeat_extend_calcBlockSize:
	CMPL DI, $0x10
	JB   matchlen_match8_repeat_extend_calcBlockSize
	MOVQ (R8)(R10*1), R9
	MOVQ 8(R8)(R10*1), R11
	XORQ (BX)(R10*1), R9
	JNZ  matchlen_bsf_8_repeat_extend_calcBlockSize
	XORQ 8(BX)(R10*1), R11
	JNZ  matchlen_bsf_16repeat_extend_calcBlockSize
	LEAL -16(DI), DI
	LEAL 16(R10), R10
	JMP  matchlen_loopback_16_repeat_extend_calcBlockSize

matchlen_bsf_16repeat_extend_calcBlockSize:
#ifdef GOAMD64_v3
	TZCNTQ R11, R11

#else
	BSFQ R11, R11

#endif
	SARQ $0x03, R11
	LEAL 8(R10)(R11*1), R10
	JMP  repeat_extend_forward_end_calcBlockSize

matchlen_match8_repeat_extend_calcBlockSize:
	CMPL DI, $0x08
	JB   matchlen_match4_repeat_extend_calcBlockSize
	MOVQ (R8)(R10*1), R9
	XORQ (BX)(R10*1), R9
	JNZ  matchlen_bsf_8_repeat_extend_calcBlockSize
	LEAL -8(DI), DI
	LEAL 8(R10), R10
	JMP  matchlen_match4_repeat_extend_calcBlockSize

matchlen_bsf_8_repeat_extend_calcBlockSize:
#ifdef GOAMD64_v3
	TZCNTQ R9, R9

#else
	BSFQ R9, R9

#endif
	SARQ $0x03, R9
	LEAL (R10)(R9*1), R10
	JMP  repeat_extend_forward_end_calcBlockSize

matchlen_match4_repeat_extend_calcBlockSize:
	CMPL DI, $0x04
	JB   matchlen_match2_repeat_extend_calcBlockSize
	MOVL (R8)(R10*1), R9
	CMPL (BX)(R10*1), R9
	JNE  matchlen_match2_repeat_extend_calcBlockSize
	LEAL -4(DI), DI
	LEAL 4(R10), R10

matchlen_match2_repeat_extend_calcBlockSize:
	CMPL DI, $0x01
	JE   matchlen_match1_repeat_extend_calcBlockSize
	JB   repeat_extend_forward_end_calcBlockSize
	MOVW (R8)(R10*1), R9
	CMPW (BX)(R10*1), R9
	JNE  matchlen_match1_repeat_extend_calcBlockSize
	LEAL 2(R10), R10
	SUBL $0x02, DI
	JZ   repeat_extend_forward_end_calcBlockSize

matchlen_match1_repeat_extend_calcBlockSize:
	MOVB (R8)(R10*1), R9
	CMPB (BX)(R10*1), R9
	JNE  repeat_extend_forward_end_calcBlockSize
	LEAL 1(R10), R10

repeat_extend_forward_end_calcBlockSize:
	ADDL R10, CX
	MOVL CX, BX
	SUBL SI, BX
	MOVL 16(SP), SI

	// emitCopy (size only): offsets >= 64 KiB cost 5 bytes per <=64-byte
	// chunk; otherwise 3 bytes per <=64-byte chunk, or 2 bytes when the
	// remaining length < 12 and the offset < 2048.
	CMPL SI, $0x00010000
	JB   two_byte_offset_repeat_as_copy_calcBlockSize

four_bytes_loop_back_repeat_as_copy_calcBlockSize:
	CMPL BX, $0x40
	JBE  four_bytes_remain_repeat_as_copy_calcBlockSize
	LEAL -64(BX), BX
	ADDQ $0x05, AX
	CMPL BX, $0x04
	JB   four_bytes_remain_repeat_as_copy_calcBlockSize
	JMP  four_bytes_loop_back_repeat_as_copy_calcBlockSize

four_bytes_remain_repeat_as_copy_calcBlockSize:
	TESTL BX, BX
	JZ    repeat_end_emit_calcBlockSize
	XORL  BX, BX
	ADDQ  $0x05, AX
	JMP   repeat_end_emit_calcBlockSize

two_byte_offset_repeat_as_copy_calcBlockSize:
	CMPL BX, $0x40
	JBE  two_byte_offset_short_repeat_as_copy_calcBlockSize
	LEAL -60(BX), BX
	ADDQ $0x03, AX
	JMP  two_byte_offset_repeat_as_copy_calcBlockSize

two_byte_offset_short_repeat_as_copy_calcBlockSize:
	// The shifted tag value in DI is computed but never read in this
	// size-only variant (generated-code leftover from the real encoder).
	MOVL BX, DI
	SHLL $0x02, DI
	CMPL BX, $0x0c
	JAE  emit_copy_three_repeat_as_copy_calcBlockSize
	CMPL SI, $0x00000800
	JAE  emit_copy_three_repeat_as_copy_calcBlockSize
	ADDQ $0x02, AX
	JMP  repeat_end_emit_calcBlockSize

emit_copy_three_repeat_as_copy_calcBlockSize:
	ADDQ $0x03, AX

repeat_end_emit_calcBlockSize:
	MOVL CX, 12(SP)
	JMP  search_loop_calcBlockSize

	// No repeat: test the s / s+1 / s+2 hash candidates in turn.
no_repeat_found_calcBlockSize:
	CMPL (DX)(BX*1), SI
	JEQ  candidate_match_calcBlockSize
	SHRQ $0x08, SI
	MOVL 24(SP)(R9*4), BX
	LEAL 2(CX), R8
	CMPL (DX)(DI*1), SI
	JEQ  candidate2_match_calcBlockSize
	MOVL R8, 24(SP)(R9*4)
	SHRQ $0x08, SI
	CMPL (DX)(BX*1), SI
	JEQ  candidate3_match_calcBlockSize
	MOVL 20(SP), CX
	JMP  search_loop_calcBlockSize

candidate3_match_calcBlockSize:
	ADDL $0x02, CX
	JMP  candidate_match_calcBlockSize

candidate2_match_calcBlockSize:
	MOVL R8, 24(SP)(R9*4)
	INCL CX
	MOVL DI, BX

candidate_match_calcBlockSize:
	MOVL  12(SP), SI
	TESTL BX, BX
	JZ    match_extend_back_end_calcBlockSize

	// Extend a candidate match backwards toward nextEmit.
match_extend_back_loop_calcBlockSize:
	CMPL CX, SI
	JBE  match_extend_back_end_calcBlockSize
	MOVB -1(DX)(BX*1), DI
	MOVB -1(DX)(CX*1), R8
	CMPB DI, R8
	JNE  match_extend_back_end_calcBlockSize
	LEAL -1(CX), CX
	DECL BX
	JZ   match_extend_back_end_calcBlockSize
	JMP  match_extend_back_loop_calcBlockSize

	// Budget check: projected size (AX + pending literals + 5) must stay
	// below (SP), otherwise the block is not worth encoding -> return 0.
match_extend_back_end_calcBlockSize:
	MOVL CX, SI
	SUBL 12(SP), SI
	LEAQ 5(AX)(SI*1), SI
	CMPQ SI, (SP)
	JB   match_dst_size_check_calcBlockSize
	MOVQ $0x00000000, ret+24(FP)
	RET

	// Count the literal run preceding this match (same classes as above).
match_dst_size_check_calcBlockSize:
	MOVL CX, SI
	MOVL 12(SP), DI
	CMPL DI, SI
	JEQ  emit_literal_done_match_emit_calcBlockSize
	MOVL SI, R8
	MOVL SI, 12(SP)
	LEAQ (DX)(DI*1), SI
	SUBL DI, R8
	LEAL -1(R8), SI
	CMPL SI, $0x3c
	JB   one_byte_match_emit_calcBlockSize
	CMPL SI, $0x00000100
	JB   two_bytes_match_emit_calcBlockSize
	CMPL SI, $0x00010000
	JB   three_bytes_match_emit_calcBlockSize
	CMPL SI, $0x01000000
	JB   four_bytes_match_emit_calcBlockSize
	ADDQ $0x05, AX
	JMP  memmove_long_match_emit_calcBlockSize

four_bytes_match_emit_calcBlockSize:
	ADDQ $0x04, AX
	JMP  memmove_long_match_emit_calcBlockSize

three_bytes_match_emit_calcBlockSize:
	ADDQ $0x03, AX
	JMP  memmove_long_match_emit_calcBlockSize

two_bytes_match_emit_calcBlockSize:
	ADDQ $0x02, AX
	CMPL SI, $0x40
	JB   memmove_match_emit_calcBlockSize
	JMP  memmove_long_match_emit_calcBlockSize

one_byte_match_emit_calcBlockSize:
	ADDQ $0x01, AX

memmove_match_emit_calcBlockSize:
	LEAQ (AX)(R8*1), AX
	JMP  emit_literal_done_match_emit_calcBlockSize

memmove_long_match_emit_calcBlockSize:
	LEAQ (AX)(R8*1), AX

emit_literal_done_match_emit_calcBlockSize:
	// Inner loop: extend the current match forward and account its copy,
	// then try to chain straight into another match without literals.
match_nolit_loop_calcBlockSize:
	MOVL CX, SI
	SUBL BX, SI
	MOVL SI, 16(SP)
	ADDL $0x04, CX
	ADDL $0x04, BX
	MOVQ src_len+8(FP), SI
	SUBL CX, SI
	LEAQ (DX)(CX*1), DI
	LEAQ (DX)(BX*1), BX

	// matchLen (same scheme as above); R9 = extra matched bytes past the 4.
	XORL R9, R9

matchlen_loopback_16_match_nolit_calcBlockSize:
	CMPL SI, $0x10
	JB   matchlen_match8_match_nolit_calcBlockSize
	MOVQ (DI)(R9*1), R8
	MOVQ 8(DI)(R9*1), R10
	XORQ (BX)(R9*1), R8
	JNZ  matchlen_bsf_8_match_nolit_calcBlockSize
	XORQ 8(BX)(R9*1), R10
	JNZ  matchlen_bsf_16match_nolit_calcBlockSize
	LEAL -16(SI), SI
	LEAL 16(R9), R9
	JMP  matchlen_loopback_16_match_nolit_calcBlockSize

matchlen_bsf_16match_nolit_calcBlockSize:
#ifdef GOAMD64_v3
	TZCNTQ R10, R10

#else
	BSFQ R10, R10

#endif
	SARQ $0x03, R10
	LEAL 8(R9)(R10*1), R9
	JMP  match_nolit_end_calcBlockSize

matchlen_match8_match_nolit_calcBlockSize:
	CMPL SI, $0x08
	JB   matchlen_match4_match_nolit_calcBlockSize
	MOVQ (DI)(R9*1), R8
	XORQ (BX)(R9*1), R8
	JNZ  matchlen_bsf_8_match_nolit_calcBlockSize
	LEAL -8(SI), SI
	LEAL 8(R9), R9
	JMP  matchlen_match4_match_nolit_calcBlockSize

matchlen_bsf_8_match_nolit_calcBlockSize:
#ifdef GOAMD64_v3
	TZCNTQ R8, R8

#else
	BSFQ R8, R8

#endif
	SARQ $0x03, R8
	LEAL (R9)(R8*1), R9
	JMP  match_nolit_end_calcBlockSize

matchlen_match4_match_nolit_calcBlockSize:
	CMPL SI, $0x04
	JB   matchlen_match2_match_nolit_calcBlockSize
	MOVL (DI)(R9*1), R8
	CMPL (BX)(R9*1), R8
	JNE  matchlen_match2_match_nolit_calcBlockSize
	LEAL -4(SI), SI
	LEAL 4(R9), R9

matchlen_match2_match_nolit_calcBlockSize:
	CMPL SI, $0x01
	JE   matchlen_match1_match_nolit_calcBlockSize
	JB   match_nolit_end_calcBlockSize
	MOVW (DI)(R9*1), R8
	CMPW (BX)(R9*1), R8
	JNE  matchlen_match1_match_nolit_calcBlockSize
	LEAL 2(R9), R9
	SUBL $0x02, SI
	JZ   match_nolit_end_calcBlockSize

matchlen_match1_match_nolit_calcBlockSize:
	MOVB (DI)(R9*1), R8
	CMPB (BX)(R9*1), R8
	JNE  match_nolit_end_calcBlockSize
	LEAL 1(R9), R9

match_nolit_end_calcBlockSize:
	ADDL R9, CX
	MOVL 16(SP), BX
	ADDL $0x04, R9
	MOVL CX, 12(SP)

	// emitCopy (size only) — same cost model as the repeat path above.
	CMPL BX, $0x00010000
	JB   two_byte_offset_match_nolit_calcBlockSize

four_bytes_loop_back_match_nolit_calcBlockSize:
	CMPL R9, $0x40
	JBE  four_bytes_remain_match_nolit_calcBlockSize
	LEAL -64(R9), R9
	ADDQ $0x05, AX
	CMPL R9, $0x04
	JB   four_bytes_remain_match_nolit_calcBlockSize
	JMP  four_bytes_loop_back_match_nolit_calcBlockSize

four_bytes_remain_match_nolit_calcBlockSize:
	TESTL R9, R9
	JZ    match_nolit_emitcopy_end_calcBlockSize
	XORL  BX, BX
	ADDQ  $0x05, AX
	JMP   match_nolit_emitcopy_end_calcBlockSize

two_byte_offset_match_nolit_calcBlockSize:
	CMPL R9, $0x40
	JBE  two_byte_offset_short_match_nolit_calcBlockSize
	LEAL -60(R9), R9
	ADDQ $0x03, AX
	JMP  two_byte_offset_match_nolit_calcBlockSize

two_byte_offset_short_match_nolit_calcBlockSize:
	// SI (shifted tag) is computed but unused in this size-only variant.
	MOVL R9, SI
	SHLL $0x02, SI
	CMPL R9, $0x0c
	JAE  emit_copy_three_match_nolit_calcBlockSize
	CMPL BX, $0x00000800
	JAE  emit_copy_three_match_nolit_calcBlockSize
	ADDQ $0x02, AX
	JMP  match_nolit_emitcopy_end_calcBlockSize

emit_copy_three_match_nolit_calcBlockSize:
	ADDQ $0x03, AX

match_nolit_emitcopy_end_calcBlockSize:
	CMPL CX, 8(SP)
	JAE  emit_remainder_calcBlockSize
	// Hash the two 4-byte sequences ending/starting at s-2 to seed the
	// table, and chain into another match if the second one hits.
	MOVQ -2(DX)(CX*1), SI
	CMPQ AX, (SP)
	JB   match_nolit_dst_ok_calcBlockSize
	MOVQ $0x00000000, ret+24(FP)
	RET

match_nolit_dst_ok_calcBlockSize:
	MOVQ  $0x0000cf1bbcdcbf9b, R8
	MOVQ  SI, DI
	SHRQ  $0x10, SI
	MOVQ  SI, BX
	SHLQ  $0x10, DI
	IMULQ R8, DI
	SHRQ  $0x33, DI
	SHLQ  $0x10, BX
	IMULQ R8, BX
	SHRQ  $0x33, BX
	LEAL  -2(CX), R8
	LEAQ  24(SP)(BX*4), R9
	MOVL  (R9), BX
	MOVL  R8, 24(SP)(DI*4)
	MOVL  CX, (R9)
	CMPL  (DX)(BX*1), SI
	JEQ   match_nolit_loop_calcBlockSize
	INCL  CX
	JMP   search_loop_calcBlockSize

	// Trailing literals after the last match: budget check, then count
	// the tag bytes + literal bytes, then return AX.
emit_remainder_calcBlockSize:
	MOVQ src_len+8(FP), CX
	SUBL 12(SP), CX
	LEAQ 5(AX)(CX*1), CX
	CMPQ CX, (SP)
	JB   emit_remainder_ok_calcBlockSize
	MOVQ $0x00000000, ret+24(FP)
	RET

emit_remainder_ok_calcBlockSize:
	MOVQ src_len+8(FP), CX
	MOVL 12(SP), BX
	CMPL BX, CX
	JEQ  emit_literal_done_emit_remainder_calcBlockSize
	MOVL CX, SI
	MOVL CX, 12(SP)
	LEAQ (DX)(BX*1), CX
	SUBL BX, SI
	LEAL -1(SI), CX
	CMPL CX, $0x3c
	JB   one_byte_emit_remainder_calcBlockSize
	CMPL CX, $0x00000100
	JB   two_bytes_emit_remainder_calcBlockSize
	CMPL CX, $0x00010000
	JB   three_bytes_emit_remainder_calcBlockSize
	CMPL CX, $0x01000000
	JB   four_bytes_emit_remainder_calcBlockSize
	ADDQ $0x05, AX
	JMP  memmove_long_emit_remainder_calcBlockSize

four_bytes_emit_remainder_calcBlockSize:
	ADDQ $0x04, AX
	JMP  memmove_long_emit_remainder_calcBlockSize

three_bytes_emit_remainder_calcBlockSize:
	ADDQ $0x03, AX
	JMP  memmove_long_emit_remainder_calcBlockSize

two_bytes_emit_remainder_calcBlockSize:
	ADDQ $0x02, AX
	CMPL CX, $0x40
	JB   memmove_emit_remainder_calcBlockSize
	JMP  memmove_long_emit_remainder_calcBlockSize

one_byte_emit_remainder_calcBlockSize:
	ADDQ $0x01, AX

memmove_emit_remainder_calcBlockSize:
	LEAQ (AX)(SI*1), AX
	JMP  emit_literal_done_emit_remainder_calcBlockSize

memmove_long_emit_remainder_calcBlockSize:
	LEAQ (AX)(SI*1), AX

emit_literal_done_emit_remainder_calcBlockSize:
	MOVQ AX, ret+24(FP)
	RET
18446
// func calcBlockSizeSmall(src []byte) int
// Requires: BMI, SSE2
//
// calcBlockSizeSmall is the small-input variant of calcBlockSize: it also
// only accumulates the would-be encoded size in AX and returns it (or 0 on
// budget overflow). Differences from calcBlockSize visible below: a 2 KiB
// hash table (512 x 4-byte entries, 9-bit hash of 4 bytes via
// (x<<32 * 0x9e3779b1)>>55), a faster skip (step/16), only 1-3 byte literal
// tags, a 3-byte (not 5-byte) size margin, and no >=64 KiB-offset or
// >=2048-offset copy cases — presumably because input blocks are small
// enough that such offsets/lengths cannot occur; confirm against the caller.
//
// Stack layout matches calcBlockSize: (SP) budget, 8(SP) sLimit,
// 12(SP) nextEmit, 16(SP) repeat offset, 20(SP) nextS, 24(SP) hash table.
TEXT ·calcBlockSizeSmall(SB), $2072-32
	XORQ  AX, AX
	MOVQ  $0x00000010, CX
	LEAQ  24(SP), DX
	PXOR  X0, X0

	// Clear the 2 KiB hash table: 16 iterations x 128 bytes.
zero_loop_calcBlockSizeSmall:
	MOVOU X0, (DX)
	MOVOU X0, 16(DX)
	MOVOU X0, 32(DX)
	MOVOU X0, 48(DX)
	MOVOU X0, 64(DX)
	MOVOU X0, 80(DX)
	MOVOU X0, 96(DX)
	MOVOU X0, 112(DX)
	ADDQ  $0x80, DX
	DECQ  CX
	JNZ   zero_loop_calcBlockSizeSmall
	MOVL  $0x00000000, 12(SP)
	MOVQ  src_len+8(FP), CX
	LEAQ  -9(CX), DX
	LEAQ  -8(CX), BX
	MOVL  BX, 8(SP)
	SHRQ  $0x05, CX
	SUBL  CX, DX
	// AX is still 0 here; this just materializes the budget into DX.
	LEAQ  (AX)(DX*1), DX
	MOVQ  DX, (SP)
	MOVL  $0x00000001, CX
	MOVL  CX, 16(SP)
	MOVQ  src_base+0(FP), DX

	// Main scan loop; skip step grows by (s - nextEmit)/16.
search_loop_calcBlockSizeSmall:
	MOVL  CX, BX
	SUBL  12(SP), BX
	SHRL  $0x04, BX
	LEAL  4(CX)(BX*1), BX
	CMPL  BX, 8(SP)
	JAE   emit_remainder_calcBlockSizeSmall
	MOVQ  (DX)(CX*1), SI
	MOVL  BX, 20(SP)
	// 4-byte hash: SHLQ $0x20 keeps only the low 4 bytes; >>55 -> 9 bits.
	MOVQ  $0x9e3779b1, R8
	MOVQ  SI, R9
	MOVQ  SI, R10
	SHRQ  $0x08, R10
	SHLQ  $0x20, R9
	IMULQ R8, R9
	SHRQ  $0x37, R9
	SHLQ  $0x20, R10
	IMULQ R8, R10
	SHRQ  $0x37, R10
	MOVL  24(SP)(R9*4), BX
	MOVL  24(SP)(R10*4), DI
	MOVL  CX, 24(SP)(R9*4)
	LEAL  1(CX), R9
	MOVL  R9, 24(SP)(R10*4)
	MOVQ  SI, R9
	SHRQ  $0x10, R9
	SHLQ  $0x20, R9
	IMULQ R8, R9
	SHRQ  $0x37, R9
	// Check for a continuation of the current repeat offset at s+1.
	MOVL  CX, R8
	SUBL  16(SP), R8
	MOVL  1(DX)(R8*1), R10
	MOVQ  SI, R8
	SHRQ  $0x08, R8
	CMPL  R8, R10
	JNE   no_repeat_found_calcBlockSizeSmall
	LEAL  1(CX), SI
	MOVL  12(SP), BX
	MOVL  SI, DI
	SUBL  16(SP), DI
	JZ    repeat_extend_back_end_calcBlockSizeSmall

	// Extend the repeat match backwards while preceding bytes match.
repeat_extend_back_loop_calcBlockSizeSmall:
	CMPL SI, BX
	JBE  repeat_extend_back_end_calcBlockSizeSmall
	MOVB -1(DX)(DI*1), R8
	MOVB -1(DX)(SI*1), R9
	CMPB R8, R9
	JNE  repeat_extend_back_end_calcBlockSizeSmall
	LEAL -1(SI), SI
	DECL DI
	JNZ  repeat_extend_back_loop_calcBlockSizeSmall

	// Count the pending literal run: 1-3 tag bytes + the literals.
repeat_extend_back_end_calcBlockSizeSmall:
	MOVL 12(SP), BX
	CMPL BX, SI
	JEQ  emit_literal_done_repeat_emit_calcBlockSizeSmall
	MOVL SI, DI
	MOVL SI, 12(SP)
	LEAQ (DX)(BX*1), R8
	SUBL BX, DI
	LEAL -1(DI), BX
	CMPL BX, $0x3c
	JB   one_byte_repeat_emit_calcBlockSizeSmall
	CMPL BX, $0x00000100
	JB   two_bytes_repeat_emit_calcBlockSizeSmall
	// NOTE: this second JB reuses the flags from the CMPL above and is
	// never taken; control simply falls through to the next label.
	// Harmless generated-code artifact.
	JB   three_bytes_repeat_emit_calcBlockSizeSmall

three_bytes_repeat_emit_calcBlockSizeSmall:
	ADDQ $0x03, AX
	JMP  memmove_long_repeat_emit_calcBlockSizeSmall

two_bytes_repeat_emit_calcBlockSizeSmall:
	ADDQ $0x02, AX
	CMPL BX, $0x40
	JB   memmove_repeat_emit_calcBlockSizeSmall
	JMP  memmove_long_repeat_emit_calcBlockSizeSmall

one_byte_repeat_emit_calcBlockSizeSmall:
	ADDQ $0x01, AX

	// Size-only: no bytes are copied, just add the literal length.
memmove_repeat_emit_calcBlockSizeSmall:
	LEAQ (AX)(DI*1), AX
	JMP  emit_literal_done_repeat_emit_calcBlockSizeSmall

memmove_long_repeat_emit_calcBlockSizeSmall:
	LEAQ (AX)(DI*1), AX

emit_literal_done_repeat_emit_calcBlockSizeSmall:
	ADDL $0x05, CX
	MOVL CX, BX
	SUBL 16(SP), BX
	MOVQ src_len+8(FP), DI
	SUBL CX, DI
	LEAQ (DX)(CX*1), R8
	LEAQ (DX)(BX*1), BX

	// matchLen: 16 bytes per iteration, BSF/TZCNT to locate the first
	// mismatching byte. R10 accumulates the match length.
	XORL R10, R10

matchlen_loopback_16_repeat_extend_calcBlockSizeSmall:
	CMPL DI, $0x10
	JB   matchlen_match8_repeat_extend_calcBlockSizeSmall
	MOVQ (R8)(R10*1), R9
	MOVQ 8(R8)(R10*1), R11
	XORQ (BX)(R10*1), R9
	JNZ  matchlen_bsf_8_repeat_extend_calcBlockSizeSmall
	XORQ 8(BX)(R10*1), R11
	JNZ  matchlen_bsf_16repeat_extend_calcBlockSizeSmall
	LEAL -16(DI), DI
	LEAL 16(R10), R10
	JMP  matchlen_loopback_16_repeat_extend_calcBlockSizeSmall

matchlen_bsf_16repeat_extend_calcBlockSizeSmall:
#ifdef GOAMD64_v3
	TZCNTQ R11, R11

#else
	BSFQ R11, R11

#endif
	SARQ $0x03, R11
	LEAL 8(R10)(R11*1), R10
	JMP  repeat_extend_forward_end_calcBlockSizeSmall

matchlen_match8_repeat_extend_calcBlockSizeSmall:
	CMPL DI, $0x08
	JB   matchlen_match4_repeat_extend_calcBlockSizeSmall
	MOVQ (R8)(R10*1), R9
	XORQ (BX)(R10*1), R9
	JNZ  matchlen_bsf_8_repeat_extend_calcBlockSizeSmall
	LEAL -8(DI), DI
	LEAL 8(R10), R10
	JMP  matchlen_match4_repeat_extend_calcBlockSizeSmall

matchlen_bsf_8_repeat_extend_calcBlockSizeSmall:
#ifdef GOAMD64_v3
	TZCNTQ R9, R9

#else
	BSFQ R9, R9

#endif
	SARQ $0x03, R9
	LEAL (R10)(R9*1), R10
	JMP  repeat_extend_forward_end_calcBlockSizeSmall

matchlen_match4_repeat_extend_calcBlockSizeSmall:
	CMPL DI, $0x04
	JB   matchlen_match2_repeat_extend_calcBlockSizeSmall
	MOVL (R8)(R10*1), R9
	CMPL (BX)(R10*1), R9
	JNE  matchlen_match2_repeat_extend_calcBlockSizeSmall
	LEAL -4(DI), DI
	LEAL 4(R10), R10

matchlen_match2_repeat_extend_calcBlockSizeSmall:
	CMPL DI, $0x01
	JE   matchlen_match1_repeat_extend_calcBlockSizeSmall
	JB   repeat_extend_forward_end_calcBlockSizeSmall
	MOVW (R8)(R10*1), R9
	CMPW (BX)(R10*1), R9
	JNE  matchlen_match1_repeat_extend_calcBlockSizeSmall
	LEAL 2(R10), R10
	SUBL $0x02, DI
	JZ   repeat_extend_forward_end_calcBlockSizeSmall

matchlen_match1_repeat_extend_calcBlockSizeSmall:
	MOVB (R8)(R10*1), R9
	CMPB (BX)(R10*1), R9
	JNE  repeat_extend_forward_end_calcBlockSizeSmall
	LEAL 1(R10), R10

repeat_extend_forward_end_calcBlockSizeSmall:
	ADDL R10, CX
	MOVL CX, BX
	SUBL SI, BX
	MOVL 16(SP), SI

	// emitCopy (size only): 3 bytes per <=64-byte chunk, or 2 bytes when
	// the remaining length < 12. No offset tests here, unlike calcBlockSize.
two_byte_offset_repeat_as_copy_calcBlockSizeSmall:
	CMPL BX, $0x40
	JBE  two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall
	LEAL -60(BX), BX
	ADDQ $0x03, AX
	JMP  two_byte_offset_repeat_as_copy_calcBlockSizeSmall

two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall:
	// The shifted tag value in SI is computed but never read in this
	// size-only variant (generated-code leftover from the real encoder).
	MOVL BX, SI
	SHLL $0x02, SI
	CMPL BX, $0x0c
	JAE  emit_copy_three_repeat_as_copy_calcBlockSizeSmall
	ADDQ $0x02, AX
	JMP  repeat_end_emit_calcBlockSizeSmall

emit_copy_three_repeat_as_copy_calcBlockSizeSmall:
	ADDQ $0x03, AX

repeat_end_emit_calcBlockSizeSmall:
	MOVL CX, 12(SP)
	JMP  search_loop_calcBlockSizeSmall

	// No repeat: probe the s / s+1 / s+2 hash candidates in turn.
no_repeat_found_calcBlockSizeSmall:
	CMPL (DX)(BX*1), SI
	JEQ  candidate_match_calcBlockSizeSmall
	SHRQ $0x08, SI
	MOVL 24(SP)(R9*4), BX
	LEAL 2(CX), R8
	CMPL (DX)(DI*1), SI
	JEQ  candidate2_match_calcBlockSizeSmall
	MOVL R8, 24(SP)(R9*4)
	SHRQ $0x08, SI
	CMPL (DX)(BX*1), SI
	JEQ  candidate3_match_calcBlockSizeSmall
	MOVL 20(SP), CX
	JMP  search_loop_calcBlockSizeSmall

candidate3_match_calcBlockSizeSmall:
	ADDL $0x02, CX
	JMP  candidate_match_calcBlockSizeSmall

candidate2_match_calcBlockSizeSmall:
	MOVL R8, 24(SP)(R9*4)
	INCL CX
	MOVL DI, BX

candidate_match_calcBlockSizeSmall:
	MOVL  12(SP), SI
	TESTL BX, BX
	JZ    match_extend_back_end_calcBlockSizeSmall

	// Extend the candidate match backwards toward nextEmit.
match_extend_back_loop_calcBlockSizeSmall:
	CMPL CX, SI
	JBE  match_extend_back_end_calcBlockSizeSmall
	MOVB -1(DX)(BX*1), DI
	MOVB -1(DX)(CX*1), R8
	CMPB DI, R8
	JNE  match_extend_back_end_calcBlockSizeSmall
	LEAL -1(CX), CX
	DECL BX
	JZ   match_extend_back_end_calcBlockSizeSmall
	JMP  match_extend_back_loop_calcBlockSizeSmall

	// Budget check (3-byte margin here, vs 5 in calcBlockSize): return 0
	// if the projected size reaches the budget at (SP).
match_extend_back_end_calcBlockSizeSmall:
	MOVL CX, SI
	SUBL 12(SP), SI
	LEAQ 3(AX)(SI*1), SI
	CMPQ SI, (SP)
	JB   match_dst_size_check_calcBlockSizeSmall
	MOVQ $0x00000000, ret+24(FP)
	RET

	// Count the literal run preceding this match (1-3 tag bytes).
match_dst_size_check_calcBlockSizeSmall:
	MOVL CX, SI
	MOVL 12(SP), DI
	CMPL DI, SI
	JEQ  emit_literal_done_match_emit_calcBlockSizeSmall
	MOVL SI, R8
	MOVL SI, 12(SP)
	LEAQ (DX)(DI*1), SI
	SUBL DI, R8
	LEAL -1(R8), SI
	CMPL SI, $0x3c
	JB   one_byte_match_emit_calcBlockSizeSmall
	CMPL SI, $0x00000100
	JB   two_bytes_match_emit_calcBlockSizeSmall
	// Never-taken JB (same flags as above); falls through either way.
	JB   three_bytes_match_emit_calcBlockSizeSmall

three_bytes_match_emit_calcBlockSizeSmall:
	ADDQ $0x03, AX
	JMP  memmove_long_match_emit_calcBlockSizeSmall

two_bytes_match_emit_calcBlockSizeSmall:
	ADDQ $0x02, AX
	CMPL SI, $0x40
	JB   memmove_match_emit_calcBlockSizeSmall
	JMP  memmove_long_match_emit_calcBlockSizeSmall

one_byte_match_emit_calcBlockSizeSmall:
	ADDQ $0x01, AX

memmove_match_emit_calcBlockSizeSmall:
	LEAQ (AX)(R8*1), AX
	JMP  emit_literal_done_match_emit_calcBlockSizeSmall

memmove_long_match_emit_calcBlockSizeSmall:
	LEAQ (AX)(R8*1), AX

emit_literal_done_match_emit_calcBlockSizeSmall:
	// Inner loop: extend the match forward, count its copy, then try to
	// chain straight into another match without emitting literals.
match_nolit_loop_calcBlockSizeSmall:
	MOVL CX, SI
	SUBL BX, SI
	MOVL SI, 16(SP)
	ADDL $0x04, CX
	ADDL $0x04, BX
	MOVQ src_len+8(FP), SI
	SUBL CX, SI
	LEAQ (DX)(CX*1), DI
	LEAQ (DX)(BX*1), BX

	// matchLen; R9 = extra matched bytes beyond the initial 4.
	XORL R9, R9

matchlen_loopback_16_match_nolit_calcBlockSizeSmall:
	CMPL SI, $0x10
	JB   matchlen_match8_match_nolit_calcBlockSizeSmall
	MOVQ (DI)(R9*1), R8
	MOVQ 8(DI)(R9*1), R10
	XORQ (BX)(R9*1), R8
	JNZ  matchlen_bsf_8_match_nolit_calcBlockSizeSmall
	XORQ 8(BX)(R9*1), R10
	JNZ  matchlen_bsf_16match_nolit_calcBlockSizeSmall
	LEAL -16(SI), SI
	LEAL 16(R9), R9
	JMP  matchlen_loopback_16_match_nolit_calcBlockSizeSmall

matchlen_bsf_16match_nolit_calcBlockSizeSmall:
#ifdef GOAMD64_v3
	TZCNTQ R10, R10

#else
	BSFQ R10, R10

#endif
	SARQ $0x03, R10
	LEAL 8(R9)(R10*1), R9
	JMP  match_nolit_end_calcBlockSizeSmall

matchlen_match8_match_nolit_calcBlockSizeSmall:
	CMPL SI, $0x08
	JB   matchlen_match4_match_nolit_calcBlockSizeSmall
	MOVQ (DI)(R9*1), R8
	XORQ (BX)(R9*1), R8
	JNZ  matchlen_bsf_8_match_nolit_calcBlockSizeSmall
	LEAL -8(SI), SI
	LEAL 8(R9), R9
	JMP  matchlen_match4_match_nolit_calcBlockSizeSmall

matchlen_bsf_8_match_nolit_calcBlockSizeSmall:
#ifdef GOAMD64_v3
	TZCNTQ R8, R8

#else
	BSFQ R8, R8

#endif
	SARQ $0x03, R8
	LEAL (R9)(R8*1), R9
	JMP  match_nolit_end_calcBlockSizeSmall

matchlen_match4_match_nolit_calcBlockSizeSmall:
	CMPL SI, $0x04
	JB   matchlen_match2_match_nolit_calcBlockSizeSmall
	MOVL (DI)(R9*1), R8
	CMPL (BX)(R9*1), R8
	JNE  matchlen_match2_match_nolit_calcBlockSizeSmall
	LEAL -4(SI), SI
	LEAL 4(R9), R9

matchlen_match2_match_nolit_calcBlockSizeSmall:
	CMPL SI, $0x01
	JE   matchlen_match1_match_nolit_calcBlockSizeSmall
	JB   match_nolit_end_calcBlockSizeSmall
	MOVW (DI)(R9*1), R8
	CMPW (BX)(R9*1), R8
	JNE  matchlen_match1_match_nolit_calcBlockSizeSmall
	LEAL 2(R9), R9
	SUBL $0x02, SI
	JZ   match_nolit_end_calcBlockSizeSmall

matchlen_match1_match_nolit_calcBlockSizeSmall:
	MOVB (DI)(R9*1), R8
	CMPB (BX)(R9*1), R8
	JNE  match_nolit_end_calcBlockSizeSmall
	LEAL 1(R9), R9

match_nolit_end_calcBlockSizeSmall:
	ADDL R9, CX
	MOVL 16(SP), BX
	ADDL $0x04, R9
	MOVL CX, 12(SP)

	// emitCopy (size only) — same reduced cost model as the repeat path.
two_byte_offset_match_nolit_calcBlockSizeSmall:
	CMPL R9, $0x40
	JBE  two_byte_offset_short_match_nolit_calcBlockSizeSmall
	LEAL -60(R9), R9
	ADDQ $0x03, AX
	JMP  two_byte_offset_match_nolit_calcBlockSizeSmall

two_byte_offset_short_match_nolit_calcBlockSizeSmall:
	// BX (shifted tag) is computed but unused in this size-only variant.
	MOVL R9, BX
	SHLL $0x02, BX
	CMPL R9, $0x0c
	JAE  emit_copy_three_match_nolit_calcBlockSizeSmall
	ADDQ $0x02, AX
	JMP  match_nolit_emitcopy_end_calcBlockSizeSmall

emit_copy_three_match_nolit_calcBlockSizeSmall:
	ADDQ $0x03, AX

match_nolit_emitcopy_end_calcBlockSizeSmall:
	CMPL CX, 8(SP)
	JAE  emit_remainder_calcBlockSizeSmall
	// Re-seed the hash table around s-2 and chain into another match if
	// the second probe hits; bail to 0 if over budget.
	MOVQ -2(DX)(CX*1), SI
	CMPQ AX, (SP)
	JB   match_nolit_dst_ok_calcBlockSizeSmall
	MOVQ $0x00000000, ret+24(FP)
	RET

match_nolit_dst_ok_calcBlockSizeSmall:
	MOVQ  $0x9e3779b1, R8
	MOVQ  SI, DI
	SHRQ  $0x10, SI
	MOVQ  SI, BX
	SHLQ  $0x20, DI
	IMULQ R8, DI
	SHRQ  $0x37, DI
	SHLQ  $0x20, BX
	IMULQ R8, BX
	SHRQ  $0x37, BX
	LEAL  -2(CX), R8
	LEAQ  24(SP)(BX*4), R9
	MOVL  (R9), BX
	MOVL  R8, 24(SP)(DI*4)
	MOVL  CX, (R9)
	CMPL  (DX)(BX*1), SI
	JEQ   match_nolit_loop_calcBlockSizeSmall
	INCL  CX
	JMP   search_loop_calcBlockSizeSmall

	// Trailing literals after the last match: budget check (3-byte margin),
	// then count the tag + literal bytes and return AX.
emit_remainder_calcBlockSizeSmall:
	MOVQ src_len+8(FP), CX
	SUBL 12(SP), CX
	LEAQ 3(AX)(CX*1), CX
	CMPQ CX, (SP)
	JB   emit_remainder_ok_calcBlockSizeSmall
	MOVQ $0x00000000, ret+24(FP)
	RET

emit_remainder_ok_calcBlockSizeSmall:
	MOVQ src_len+8(FP), CX
	MOVL 12(SP), BX
	CMPL BX, CX
	JEQ  emit_literal_done_emit_remainder_calcBlockSizeSmall
	MOVL CX, SI
	MOVL CX, 12(SP)
	LEAQ (DX)(BX*1), CX
	SUBL BX, SI
	LEAL -1(SI), CX
	CMPL CX, $0x3c
	JB   one_byte_emit_remainder_calcBlockSizeSmall
	CMPL CX, $0x00000100
	JB   two_bytes_emit_remainder_calcBlockSizeSmall
	// Never-taken JB (same flags as above); falls through either way.
	JB   three_bytes_emit_remainder_calcBlockSizeSmall

three_bytes_emit_remainder_calcBlockSizeSmall:
	ADDQ $0x03, AX
	JMP  memmove_long_emit_remainder_calcBlockSizeSmall

two_bytes_emit_remainder_calcBlockSizeSmall:
	ADDQ $0x02, AX
	CMPL CX, $0x40
	JB   memmove_emit_remainder_calcBlockSizeSmall
	JMP  memmove_long_emit_remainder_calcBlockSizeSmall

one_byte_emit_remainder_calcBlockSizeSmall:
	ADDQ $0x01, AX

memmove_emit_remainder_calcBlockSizeSmall:
	LEAQ (AX)(SI*1), AX
	JMP  emit_literal_done_emit_remainder_calcBlockSizeSmall

memmove_long_emit_remainder_calcBlockSizeSmall:
	LEAQ (AX)(SI*1), AX

emit_literal_done_emit_remainder_calcBlockSizeSmall:
	MOVQ AX, ret+24(FP)
	RET
18959
// func emitLiteral(dst []byte, lit []byte) int
// Requires: SSE2
//
// emitLiteral writes the literals in lit to dst as an S2 literal element
// (tag byte with length encoded in 1-5 bytes, followed by the raw bytes)
// and returns the total number of bytes written.
//
// Register use: AX = dst write pointer, CX = lit read pointer,
// DX = literal count, BX = running byte count (returned in ret+48(FP)),
// SI = length-1 used to pick the header size.
//
// NOTE: avo-generated code (gen.go); do not edit by hand.
TEXT ·emitLiteral(SB), NOSPLIT, $0-56
	MOVQ  lit_len+32(FP), DX
	MOVQ  dst_base+0(FP), AX
	MOVQ  lit_base+24(FP), CX
	TESTQ DX, DX
	JZ    emit_literal_end_standalone_skip
	MOVL  DX, BX
	LEAL  -1(DX), SI

	// Select the header encoding from length-1: 1, 2, 3, 4 or 5 bytes.
	CMPL  SI, $0x3c
	JB    one_byte_standalone
	CMPL  SI, $0x00000100
	JB    two_bytes_standalone
	CMPL  SI, $0x00010000
	JB    three_bytes_standalone
	CMPL  SI, $0x01000000
	JB    four_bytes_standalone

	// 5-byte header: tag 0xfc, then length-1 as a 32-bit value.
	MOVB  $0xfc, (AX)
	MOVL  SI, 1(AX)
	ADDQ  $0x05, BX
	ADDQ  $0x05, AX
	JMP   memmove_long_standalone

four_bytes_standalone:
	// 4-byte header: tag 0xf8, then length-1 as a 24-bit value.
	MOVL  SI, DI
	SHRL  $0x10, DI
	MOVB  $0xf8, (AX)
	MOVW  SI, 1(AX)
	MOVB  DI, 3(AX)
	ADDQ  $0x04, BX
	ADDQ  $0x04, AX
	JMP   memmove_long_standalone

three_bytes_standalone:
	// 3-byte header: tag 0xf4, then length-1 as a 16-bit value.
	MOVB  $0xf4, (AX)
	MOVW  SI, 1(AX)
	ADDQ  $0x03, BX
	ADDQ  $0x03, AX
	JMP   memmove_long_standalone

two_bytes_standalone:
	// 2-byte header: tag 0xf0, then length-1 as an 8-bit value.
	MOVB  $0xf0, (AX)
	MOVB  SI, 1(AX)
	ADDQ  $0x02, BX
	ADDQ  $0x02, AX
	CMPL  SI, $0x40
	JB    memmove_standalone
	JMP   memmove_long_standalone

one_byte_standalone:
	// 1-byte header: (length-1)<<2, literal tag in the low bits.
	SHLB  $0x02, SI
	MOVB  SI, (AX)
	ADDQ  $0x01, BX
	ADDQ  $0x01, AX

memmove_standalone:
	// genMemMoveShort: copy up to 64 literal bytes, branching on size class.
	// Each case loads from both ends so the (possibly overlapping) tail is
	// covered without a loop.
	CMPQ  DX, $0x03
	JB    emit_lit_memmove_standalone_memmove_move_1or2
	JE    emit_lit_memmove_standalone_memmove_move_3
	CMPQ  DX, $0x08
	JB    emit_lit_memmove_standalone_memmove_move_4through7
	CMPQ  DX, $0x10
	JBE   emit_lit_memmove_standalone_memmove_move_8through16
	CMPQ  DX, $0x20
	JBE   emit_lit_memmove_standalone_memmove_move_17through32
	JMP   emit_lit_memmove_standalone_memmove_move_33through64

emit_lit_memmove_standalone_memmove_move_1or2:
	MOVB  (CX), SI
	MOVB  -1(CX)(DX*1), CL
	MOVB  SI, (AX)
	MOVB  CL, -1(AX)(DX*1)
	JMP   emit_literal_end_standalone

emit_lit_memmove_standalone_memmove_move_3:
	MOVW  (CX), SI
	MOVB  2(CX), CL
	MOVW  SI, (AX)
	MOVB  CL, 2(AX)
	JMP   emit_literal_end_standalone

emit_lit_memmove_standalone_memmove_move_4through7:
	MOVL  (CX), SI
	MOVL  -4(CX)(DX*1), CX
	MOVL  SI, (AX)
	MOVL  CX, -4(AX)(DX*1)
	JMP   emit_literal_end_standalone

emit_lit_memmove_standalone_memmove_move_8through16:
	MOVQ  (CX), SI
	MOVQ  -8(CX)(DX*1), CX
	MOVQ  SI, (AX)
	MOVQ  CX, -8(AX)(DX*1)
	JMP   emit_literal_end_standalone

emit_lit_memmove_standalone_memmove_move_17through32:
	MOVOU (CX), X0
	MOVOU -16(CX)(DX*1), X1
	MOVOU X0, (AX)
	MOVOU X1, -16(AX)(DX*1)
	JMP   emit_literal_end_standalone

emit_lit_memmove_standalone_memmove_move_33through64:
	MOVOU (CX), X0
	MOVOU 16(CX), X1
	MOVOU -32(CX)(DX*1), X2
	MOVOU -16(CX)(DX*1), X3
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(DX*1)
	MOVOU X3, -16(AX)(DX*1)
	JMP   emit_literal_end_standalone
	// Duplicate jump below is unreachable generator output; kept as emitted.
	JMP   emit_literal_end_standalone

memmove_long_standalone:
	// genMemMoveLong: copy >64 bytes with 32-byte SSE chunks.
	// Head/tail (X0-X3) are saved first and stored last so unaligned edges
	// are always covered; the middle runs MOVOA-aligned on the destination.
	MOVOU (CX), X0
	MOVOU 16(CX), X1
	MOVOU -32(CX)(DX*1), X2
	MOVOU -16(CX)(DX*1), X3
	MOVQ  DX, DI
	SHRQ  $0x05, DI
	MOVQ  AX, SI
	ANDL  $0x0000001f, SI
	MOVQ  $0x00000040, R8
	SUBQ  SI, R8
	DECQ  DI
	JA    emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
	LEAQ  -32(CX)(R8*1), SI
	LEAQ  -32(AX)(R8*1), R9

emit_lit_memmove_long_standalonelarge_big_loop_back:
	MOVOU (SI), X4
	MOVOU 16(SI), X5
	MOVOA X4, (R9)
	MOVOA X5, 16(R9)
	ADDQ  $0x20, R9
	ADDQ  $0x20, SI
	ADDQ  $0x20, R8
	DECQ  DI
	JNA   emit_lit_memmove_long_standalonelarge_big_loop_back

emit_lit_memmove_long_standalonelarge_forward_sse_loop_32:
	MOVOU -32(CX)(R8*1), X4
	MOVOU -16(CX)(R8*1), X5
	MOVOA X4, -32(AX)(R8*1)
	MOVOA X5, -16(AX)(R8*1)
	ADDQ  $0x20, R8
	CMPQ  DX, R8
	JAE   emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(DX*1)
	MOVOU X3, -16(AX)(DX*1)
	JMP   emit_literal_end_standalone
	// Duplicate jump below is unreachable generator output; kept as emitted.
	JMP   emit_literal_end_standalone

emit_literal_end_standalone_skip:
	// Zero-length literal: report 0 bytes written.
	XORQ  BX, BX

emit_literal_end_standalone:
	MOVQ  BX, ret+48(FP)
	RET
19125
// func emitRepeat(dst []byte, offset int, length int) int
//
// emitRepeat writes an S2 "repeat" element (copy re-using the previous
// offset) of the given length to dst and returns the bytes written.
// Very long lengths are emitted as a chain of maximum-size repeats
// (the LEAL -16842747 / loop path).
//
// Register use: AX = dst write pointer, CX = offset, DX = length,
// SI = scratch, BX = running byte count (returned in ret+40(FP)).
//
// NOTE: avo-generated code (gen.go); do not edit by hand.
TEXT ·emitRepeat(SB), NOSPLIT, $0-48
	XORQ BX, BX
	MOVQ dst_base+0(FP), AX
	MOVQ offset+24(FP), CX
	MOVQ length+32(FP), DX

	// emitRepeat
emit_repeat_again_standalone:
	MOVL DX, SI
	LEAL -4(DX), DX

	// Pick the smallest encoding that fits the (biased) length.
	CMPL SI, $0x08
	JBE  repeat_two_standalone
	CMPL SI, $0x0c
	JAE  cant_repeat_two_offset_standalone
	CMPL CX, $0x00000800
	JB   repeat_two_offset_standalone

cant_repeat_two_offset_standalone:
	CMPL DX, $0x00000104
	JB   repeat_three_standalone
	CMPL DX, $0x00010100
	JB   repeat_four_standalone
	CMPL DX, $0x0100ffff
	JB   repeat_five_standalone

	// Length too large for one element: emit a maximal 5-byte repeat
	// and loop for the remainder.
	LEAL -16842747(DX), DX
	MOVL $0xfffb001d, (AX)
	MOVB $0xff, 4(AX)
	ADDQ $0x05, AX
	ADDQ $0x05, BX
	JMP  emit_repeat_again_standalone

repeat_five_standalone:
	// 5-byte encoding: 24-bit remaining length.
	LEAL -65536(DX), DX
	MOVL DX, CX
	MOVW $0x001d, (AX)
	MOVW DX, 2(AX)
	SARL $0x10, CX
	MOVB CL, 4(AX)
	ADDQ $0x05, BX
	ADDQ $0x05, AX
	JMP  gen_emit_repeat_end

repeat_four_standalone:
	// 4-byte encoding: 16-bit remaining length.
	LEAL -256(DX), DX
	MOVW $0x0019, (AX)
	MOVW DX, 2(AX)
	ADDQ $0x04, BX
	ADDQ $0x04, AX
	JMP  gen_emit_repeat_end

repeat_three_standalone:
	// 3-byte encoding: 8-bit remaining length.
	LEAL -4(DX), DX
	MOVW $0x0015, (AX)
	MOVB DL, 2(AX)
	ADDQ $0x03, BX
	ADDQ $0x03, AX
	JMP  gen_emit_repeat_end

repeat_two_standalone:
	// 2-byte encoding: short length packed into the tag.
	SHLL $0x02, DX
	ORL  $0x01, DX
	MOVW DX, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	JMP  gen_emit_repeat_end

repeat_two_offset_standalone:
	// 2-byte encoding with offset bits folded into the first byte
	// (offset < 2048 path).
	XORQ SI, SI
	LEAL 1(SI)(DX*4), DX
	MOVB CL, 1(AX)
	SARL $0x08, CX
	SHLL $0x05, CX
	ORL  CX, DX
	MOVB DL, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX

gen_emit_repeat_end:
	MOVQ BX, ret+40(FP)
	RET
19207
// func emitCopy(dst []byte, offset int, length int) int
//
// emitCopy writes an S2 copy element (offset/length back-reference) to dst
// and returns the bytes written. Long matches are split into a maximal
// copy followed by repeat elements (the inlined emitRepeat sections below),
// which re-use the just-emitted offset and encode more compactly.
//
// Register use: AX = dst write pointer, CX = offset, DX = length,
// SI/DI = scratch, BX = running byte count (returned in ret+40(FP)).
//
// NOTE: avo-generated code (gen.go); do not edit by hand.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
	XORQ BX, BX
	MOVQ dst_base+0(FP), AX
	MOVQ offset+24(FP), CX
	MOVQ length+32(FP), DX

	// emitCopy
	CMPL CX, $0x00010000
	JB   two_byte_offset_standalone

	// Offset needs 4 bytes: emit a maximal 5-byte copy (tag 0xff) and fall
	// through to a repeat for any remaining length.
	CMPL DX, $0x40
	JBE  four_bytes_remain_standalone
	MOVB $0xff, (AX)
	MOVL CX, 1(AX)
	LEAL -64(DX), DX
	ADDQ $0x05, BX
	ADDQ $0x05, AX
	CMPL DX, $0x04
	JB   four_bytes_remain_standalone

	// emitRepeat (inlined) for the remainder after a 4-byte-offset copy.
emit_repeat_again_standalone_emit_copy:
	MOVL DX, SI
	LEAL -4(DX), DX
	CMPL SI, $0x08
	JBE  repeat_two_standalone_emit_copy
	CMPL SI, $0x0c
	JAE  cant_repeat_two_offset_standalone_emit_copy
	CMPL CX, $0x00000800
	JB   repeat_two_offset_standalone_emit_copy

cant_repeat_two_offset_standalone_emit_copy:
	CMPL DX, $0x00000104
	JB   repeat_three_standalone_emit_copy
	CMPL DX, $0x00010100
	JB   repeat_four_standalone_emit_copy
	CMPL DX, $0x0100ffff
	JB   repeat_five_standalone_emit_copy
	LEAL -16842747(DX), DX
	MOVL $0xfffb001d, (AX)
	MOVB $0xff, 4(AX)
	ADDQ $0x05, AX
	ADDQ $0x05, BX
	JMP  emit_repeat_again_standalone_emit_copy

repeat_five_standalone_emit_copy:
	LEAL -65536(DX), DX
	MOVL DX, CX
	MOVW $0x001d, (AX)
	MOVW DX, 2(AX)
	SARL $0x10, CX
	MOVB CL, 4(AX)
	ADDQ $0x05, BX
	ADDQ $0x05, AX
	JMP  gen_emit_copy_end

repeat_four_standalone_emit_copy:
	LEAL -256(DX), DX
	MOVW $0x0019, (AX)
	MOVW DX, 2(AX)
	ADDQ $0x04, BX
	ADDQ $0x04, AX
	JMP  gen_emit_copy_end

repeat_three_standalone_emit_copy:
	LEAL -4(DX), DX
	MOVW $0x0015, (AX)
	MOVB DL, 2(AX)
	ADDQ $0x03, BX
	ADDQ $0x03, AX
	JMP  gen_emit_copy_end

repeat_two_standalone_emit_copy:
	SHLL $0x02, DX
	ORL  $0x01, DX
	MOVW DX, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	JMP  gen_emit_copy_end

repeat_two_offset_standalone_emit_copy:
	XORQ SI, SI
	LEAL 1(SI)(DX*4), DX
	MOVB CL, 1(AX)
	SARL $0x08, CX
	SHLL $0x05, CX
	ORL  CX, DX
	MOVB DL, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	JMP  gen_emit_copy_end

four_bytes_remain_standalone:
	// Final short copy with a 4-byte offset (tag copy-4); nothing to emit
	// if the remaining length is zero.
	TESTL DX, DX
	JZ    gen_emit_copy_end
	XORL  SI, SI
	LEAL  -1(SI)(DX*4), DX
	MOVB  DL, (AX)
	MOVL  CX, 1(AX)
	ADDQ  $0x05, BX
	ADDQ  $0x05, AX
	JMP   gen_emit_copy_end

two_byte_offset_standalone:
	// Offset fits in 2 bytes.
	CMPL DX, $0x40
	JBE  two_byte_offset_short_standalone
	CMPL CX, $0x00000800
	JAE  long_offset_short_standalone

	// Small offset (<2048): emit a copy-1 element covering 8 bytes, then
	// encode the rest as repeats.
	MOVL $0x00000001, SI
	LEAL 16(SI), SI
	MOVB CL, 1(AX)
	MOVL CX, DI
	SHRL $0x08, DI
	SHLL $0x05, DI
	ORL  DI, SI
	MOVB SI, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	SUBL $0x08, DX

	// emitRepeat (inlined); enter past the first re-bias since DX was
	// already adjusted above.
	LEAL -4(DX), DX
	JMP  cant_repeat_two_offset_standalone_emit_copy_short_2b

emit_repeat_again_standalone_emit_copy_short_2b:
	MOVL DX, SI
	LEAL -4(DX), DX
	CMPL SI, $0x08
	JBE  repeat_two_standalone_emit_copy_short_2b
	CMPL SI, $0x0c
	JAE  cant_repeat_two_offset_standalone_emit_copy_short_2b
	CMPL CX, $0x00000800
	JB   repeat_two_offset_standalone_emit_copy_short_2b

cant_repeat_two_offset_standalone_emit_copy_short_2b:
	CMPL DX, $0x00000104
	JB   repeat_three_standalone_emit_copy_short_2b
	CMPL DX, $0x00010100
	JB   repeat_four_standalone_emit_copy_short_2b
	CMPL DX, $0x0100ffff
	JB   repeat_five_standalone_emit_copy_short_2b
	LEAL -16842747(DX), DX
	MOVL $0xfffb001d, (AX)
	MOVB $0xff, 4(AX)
	ADDQ $0x05, AX
	ADDQ $0x05, BX
	JMP  emit_repeat_again_standalone_emit_copy_short_2b

repeat_five_standalone_emit_copy_short_2b:
	LEAL -65536(DX), DX
	MOVL DX, CX
	MOVW $0x001d, (AX)
	MOVW DX, 2(AX)
	SARL $0x10, CX
	MOVB CL, 4(AX)
	ADDQ $0x05, BX
	ADDQ $0x05, AX
	JMP  gen_emit_copy_end

repeat_four_standalone_emit_copy_short_2b:
	LEAL -256(DX), DX
	MOVW $0x0019, (AX)
	MOVW DX, 2(AX)
	ADDQ $0x04, BX
	ADDQ $0x04, AX
	JMP  gen_emit_copy_end

repeat_three_standalone_emit_copy_short_2b:
	LEAL -4(DX), DX
	MOVW $0x0015, (AX)
	MOVB DL, 2(AX)
	ADDQ $0x03, BX
	ADDQ $0x03, AX
	JMP  gen_emit_copy_end

repeat_two_standalone_emit_copy_short_2b:
	SHLL $0x02, DX
	ORL  $0x01, DX
	MOVW DX, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	JMP  gen_emit_copy_end

repeat_two_offset_standalone_emit_copy_short_2b:
	XORQ SI, SI
	LEAL 1(SI)(DX*4), DX
	MOVB CL, 1(AX)
	SARL $0x08, CX
	SHLL $0x05, CX
	ORL  CX, DX
	MOVB DL, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	JMP  gen_emit_copy_end

long_offset_short_standalone:
	// 2-byte offset >= 2048: emit a maximal 3-byte copy-2 (tag 0xee),
	// then encode the rest as repeats.
	MOVB $0xee, (AX)
	MOVW CX, 1(AX)
	LEAL -60(DX), DX
	ADDQ $0x03, AX
	ADDQ $0x03, BX

	// emitRepeat (inlined) for the remainder after the copy-2.
emit_repeat_again_standalone_emit_copy_short:
	MOVL DX, SI
	LEAL -4(DX), DX
	CMPL SI, $0x08
	JBE  repeat_two_standalone_emit_copy_short
	CMPL SI, $0x0c
	JAE  cant_repeat_two_offset_standalone_emit_copy_short
	CMPL CX, $0x00000800
	JB   repeat_two_offset_standalone_emit_copy_short

cant_repeat_two_offset_standalone_emit_copy_short:
	CMPL DX, $0x00000104
	JB   repeat_three_standalone_emit_copy_short
	CMPL DX, $0x00010100
	JB   repeat_four_standalone_emit_copy_short
	CMPL DX, $0x0100ffff
	JB   repeat_five_standalone_emit_copy_short
	LEAL -16842747(DX), DX
	MOVL $0xfffb001d, (AX)
	MOVB $0xff, 4(AX)
	ADDQ $0x05, AX
	ADDQ $0x05, BX
	JMP  emit_repeat_again_standalone_emit_copy_short

repeat_five_standalone_emit_copy_short:
	LEAL -65536(DX), DX
	MOVL DX, CX
	MOVW $0x001d, (AX)
	MOVW DX, 2(AX)
	SARL $0x10, CX
	MOVB CL, 4(AX)
	ADDQ $0x05, BX
	ADDQ $0x05, AX
	JMP  gen_emit_copy_end

repeat_four_standalone_emit_copy_short:
	LEAL -256(DX), DX
	MOVW $0x0019, (AX)
	MOVW DX, 2(AX)
	ADDQ $0x04, BX
	ADDQ $0x04, AX
	JMP  gen_emit_copy_end

repeat_three_standalone_emit_copy_short:
	LEAL -4(DX), DX
	MOVW $0x0015, (AX)
	MOVB DL, 2(AX)
	ADDQ $0x03, BX
	ADDQ $0x03, AX
	JMP  gen_emit_copy_end

repeat_two_standalone_emit_copy_short:
	SHLL $0x02, DX
	ORL  $0x01, DX
	MOVW DX, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	JMP  gen_emit_copy_end

repeat_two_offset_standalone_emit_copy_short:
	XORQ SI, SI
	LEAL 1(SI)(DX*4), DX
	MOVB CL, 1(AX)
	SARL $0x08, CX
	SHLL $0x05, CX
	ORL  CX, DX
	MOVB DL, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	JMP  gen_emit_copy_end

two_byte_offset_short_standalone:
	// Short copy (length <= 64) with a 2-byte offset: use the 2-byte
	// copy-1 form when length < 12 and offset < 2048, else 3-byte copy-2.
	MOVL DX, SI
	SHLL $0x02, SI
	CMPL DX, $0x0c
	JAE  emit_copy_three_standalone
	CMPL CX, $0x00000800
	JAE  emit_copy_three_standalone
	LEAL -15(SI), SI
	MOVB CL, 1(AX)
	SHRL $0x08, CX
	SHLL $0x05, CX
	ORL  CX, SI
	MOVB SI, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	JMP  gen_emit_copy_end

emit_copy_three_standalone:
	// 3-byte copy-2: tag with length, then the 16-bit offset.
	LEAL -2(SI), SI
	MOVB SI, (AX)
	MOVW CX, 1(AX)
	ADDQ $0x03, BX
	ADDQ $0x03, AX

gen_emit_copy_end:
	MOVQ BX, ret+40(FP)
	RET
19509
// func emitCopyNoRepeat(dst []byte, offset int, length int) int
//
// emitCopyNoRepeat writes a copy element to dst using only Snappy-compatible
// encodings (no S2 repeat elements): long matches become a chain of maximal
// copy-4 elements. Returns the number of bytes written.
//
// Register use: AX = dst write pointer, CX = offset, DX = length,
// SI = scratch, BX = running byte count (returned in ret+40(FP)).
//
// NOTE: avo-generated code (gen.go); do not edit by hand.
TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48
	XORQ BX, BX
	MOVQ dst_base+0(FP), AX
	MOVQ offset+24(FP), CX
	MOVQ length+32(FP), DX

	// emitCopy
	CMPL CX, $0x00010000
	JB   two_byte_offset_standalone_snappy

four_bytes_loop_back_standalone_snappy:
	// 4-byte offset: emit maximal 5-byte copies (tag 0xff) until <= 64
	// bytes of length remain.
	CMPL DX, $0x40
	JBE  four_bytes_remain_standalone_snappy
	MOVB $0xff, (AX)
	MOVL CX, 1(AX)
	LEAL -64(DX), DX
	ADDQ $0x05, BX
	ADDQ $0x05, AX
	CMPL DX, $0x04
	JB   four_bytes_remain_standalone_snappy
	JMP  four_bytes_loop_back_standalone_snappy

four_bytes_remain_standalone_snappy:
	// Final short copy-4; nothing more to emit if length is zero.
	TESTL DX, DX
	JZ    gen_emit_copy_end_snappy
	XORL  SI, SI
	LEAL  -1(SI)(DX*4), DX
	MOVB  DL, (AX)
	MOVL  CX, 1(AX)
	ADDQ  $0x05, BX
	ADDQ  $0x05, AX
	JMP   gen_emit_copy_end_snappy

two_byte_offset_standalone_snappy:
	// 2-byte offset: emit maximal 3-byte copy-2 elements (tag 0xee)
	// until the remaining length fits a short form.
	CMPL DX, $0x40
	JBE  two_byte_offset_short_standalone_snappy
	MOVB $0xee, (AX)
	MOVW CX, 1(AX)
	LEAL -60(DX), DX
	ADDQ $0x03, AX
	ADDQ $0x03, BX
	JMP  two_byte_offset_standalone_snappy

two_byte_offset_short_standalone_snappy:
	// 2-byte copy-1 form when length < 12 and offset < 2048, else copy-2.
	MOVL DX, SI
	SHLL $0x02, SI
	CMPL DX, $0x0c
	JAE  emit_copy_three_standalone_snappy
	CMPL CX, $0x00000800
	JAE  emit_copy_three_standalone_snappy
	LEAL -15(SI), SI
	MOVB CL, 1(AX)
	SHRL $0x08, CX
	SHLL $0x05, CX
	ORL  CX, SI
	MOVB SI, (AX)
	ADDQ $0x02, BX
	ADDQ $0x02, AX
	JMP  gen_emit_copy_end_snappy

emit_copy_three_standalone_snappy:
	LEAL -2(SI), SI
	MOVB SI, (AX)
	MOVW CX, 1(AX)
	ADDQ $0x03, BX
	ADDQ $0x03, AX

gen_emit_copy_end_snappy:
	MOVQ BX, ret+40(FP)
	RET
19581
// func matchLen(a []byte, b []byte) int
// Requires: BMI
//
// matchLen returns the number of leading bytes that a and b have in common,
// comparing up to len(a) bytes. It compares 16 bytes per iteration, then
// 8/4/2/1-byte tails; on mismatch the exact match length inside the word is
// found with TZCNT (GOAMD64_v3) or BSF.
//
// Register use: AX = a pointer, CX = b pointer, DX = bytes remaining,
// SI = match length so far (returned in ret+48(FP)), BX/DI = compare scratch.
//
// NOTE: avo-generated code (gen.go); do not edit by hand.
TEXT ·matchLen(SB), NOSPLIT, $0-56
	MOVQ a_base+0(FP), AX
	MOVQ b_base+24(FP), CX
	MOVQ a_len+8(FP), DX

	// matchLen
	XORL SI, SI

matchlen_loopback_16_standalone:
	// Main loop: compare two 8-byte words per iteration.
	CMPL DX, $0x10
	JB   matchlen_match8_standalone
	MOVQ (AX)(SI*1), BX
	MOVQ 8(AX)(SI*1), DI
	XORQ (CX)(SI*1), BX
	JNZ  matchlen_bsf_8_standalone
	XORQ 8(CX)(SI*1), DI
	JNZ  matchlen_bsf_16standalone
	LEAL -16(DX), DX
	LEAL 16(SI), SI
	JMP  matchlen_loopback_16_standalone

matchlen_bsf_16standalone:
	// Mismatch in the second word: count matching low bytes via the
	// trailing-zero count of the XOR difference.
#ifdef GOAMD64_v3
	TZCNTQ DI, DI

#else
	BSFQ DI, DI

#endif
	SARQ $0x03, DI
	LEAL 8(SI)(DI*1), SI
	JMP  gen_match_len_end

matchlen_match8_standalone:
	CMPL DX, $0x08
	JB   matchlen_match4_standalone
	MOVQ (AX)(SI*1), BX
	XORQ (CX)(SI*1), BX
	JNZ  matchlen_bsf_8_standalone
	LEAL -8(DX), DX
	LEAL 8(SI), SI
	JMP  matchlen_match4_standalone

matchlen_bsf_8_standalone:
	// Mismatch in an 8-byte word: convert bit position to byte count.
#ifdef GOAMD64_v3
	TZCNTQ BX, BX

#else
	BSFQ BX, BX

#endif
	SARQ $0x03, BX
	LEAL (SI)(BX*1), SI
	JMP  gen_match_len_end

matchlen_match4_standalone:
	CMPL DX, $0x04
	JB   matchlen_match2_standalone
	MOVL (AX)(SI*1), BX
	CMPL (CX)(SI*1), BX
	JNE  matchlen_match2_standalone
	LEAL -4(DX), DX
	LEAL 4(SI), SI

matchlen_match2_standalone:
	CMPL DX, $0x01
	JE   matchlen_match1_standalone
	JB   gen_match_len_end
	MOVW (AX)(SI*1), BX
	CMPW (CX)(SI*1), BX
	JNE  matchlen_match1_standalone
	LEAL 2(SI), SI
	SUBL $0x02, DX
	JZ   gen_match_len_end

matchlen_match1_standalone:
	MOVB (AX)(SI*1), BL
	CMPB (CX)(SI*1), BL
	JNE  gen_match_len_end
	LEAL 1(SI), SI

gen_match_len_end:
	MOVQ SI, ret+48(FP)
	RET
19668
// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// Requires: SSE2
//
// cvtLZ4BlockAsm transcodes an LZ4 block in src directly into S2 format in
// dst without decompressing. It returns the uncompressed size of the block
// and the number of dst bytes used; on malformed input it returns
// uncompressed = -1, and on insufficient dst space uncompressed = -2
// (dstUsed is left untouched in both error cases).
//
// Register use: AX = dst write pointer, CX = dst limit (end-10),
// DX = src read pointer, BX = src end, SI = uncompressed bytes so far,
// DI = last emitted offset (for repeat detection), R8 = token/scratch,
// R9 = literal count / match offset, R10 = match length.
//
// NOTE: avo-generated code (gen.go); do not edit by hand.
TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64
	XORQ SI, SI
	MOVQ dst_base+0(FP), AX
	MOVQ dst_len+8(FP), CX
	MOVQ src_base+24(FP), DX
	MOVQ src_len+32(FP), BX
	LEAQ (DX)(BX*1), BX
	LEAQ -10(AX)(CX*1), CX
	XORQ DI, DI

lz4_s2_loop:
	// Per-sequence loop: bounds-check, then decode the LZ4 token
	// (high nibble R9 = literal length, low nibble R10 = match length).
	CMPQ    DX, BX
	JAE     lz4_s2_corrupt
	CMPQ    AX, CX
	JAE     lz4_s2_dstfull
	MOVBQZX (DX), R8
	MOVQ    R8, R9
	MOVQ    R8, R10
	SHRQ    $0x04, R9
	ANDQ    $0x0f, R10
	CMPQ    R8, $0xf0
	JB      lz4_s2_ll_end

lz4_s2_ll_loop:
	// Literal length 15: accumulate 0xff extension bytes.
	INCQ    DX
	CMPQ    DX, BX
	JAE     lz4_s2_corrupt
	MOVBQZX (DX), R8
	ADDQ    R8, R9
	CMPQ    R8, $0xff
	JEQ     lz4_s2_ll_loop

lz4_s2_ll_end:
	// R8 = end of the literal run in src; match length gets the LZ4 +4 bias.
	LEAQ  (DX)(R9*1), R8
	ADDQ  $0x04, R10
	CMPQ  R8, BX
	JAE   lz4_s2_corrupt
	INCQ  DX
	INCQ  R8
	TESTQ R9, R9
	JZ    lz4_s2_lits_done

	// Emit the literal run as an S2 literal element (inlined emitLiteral).
	LEAQ (AX)(R9*1), R11
	CMPQ R11, CX
	JAE  lz4_s2_dstfull
	ADDQ R9, SI
	LEAL -1(R9), R11
	CMPL R11, $0x3c
	JB   one_byte_lz4_s2
	CMPL R11, $0x00000100
	JB   two_bytes_lz4_s2
	CMPL R11, $0x00010000
	JB   three_bytes_lz4_s2
	CMPL R11, $0x01000000
	JB   four_bytes_lz4_s2
	MOVB $0xfc, (AX)
	MOVL R11, 1(AX)
	ADDQ $0x05, AX
	JMP  memmove_long_lz4_s2

four_bytes_lz4_s2:
	MOVL R11, R12
	SHRL $0x10, R12
	MOVB $0xf8, (AX)
	MOVW R11, 1(AX)
	MOVB R12, 3(AX)
	ADDQ $0x04, AX
	JMP  memmove_long_lz4_s2

three_bytes_lz4_s2:
	MOVB $0xf4, (AX)
	MOVW R11, 1(AX)
	ADDQ $0x03, AX
	JMP  memmove_long_lz4_s2

two_bytes_lz4_s2:
	MOVB $0xf0, (AX)
	MOVB R11, 1(AX)
	ADDQ $0x02, AX
	CMPL R11, $0x40
	JB   memmove_lz4_s2
	JMP  memmove_long_lz4_s2

one_byte_lz4_s2:
	SHLB $0x02, R11
	MOVB R11, (AX)
	ADDQ $0x01, AX

memmove_lz4_s2:
	LEAQ (AX)(R9*1), R11

	// genMemMoveShort: copy <= 64 literal bytes by size class.
	CMPQ R9, $0x08
	JBE  emit_lit_memmove_lz4_s2_memmove_move_8
	CMPQ R9, $0x10
	JBE  emit_lit_memmove_lz4_s2_memmove_move_8through16
	CMPQ R9, $0x20
	JBE  emit_lit_memmove_lz4_s2_memmove_move_17through32
	JMP  emit_lit_memmove_lz4_s2_memmove_move_33through64

emit_lit_memmove_lz4_s2_memmove_move_8:
	MOVQ (DX), R12
	MOVQ R12, (AX)
	JMP  memmove_end_copy_lz4_s2

emit_lit_memmove_lz4_s2_memmove_move_8through16:
	MOVQ (DX), R12
	MOVQ -8(DX)(R9*1), DX
	MOVQ R12, (AX)
	MOVQ DX, -8(AX)(R9*1)
	JMP  memmove_end_copy_lz4_s2

emit_lit_memmove_lz4_s2_memmove_move_17through32:
	MOVOU (DX), X0
	MOVOU -16(DX)(R9*1), X1
	MOVOU X0, (AX)
	MOVOU X1, -16(AX)(R9*1)
	JMP   memmove_end_copy_lz4_s2

emit_lit_memmove_lz4_s2_memmove_move_33through64:
	MOVOU (DX), X0
	MOVOU 16(DX), X1
	MOVOU -32(DX)(R9*1), X2
	MOVOU -16(DX)(R9*1), X3
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(R9*1)
	MOVOU X3, -16(AX)(R9*1)

memmove_end_copy_lz4_s2:
	MOVQ R11, AX
	JMP  lz4_s2_lits_emit_done

memmove_long_lz4_s2:
	LEAQ (AX)(R9*1), R11

	// genMemMoveLong: copy > 64 literal bytes in 32-byte SSE chunks;
	// head/tail saved in X0-X3 and stored last to cover unaligned edges.
	MOVOU (DX), X0
	MOVOU 16(DX), X1
	MOVOU -32(DX)(R9*1), X2
	MOVOU -16(DX)(R9*1), X3
	MOVQ  R9, R13
	SHRQ  $0x05, R13
	MOVQ  AX, R12
	ANDL  $0x0000001f, R12
	MOVQ  $0x00000040, R14
	SUBQ  R12, R14
	DECQ  R13
	JA    emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32
	LEAQ  -32(DX)(R14*1), R12
	LEAQ  -32(AX)(R14*1), R15

emit_lit_memmove_long_lz4_s2large_big_loop_back:
	MOVOU (R12), X4
	MOVOU 16(R12), X5
	MOVOA X4, (R15)
	MOVOA X5, 16(R15)
	ADDQ  $0x20, R15
	ADDQ  $0x20, R12
	ADDQ  $0x20, R14
	DECQ  R13
	JNA   emit_lit_memmove_long_lz4_s2large_big_loop_back

emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32:
	MOVOU -32(DX)(R14*1), X4
	MOVOU -16(DX)(R14*1), X5
	MOVOA X4, -32(AX)(R14*1)
	MOVOA X5, -16(AX)(R14*1)
	ADDQ  $0x20, R14
	CMPQ  R9, R14
	JAE   emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(R9*1)
	MOVOU X3, -16(AX)(R9*1)
	MOVQ  R11, AX

lz4_s2_lits_emit_done:
	MOVQ R8, DX

lz4_s2_lits_done:
	// End of input is only valid right after the literals of a final
	// token whose match length field was 0 (biased R10 == 4).
	CMPQ DX, BX
	JNE  lz4_s2_match
	CMPQ R10, $0x04
	JEQ  lz4_s2_done
	JMP  lz4_s2_corrupt

lz4_s2_match:
	// Read the 16-bit little-endian match offset; 0 or beyond the
	// produced output is corrupt.
	LEAQ    2(DX), R8
	CMPQ    R8, BX
	JAE     lz4_s2_corrupt
	MOVWQZX (DX), R9
	MOVQ    R8, DX
	TESTQ   R9, R9
	JZ      lz4_s2_corrupt
	CMPQ    R9, SI
	JA      lz4_s2_corrupt
	CMPQ    R10, $0x13
	JNE     lz4_s2_ml_done

lz4_s2_ml_loop:
	// Match length 15 (+4 bias = 0x13): accumulate 0xff extension bytes.
	MOVBQZX (DX), R8
	INCQ    DX
	ADDQ    R8, R10
	CMPQ    DX, BX
	JAE     lz4_s2_corrupt
	CMPQ    R8, $0xff
	JEQ     lz4_s2_ml_loop

lz4_s2_ml_done:
	// Same offset as the previous copy: emit an S2 repeat instead.
	ADDQ R10, SI
	CMPQ R9, DI
	JNE  lz4_s2_docopy

	// emitRepeat (inlined): R10 = length, R9 = offset.
emit_repeat_again_lz4_s2:
	MOVL R10, R8
	LEAL -4(R10), R10
	CMPL R8, $0x08
	JBE  repeat_two_lz4_s2
	CMPL R8, $0x0c
	JAE  cant_repeat_two_offset_lz4_s2
	CMPL R9, $0x00000800
	JB   repeat_two_offset_lz4_s2

cant_repeat_two_offset_lz4_s2:
	CMPL R10, $0x00000104
	JB   repeat_three_lz4_s2
	CMPL R10, $0x00010100
	JB   repeat_four_lz4_s2
	CMPL R10, $0x0100ffff
	JB   repeat_five_lz4_s2
	LEAL -16842747(R10), R10
	MOVL $0xfffb001d, (AX)
	MOVB $0xff, 4(AX)
	ADDQ $0x05, AX
	JMP  emit_repeat_again_lz4_s2

repeat_five_lz4_s2:
	LEAL -65536(R10), R10
	MOVL R10, R9
	MOVW $0x001d, (AX)
	MOVW R10, 2(AX)
	SARL $0x10, R9
	MOVB R9, 4(AX)
	ADDQ $0x05, AX
	JMP  lz4_s2_loop

repeat_four_lz4_s2:
	LEAL -256(R10), R10
	MOVW $0x0019, (AX)
	MOVW R10, 2(AX)
	ADDQ $0x04, AX
	JMP  lz4_s2_loop

repeat_three_lz4_s2:
	LEAL -4(R10), R10
	MOVW $0x0015, (AX)
	MOVB R10, 2(AX)
	ADDQ $0x03, AX
	JMP  lz4_s2_loop

repeat_two_lz4_s2:
	SHLL $0x02, R10
	ORL  $0x01, R10
	MOVW R10, (AX)
	ADDQ $0x02, AX
	JMP  lz4_s2_loop

repeat_two_offset_lz4_s2:
	XORQ R8, R8
	LEAL 1(R8)(R10*4), R10
	MOVB R9, 1(AX)
	SARL $0x08, R9
	SHLL $0x05, R9
	ORL  R9, R10
	MOVB R10, (AX)
	ADDQ $0x02, AX
	JMP  lz4_s2_loop

lz4_s2_docopy:
	// New offset: remember it in DI for future repeat detection.
	MOVQ R9, DI

	// emitCopy (inlined): R10 = length, R9 = offset (< 64 KiB here).
	CMPL R10, $0x40
	JBE  two_byte_offset_short_lz4_s2
	CMPL R9, $0x00000800
	JAE  long_offset_short_lz4_s2

	// Small offset (<2048): copy-1 covering 8 bytes, rest as repeats.
	MOVL $0x00000001, R8
	LEAL 16(R8), R8
	MOVB R9, 1(AX)
	MOVL R9, R11
	SHRL $0x08, R11
	SHLL $0x05, R11
	ORL  R11, R8
	MOVB R8, (AX)
	ADDQ $0x02, AX
	SUBL $0x08, R10

	// emitRepeat (inlined); enter past the first re-bias since R10 was
	// already adjusted above.
	LEAL -4(R10), R10
	JMP  cant_repeat_two_offset_lz4_s2_emit_copy_short_2b

emit_repeat_again_lz4_s2_emit_copy_short_2b:
	MOVL R10, R8
	LEAL -4(R10), R10
	CMPL R8, $0x08
	JBE  repeat_two_lz4_s2_emit_copy_short_2b
	CMPL R8, $0x0c
	JAE  cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
	CMPL R9, $0x00000800
	JB   repeat_two_offset_lz4_s2_emit_copy_short_2b

cant_repeat_two_offset_lz4_s2_emit_copy_short_2b:
	CMPL R10, $0x00000104
	JB   repeat_three_lz4_s2_emit_copy_short_2b
	CMPL R10, $0x00010100
	JB   repeat_four_lz4_s2_emit_copy_short_2b
	CMPL R10, $0x0100ffff
	JB   repeat_five_lz4_s2_emit_copy_short_2b
	LEAL -16842747(R10), R10
	MOVL $0xfffb001d, (AX)
	MOVB $0xff, 4(AX)
	ADDQ $0x05, AX
	JMP  emit_repeat_again_lz4_s2_emit_copy_short_2b

repeat_five_lz4_s2_emit_copy_short_2b:
	LEAL -65536(R10), R10
	MOVL R10, R9
	MOVW $0x001d, (AX)
	MOVW R10, 2(AX)
	SARL $0x10, R9
	MOVB R9, 4(AX)
	ADDQ $0x05, AX
	JMP  lz4_s2_loop

repeat_four_lz4_s2_emit_copy_short_2b:
	LEAL -256(R10), R10
	MOVW $0x0019, (AX)
	MOVW R10, 2(AX)
	ADDQ $0x04, AX
	JMP  lz4_s2_loop

repeat_three_lz4_s2_emit_copy_short_2b:
	LEAL -4(R10), R10
	MOVW $0x0015, (AX)
	MOVB R10, 2(AX)
	ADDQ $0x03, AX
	JMP  lz4_s2_loop

repeat_two_lz4_s2_emit_copy_short_2b:
	SHLL $0x02, R10
	ORL  $0x01, R10
	MOVW R10, (AX)
	ADDQ $0x02, AX
	JMP  lz4_s2_loop

repeat_two_offset_lz4_s2_emit_copy_short_2b:
	XORQ R8, R8
	LEAL 1(R8)(R10*4), R10
	MOVB R9, 1(AX)
	SARL $0x08, R9
	SHLL $0x05, R9
	ORL  R9, R10
	MOVB R10, (AX)
	ADDQ $0x02, AX
	JMP  lz4_s2_loop

long_offset_short_lz4_s2:
	// Offset >= 2048: maximal 3-byte copy-2 (tag 0xee), rest as repeats.
	MOVB $0xee, (AX)
	MOVW R9, 1(AX)
	LEAL -60(R10), R10
	ADDQ $0x03, AX

	// emitRepeat (inlined) for the remainder after the copy-2.
emit_repeat_again_lz4_s2_emit_copy_short:
	MOVL R10, R8
	LEAL -4(R10), R10
	CMPL R8, $0x08
	JBE  repeat_two_lz4_s2_emit_copy_short
	CMPL R8, $0x0c
	JAE  cant_repeat_two_offset_lz4_s2_emit_copy_short
	CMPL R9, $0x00000800
	JB   repeat_two_offset_lz4_s2_emit_copy_short

cant_repeat_two_offset_lz4_s2_emit_copy_short:
	CMPL R10, $0x00000104
	JB   repeat_three_lz4_s2_emit_copy_short
	CMPL R10, $0x00010100
	JB   repeat_four_lz4_s2_emit_copy_short
	CMPL R10, $0x0100ffff
	JB   repeat_five_lz4_s2_emit_copy_short
	LEAL -16842747(R10), R10
	MOVL $0xfffb001d, (AX)
	MOVB $0xff, 4(AX)
	ADDQ $0x05, AX
	JMP  emit_repeat_again_lz4_s2_emit_copy_short

repeat_five_lz4_s2_emit_copy_short:
	LEAL -65536(R10), R10
	MOVL R10, R9
	MOVW $0x001d, (AX)
	MOVW R10, 2(AX)
	SARL $0x10, R9
	MOVB R9, 4(AX)
	ADDQ $0x05, AX
	JMP  lz4_s2_loop

repeat_four_lz4_s2_emit_copy_short:
	LEAL -256(R10), R10
	MOVW $0x0019, (AX)
	MOVW R10, 2(AX)
	ADDQ $0x04, AX
	JMP  lz4_s2_loop

repeat_three_lz4_s2_emit_copy_short:
	LEAL -4(R10), R10
	MOVW $0x0015, (AX)
	MOVB R10, 2(AX)
	ADDQ $0x03, AX
	JMP  lz4_s2_loop

repeat_two_lz4_s2_emit_copy_short:
	SHLL $0x02, R10
	ORL  $0x01, R10
	MOVW R10, (AX)
	ADDQ $0x02, AX
	JMP  lz4_s2_loop

repeat_two_offset_lz4_s2_emit_copy_short:
	XORQ R8, R8
	LEAL 1(R8)(R10*4), R10
	MOVB R9, 1(AX)
	SARL $0x08, R9
	SHLL $0x05, R9
	ORL  R9, R10
	MOVB R10, (AX)
	ADDQ $0x02, AX
	JMP  lz4_s2_loop

two_byte_offset_short_lz4_s2:
	// Short copy (length <= 64): 2-byte copy-1 when length < 12 and
	// offset < 2048, else 3-byte copy-2.
	MOVL R10, R8
	SHLL $0x02, R8
	CMPL R10, $0x0c
	JAE  emit_copy_three_lz4_s2
	CMPL R9, $0x00000800
	JAE  emit_copy_three_lz4_s2
	LEAL -15(R8), R8
	MOVB R9, 1(AX)
	SHRL $0x08, R9
	SHLL $0x05, R9
	ORL  R9, R8
	MOVB R8, (AX)
	ADDQ $0x02, AX
	JMP  lz4_s2_loop

emit_copy_three_lz4_s2:
	LEAL -2(R8), R8
	MOVB R8, (AX)
	MOVW R9, 1(AX)
	ADDQ $0x03, AX
	JMP  lz4_s2_loop

lz4_s2_done:
	// Success: return uncompressed size and dst bytes consumed.
	MOVQ dst_base+0(FP), CX
	SUBQ CX, AX
	MOVQ SI, uncompressed+48(FP)
	MOVQ AX, dstUsed+56(FP)
	RET

lz4_s2_corrupt:
	// Malformed LZ4 input: uncompressed = -1.
	XORQ AX, AX
	LEAQ -1(AX), SI
	MOVQ SI, uncompressed+48(FP)
	RET

lz4_s2_dstfull:
	// dst too small: uncompressed = -2.
	XORQ AX, AX
	LEAQ -2(AX), SI
	MOVQ SI, uncompressed+48(FP)
	RET
20151
20152// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
20153// Requires: SSE2
20154TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64
20155 XORQ SI, SI
20156 MOVQ dst_base+0(FP), AX
20157 MOVQ dst_len+8(FP), CX
20158 MOVQ src_base+24(FP), DX
20159 MOVQ src_len+32(FP), BX
20160 LEAQ (DX)(BX*1), BX
20161 LEAQ -10(AX)(CX*1), CX
20162 XORQ DI, DI
20163
20164lz4s_s2_loop:
20165 CMPQ DX, BX
20166 JAE lz4s_s2_corrupt
20167 CMPQ AX, CX
20168 JAE lz4s_s2_dstfull
20169 MOVBQZX (DX), R8
20170 MOVQ R8, R9
20171 MOVQ R8, R10
20172 SHRQ $0x04, R9
20173 ANDQ $0x0f, R10
20174 CMPQ R8, $0xf0
20175 JB lz4s_s2_ll_end
20176
20177lz4s_s2_ll_loop:
20178 INCQ DX
20179 CMPQ DX, BX
20180 JAE lz4s_s2_corrupt
20181 MOVBQZX (DX), R8
20182 ADDQ R8, R9
20183 CMPQ R8, $0xff
20184 JEQ lz4s_s2_ll_loop
20185
20186lz4s_s2_ll_end:
20187 LEAQ (DX)(R9*1), R8
20188 ADDQ $0x03, R10
20189 CMPQ R8, BX
20190 JAE lz4s_s2_corrupt
20191 INCQ DX
20192 INCQ R8
20193 TESTQ R9, R9
20194 JZ lz4s_s2_lits_done
20195 LEAQ (AX)(R9*1), R11
20196 CMPQ R11, CX
20197 JAE lz4s_s2_dstfull
20198 ADDQ R9, SI
20199 LEAL -1(R9), R11
20200 CMPL R11, $0x3c
20201 JB one_byte_lz4s_s2
20202 CMPL R11, $0x00000100
20203 JB two_bytes_lz4s_s2
20204 CMPL R11, $0x00010000
20205 JB three_bytes_lz4s_s2
20206 CMPL R11, $0x01000000
20207 JB four_bytes_lz4s_s2
20208 MOVB $0xfc, (AX)
20209 MOVL R11, 1(AX)
20210 ADDQ $0x05, AX
20211 JMP memmove_long_lz4s_s2
20212
20213four_bytes_lz4s_s2:
20214 MOVL R11, R12
20215 SHRL $0x10, R12
20216 MOVB $0xf8, (AX)
20217 MOVW R11, 1(AX)
20218 MOVB R12, 3(AX)
20219 ADDQ $0x04, AX
20220 JMP memmove_long_lz4s_s2
20221
20222three_bytes_lz4s_s2:
20223 MOVB $0xf4, (AX)
20224 MOVW R11, 1(AX)
20225 ADDQ $0x03, AX
20226 JMP memmove_long_lz4s_s2
20227
20228two_bytes_lz4s_s2:
20229 MOVB $0xf0, (AX)
20230 MOVB R11, 1(AX)
20231 ADDQ $0x02, AX
20232 CMPL R11, $0x40
20233 JB memmove_lz4s_s2
20234 JMP memmove_long_lz4s_s2
20235
20236one_byte_lz4s_s2:
20237 SHLB $0x02, R11
20238 MOVB R11, (AX)
20239 ADDQ $0x01, AX
20240
20241memmove_lz4s_s2:
20242 LEAQ (AX)(R9*1), R11
20243
20244 // genMemMoveShort
20245 CMPQ R9, $0x08
20246 JBE emit_lit_memmove_lz4s_s2_memmove_move_8
20247 CMPQ R9, $0x10
20248 JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16
20249 CMPQ R9, $0x20
20250 JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32
20251 JMP emit_lit_memmove_lz4s_s2_memmove_move_33through64
20252
20253emit_lit_memmove_lz4s_s2_memmove_move_8:
20254 MOVQ (DX), R12
20255 MOVQ R12, (AX)
20256 JMP memmove_end_copy_lz4s_s2
20257
20258emit_lit_memmove_lz4s_s2_memmove_move_8through16:
20259 MOVQ (DX), R12
20260 MOVQ -8(DX)(R9*1), DX
20261 MOVQ R12, (AX)
20262 MOVQ DX, -8(AX)(R9*1)
20263 JMP memmove_end_copy_lz4s_s2
20264
20265emit_lit_memmove_lz4s_s2_memmove_move_17through32:
20266 MOVOU (DX), X0
20267 MOVOU -16(DX)(R9*1), X1
20268 MOVOU X0, (AX)
20269 MOVOU X1, -16(AX)(R9*1)
20270 JMP memmove_end_copy_lz4s_s2
20271
20272emit_lit_memmove_lz4s_s2_memmove_move_33through64:
20273 MOVOU (DX), X0
20274 MOVOU 16(DX), X1
20275 MOVOU -32(DX)(R9*1), X2
20276 MOVOU -16(DX)(R9*1), X3
20277 MOVOU X0, (AX)
20278 MOVOU X1, 16(AX)
20279 MOVOU X2, -32(AX)(R9*1)
20280 MOVOU X3, -16(AX)(R9*1)
20281
20282memmove_end_copy_lz4s_s2:
20283 MOVQ R11, AX
20284 JMP lz4s_s2_lits_emit_done
20285
20286memmove_long_lz4s_s2:
20287 LEAQ (AX)(R9*1), R11
20288
20289 // genMemMoveLong
20290 MOVOU (DX), X0
20291 MOVOU 16(DX), X1
20292 MOVOU -32(DX)(R9*1), X2
20293 MOVOU -16(DX)(R9*1), X3
20294 MOVQ R9, R13
20295 SHRQ $0x05, R13
20296 MOVQ AX, R12
20297 ANDL $0x0000001f, R12
20298 MOVQ $0x00000040, R14
20299 SUBQ R12, R14
20300 DECQ R13
20301 JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32
20302 LEAQ -32(DX)(R14*1), R12
20303 LEAQ -32(AX)(R14*1), R15
20304
20305emit_lit_memmove_long_lz4s_s2large_big_loop_back:
20306 MOVOU (R12), X4
20307 MOVOU 16(R12), X5
20308 MOVOA X4, (R15)
20309 MOVOA X5, 16(R15)
20310 ADDQ $0x20, R15
20311 ADDQ $0x20, R12
20312 ADDQ $0x20, R14
20313 DECQ R13
20314 JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back
20315
20316emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32:
20317 MOVOU -32(DX)(R14*1), X4
20318 MOVOU -16(DX)(R14*1), X5
20319 MOVOA X4, -32(AX)(R14*1)
20320 MOVOA X5, -16(AX)(R14*1)
20321 ADDQ $0x20, R14
20322 CMPQ R9, R14
20323 JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32
20324 MOVOU X0, (AX)
20325 MOVOU X1, 16(AX)
20326 MOVOU X2, -32(AX)(R9*1)
20327 MOVOU X3, -16(AX)(R9*1)
20328 MOVQ R11, AX
20329
20330lz4s_s2_lits_emit_done:
20331 MOVQ R8, DX
20332
20333lz4s_s2_lits_done:
20334 CMPQ DX, BX
20335 JNE lz4s_s2_match
20336 CMPQ R10, $0x03
20337 JEQ lz4s_s2_done
20338 JMP lz4s_s2_corrupt
20339
20340lz4s_s2_match:
20341 CMPQ R10, $0x03
20342 JEQ lz4s_s2_loop
20343 LEAQ 2(DX), R8
20344 CMPQ R8, BX
20345 JAE lz4s_s2_corrupt
20346 MOVWQZX (DX), R9
20347 MOVQ R8, DX
20348 TESTQ R9, R9
20349 JZ lz4s_s2_corrupt
20350 CMPQ R9, SI
20351 JA lz4s_s2_corrupt
20352 CMPQ R10, $0x12
20353 JNE lz4s_s2_ml_done
20354
20355lz4s_s2_ml_loop:
20356 MOVBQZX (DX), R8
20357 INCQ DX
20358 ADDQ R8, R10
20359 CMPQ DX, BX
20360 JAE lz4s_s2_corrupt
20361 CMPQ R8, $0xff
20362 JEQ lz4s_s2_ml_loop
20363
20364lz4s_s2_ml_done:
20365 ADDQ R10, SI
20366 CMPQ R9, DI
20367 JNE lz4s_s2_docopy
20368
20369 // emitRepeat
20370emit_repeat_again_lz4_s2:
20371 MOVL R10, R8
20372 LEAL -4(R10), R10
20373 CMPL R8, $0x08
20374 JBE repeat_two_lz4_s2
20375 CMPL R8, $0x0c
20376 JAE cant_repeat_two_offset_lz4_s2
20377 CMPL R9, $0x00000800
20378 JB repeat_two_offset_lz4_s2
20379
20380cant_repeat_two_offset_lz4_s2:
20381 CMPL R10, $0x00000104
20382 JB repeat_three_lz4_s2
20383 CMPL R10, $0x00010100
20384 JB repeat_four_lz4_s2
20385 CMPL R10, $0x0100ffff
20386 JB repeat_five_lz4_s2
20387 LEAL -16842747(R10), R10
20388 MOVL $0xfffb001d, (AX)
20389 MOVB $0xff, 4(AX)
20390 ADDQ $0x05, AX
20391 JMP emit_repeat_again_lz4_s2
20392
20393repeat_five_lz4_s2:
20394 LEAL -65536(R10), R10
20395 MOVL R10, R9
20396 MOVW $0x001d, (AX)
20397 MOVW R10, 2(AX)
20398 SARL $0x10, R9
20399 MOVB R9, 4(AX)
20400 ADDQ $0x05, AX
20401 JMP lz4s_s2_loop
20402
20403repeat_four_lz4_s2:
20404 LEAL -256(R10), R10
20405 MOVW $0x0019, (AX)
20406 MOVW R10, 2(AX)
20407 ADDQ $0x04, AX
20408 JMP lz4s_s2_loop
20409
20410repeat_three_lz4_s2:
20411 LEAL -4(R10), R10
20412 MOVW $0x0015, (AX)
20413 MOVB R10, 2(AX)
20414 ADDQ $0x03, AX
20415 JMP lz4s_s2_loop
20416
20417repeat_two_lz4_s2:
20418 SHLL $0x02, R10
20419 ORL $0x01, R10
20420 MOVW R10, (AX)
20421 ADDQ $0x02, AX
20422 JMP lz4s_s2_loop
20423
20424repeat_two_offset_lz4_s2:
20425 XORQ R8, R8
20426 LEAL 1(R8)(R10*4), R10
20427 MOVB R9, 1(AX)
20428 SARL $0x08, R9
20429 SHLL $0x05, R9
20430 ORL R9, R10
20431 MOVB R10, (AX)
20432 ADDQ $0x02, AX
20433 JMP lz4s_s2_loop
20434
20435lz4s_s2_docopy:
20436 MOVQ R9, DI
20437
20438 // emitCopy
20439 CMPL R10, $0x40
20440 JBE two_byte_offset_short_lz4_s2
20441 CMPL R9, $0x00000800
20442 JAE long_offset_short_lz4_s2
20443 MOVL $0x00000001, R8
20444 LEAL 16(R8), R8
20445 MOVB R9, 1(AX)
20446 MOVL R9, R11
20447 SHRL $0x08, R11
20448 SHLL $0x05, R11
20449 ORL R11, R8
20450 MOVB R8, (AX)
20451 ADDQ $0x02, AX
20452 SUBL $0x08, R10
20453
20454 // emitRepeat
20455 LEAL -4(R10), R10
20456 JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
20457
20458emit_repeat_again_lz4_s2_emit_copy_short_2b:
20459 MOVL R10, R8
20460 LEAL -4(R10), R10
20461 CMPL R8, $0x08
20462 JBE repeat_two_lz4_s2_emit_copy_short_2b
20463 CMPL R8, $0x0c
20464 JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
20465 CMPL R9, $0x00000800
20466 JB repeat_two_offset_lz4_s2_emit_copy_short_2b
20467
20468cant_repeat_two_offset_lz4_s2_emit_copy_short_2b:
20469 CMPL R10, $0x00000104
20470 JB repeat_three_lz4_s2_emit_copy_short_2b
20471 CMPL R10, $0x00010100
20472 JB repeat_four_lz4_s2_emit_copy_short_2b
20473 CMPL R10, $0x0100ffff
20474 JB repeat_five_lz4_s2_emit_copy_short_2b
20475 LEAL -16842747(R10), R10
20476 MOVL $0xfffb001d, (AX)
20477 MOVB $0xff, 4(AX)
20478 ADDQ $0x05, AX
20479 JMP emit_repeat_again_lz4_s2_emit_copy_short_2b
20480
20481repeat_five_lz4_s2_emit_copy_short_2b:
20482 LEAL -65536(R10), R10
20483 MOVL R10, R9
20484 MOVW $0x001d, (AX)
20485 MOVW R10, 2(AX)
20486 SARL $0x10, R9
20487 MOVB R9, 4(AX)
20488 ADDQ $0x05, AX
20489 JMP lz4s_s2_loop
20490
20491repeat_four_lz4_s2_emit_copy_short_2b:
20492 LEAL -256(R10), R10
20493 MOVW $0x0019, (AX)
20494 MOVW R10, 2(AX)
20495 ADDQ $0x04, AX
20496 JMP lz4s_s2_loop
20497
20498repeat_three_lz4_s2_emit_copy_short_2b:
20499 LEAL -4(R10), R10
20500 MOVW $0x0015, (AX)
20501 MOVB R10, 2(AX)
20502 ADDQ $0x03, AX
20503 JMP lz4s_s2_loop
20504
20505repeat_two_lz4_s2_emit_copy_short_2b:
20506 SHLL $0x02, R10
20507 ORL $0x01, R10
20508 MOVW R10, (AX)
20509 ADDQ $0x02, AX
20510 JMP lz4s_s2_loop
20511
20512repeat_two_offset_lz4_s2_emit_copy_short_2b:
20513 XORQ R8, R8
20514 LEAL 1(R8)(R10*4), R10
20515 MOVB R9, 1(AX)
20516 SARL $0x08, R9
20517 SHLL $0x05, R9
20518 ORL R9, R10
20519 MOVB R10, (AX)
20520 ADDQ $0x02, AX
20521 JMP lz4s_s2_loop
20522
20523long_offset_short_lz4_s2:
20524 MOVB $0xee, (AX)
20525 MOVW R9, 1(AX)
20526 LEAL -60(R10), R10
20527 ADDQ $0x03, AX
20528
20529 // emitRepeat
20530emit_repeat_again_lz4_s2_emit_copy_short:
20531 MOVL R10, R8
20532 LEAL -4(R10), R10
20533 CMPL R8, $0x08
20534 JBE repeat_two_lz4_s2_emit_copy_short
20535 CMPL R8, $0x0c
20536 JAE cant_repeat_two_offset_lz4_s2_emit_copy_short
20537 CMPL R9, $0x00000800
20538 JB repeat_two_offset_lz4_s2_emit_copy_short
20539
20540cant_repeat_two_offset_lz4_s2_emit_copy_short:
20541 CMPL R10, $0x00000104
20542 JB repeat_three_lz4_s2_emit_copy_short
20543 CMPL R10, $0x00010100
20544 JB repeat_four_lz4_s2_emit_copy_short
20545 CMPL R10, $0x0100ffff
20546 JB repeat_five_lz4_s2_emit_copy_short
20547 LEAL -16842747(R10), R10
20548 MOVL $0xfffb001d, (AX)
20549 MOVB $0xff, 4(AX)
20550 ADDQ $0x05, AX
20551 JMP emit_repeat_again_lz4_s2_emit_copy_short
20552
20553repeat_five_lz4_s2_emit_copy_short:
20554 LEAL -65536(R10), R10
20555 MOVL R10, R9
20556 MOVW $0x001d, (AX)
20557 MOVW R10, 2(AX)
20558 SARL $0x10, R9
20559 MOVB R9, 4(AX)
20560 ADDQ $0x05, AX
20561 JMP lz4s_s2_loop
20562
20563repeat_four_lz4_s2_emit_copy_short:
20564 LEAL -256(R10), R10
20565 MOVW $0x0019, (AX)
20566 MOVW R10, 2(AX)
20567 ADDQ $0x04, AX
20568 JMP lz4s_s2_loop
20569
20570repeat_three_lz4_s2_emit_copy_short:
20571 LEAL -4(R10), R10
20572 MOVW $0x0015, (AX)
20573 MOVB R10, 2(AX)
20574 ADDQ $0x03, AX
20575 JMP lz4s_s2_loop
20576
20577repeat_two_lz4_s2_emit_copy_short:
20578 SHLL $0x02, R10
20579 ORL $0x01, R10
20580 MOVW R10, (AX)
20581 ADDQ $0x02, AX
20582 JMP lz4s_s2_loop
20583
20584repeat_two_offset_lz4_s2_emit_copy_short:
20585 XORQ R8, R8
20586 LEAL 1(R8)(R10*4), R10
20587 MOVB R9, 1(AX)
20588 SARL $0x08, R9
20589 SHLL $0x05, R9
20590 ORL R9, R10
20591 MOVB R10, (AX)
20592 ADDQ $0x02, AX
20593 JMP lz4s_s2_loop
20594
20595two_byte_offset_short_lz4_s2:
20596 MOVL R10, R8
20597 SHLL $0x02, R8
20598 CMPL R10, $0x0c
20599 JAE emit_copy_three_lz4_s2
20600 CMPL R9, $0x00000800
20601 JAE emit_copy_three_lz4_s2
20602 LEAL -15(R8), R8
20603 MOVB R9, 1(AX)
20604 SHRL $0x08, R9
20605 SHLL $0x05, R9
20606 ORL R9, R8
20607 MOVB R8, (AX)
20608 ADDQ $0x02, AX
20609 JMP lz4s_s2_loop
20610
20611emit_copy_three_lz4_s2:
20612 LEAL -2(R8), R8
20613 MOVB R8, (AX)
20614 MOVW R9, 1(AX)
20615 ADDQ $0x03, AX
20616 JMP lz4s_s2_loop
20617
20618lz4s_s2_done:
20619 MOVQ dst_base+0(FP), CX
20620 SUBQ CX, AX
20621 MOVQ SI, uncompressed+48(FP)
20622 MOVQ AX, dstUsed+56(FP)
20623 RET
20624
20625lz4s_s2_corrupt:
20626 XORQ AX, AX
20627 LEAQ -1(AX), SI
20628 MOVQ SI, uncompressed+48(FP)
20629 RET
20630
20631lz4s_s2_dstfull:
20632 XORQ AX, AX
20633 LEAQ -2(AX), SI
20634 MOVQ SI, uncompressed+48(FP)
20635 RET
20636
// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// Requires: SSE2
//
// Converts an LZ4 block in src directly into Snappy-encoded output in dst.
//
// Register roles (as used below):
//   AX – current write position in dst
//   CX – dst limit (dst_base + dst_len - 10, leaving margin for one emit)
//   DX – current read position in src
//   BX – end of src
//   SI – running total of uncompressed bytes represented so far
//   DI – token byte / scratch; R8 = literal count then offset; R9 = match length
//
// Error returns (see the tails below): uncompressed = -1 on corrupt input,
// uncompressed = -2 when dst is too small.
TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64
	XORQ SI, SI
	MOVQ dst_base+0(FP), AX
	MOVQ dst_len+8(FP), CX
	MOVQ src_base+24(FP), DX
	MOVQ src_len+32(FP), BX
	LEAQ (DX)(BX*1), BX
	LEAQ -10(AX)(CX*1), CX

	// Main loop: read one LZ4 sequence (token, literals, match) per iteration.
lz4_snappy_loop:
	CMPQ DX, BX
	JAE  lz4_snappy_corrupt
	CMPQ AX, CX
	JAE  lz4_snappy_dstfull
	// Token: high nibble = literal length, low nibble = match length - 4.
	MOVBQZX (DX), DI
	MOVQ    DI, R8
	MOVQ    DI, R9
	SHRQ    $0x04, R8
	ANDQ    $0x0f, R9
	CMPQ    DI, $0xf0
	JB      lz4_snappy_ll_end

	// Literal length 15: accumulate 0xff extension bytes.
lz4_snappy_ll_loop:
	INCQ DX
	CMPQ DX, BX
	JAE  lz4_snappy_corrupt
	MOVBQZX (DX), DI
	ADDQ    DI, R8
	CMPQ    DI, $0xff
	JEQ     lz4_snappy_ll_loop

lz4_snappy_ll_end:
	// DI = end of the literal run in src; R9 += 4 restores the true match length.
	LEAQ (DX)(R8*1), DI
	ADDQ $0x04, R9
	CMPQ DI, BX
	JAE  lz4_snappy_corrupt
	INCQ DX
	INCQ DI
	TESTQ R8, R8
	JZ    lz4_snappy_lits_done
	LEAQ  (AX)(R8*1), R10
	CMPQ  R10, CX
	JAE   lz4_snappy_dstfull
	ADDQ  R8, SI
	// emitLiteral: pick the Snappy literal tag size by length-1 (R10).
	LEAL  -1(R8), R10
	CMPL  R10, $0x3c
	JB    one_byte_lz4_snappy
	CMPL  R10, $0x00000100
	JB    two_bytes_lz4_snappy
	CMPL  R10, $0x00010000
	JB    three_bytes_lz4_snappy
	CMPL  R10, $0x01000000
	JB    four_bytes_lz4_snappy
	MOVB  $0xfc, (AX)
	MOVL  R10, 1(AX)
	ADDQ  $0x05, AX
	JMP   memmove_long_lz4_snappy

four_bytes_lz4_snappy:
	MOVL R10, R11
	SHRL $0x10, R11
	MOVB $0xf8, (AX)
	MOVW R10, 1(AX)
	MOVB R11, 3(AX)
	ADDQ $0x04, AX
	JMP  memmove_long_lz4_snappy

three_bytes_lz4_snappy:
	MOVB $0xf4, (AX)
	MOVW R10, 1(AX)
	ADDQ $0x03, AX
	JMP  memmove_long_lz4_snappy

two_bytes_lz4_snappy:
	MOVB $0xf0, (AX)
	MOVB R10, 1(AX)
	ADDQ $0x02, AX
	CMPL R10, $0x40
	JB   memmove_lz4_snappy
	JMP  memmove_long_lz4_snappy

one_byte_lz4_snappy:
	SHLB $0x02, R10
	MOVB R10, (AX)
	ADDQ $0x01, AX

	// Copy the literal bytes themselves (short path, <= 64 bytes).
memmove_lz4_snappy:
	LEAQ (AX)(R8*1), R10

	// genMemMoveShort
	CMPQ R8, $0x08
	JBE  emit_lit_memmove_lz4_snappy_memmove_move_8
	CMPQ R8, $0x10
	JBE  emit_lit_memmove_lz4_snappy_memmove_move_8through16
	CMPQ R8, $0x20
	JBE  emit_lit_memmove_lz4_snappy_memmove_move_17through32
	JMP  emit_lit_memmove_lz4_snappy_memmove_move_33through64

emit_lit_memmove_lz4_snappy_memmove_move_8:
	MOVQ (DX), R11
	MOVQ R11, (AX)
	JMP  memmove_end_copy_lz4_snappy

emit_lit_memmove_lz4_snappy_memmove_move_8through16:
	MOVQ (DX), R11
	MOVQ -8(DX)(R8*1), DX
	MOVQ R11, (AX)
	MOVQ DX, -8(AX)(R8*1)
	JMP  memmove_end_copy_lz4_snappy

emit_lit_memmove_lz4_snappy_memmove_move_17through32:
	MOVOU (DX), X0
	MOVOU -16(DX)(R8*1), X1
	MOVOU X0, (AX)
	MOVOU X1, -16(AX)(R8*1)
	JMP   memmove_end_copy_lz4_snappy

emit_lit_memmove_lz4_snappy_memmove_move_33through64:
	MOVOU (DX), X0
	MOVOU 16(DX), X1
	MOVOU -32(DX)(R8*1), X2
	MOVOU -16(DX)(R8*1), X3
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(R8*1)
	MOVOU X3, -16(AX)(R8*1)

memmove_end_copy_lz4_snappy:
	MOVQ R10, AX
	JMP  lz4_snappy_lits_emit_done

	// Long literal copy (> 64 bytes): 32-byte aligned SSE loop.
memmove_long_lz4_snappy:
	LEAQ (AX)(R8*1), R10

	// genMemMoveLong
	MOVOU (DX), X0
	MOVOU 16(DX), X1
	MOVOU -32(DX)(R8*1), X2
	MOVOU -16(DX)(R8*1), X3
	MOVQ  R8, R12
	SHRQ  $0x05, R12
	MOVQ  AX, R11
	ANDL  $0x0000001f, R11
	MOVQ  $0x00000040, R13
	SUBQ  R11, R13
	DECQ  R12
	JA    emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32
	LEAQ  -32(DX)(R13*1), R11
	LEAQ  -32(AX)(R13*1), R14

emit_lit_memmove_long_lz4_snappylarge_big_loop_back:
	MOVOU (R11), X4
	MOVOU 16(R11), X5
	MOVOA X4, (R14)
	MOVOA X5, 16(R14)
	ADDQ  $0x20, R14
	ADDQ  $0x20, R11
	ADDQ  $0x20, R13
	DECQ  R12
	JNA   emit_lit_memmove_long_lz4_snappylarge_big_loop_back

emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32:
	MOVOU -32(DX)(R13*1), X4
	MOVOU -16(DX)(R13*1), X5
	MOVOA X4, -32(AX)(R13*1)
	MOVOA X5, -16(AX)(R13*1)
	ADDQ  $0x20, R13
	CMPQ  R8, R13
	JAE   emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(R8*1)
	MOVOU X3, -16(AX)(R8*1)
	MOVQ  R10, AX

lz4_snappy_lits_emit_done:
	MOVQ DI, DX

lz4_snappy_lits_done:
	// End of input is only valid right after literals with match length 4
	// (token low nibble 0): the final sequence carries no match.
	CMPQ DX, BX
	JNE  lz4_snappy_match
	CMPQ R9, $0x04
	JEQ  lz4_snappy_done
	JMP  lz4_snappy_corrupt

lz4_snappy_match:
	// Read the 2-byte little-endian offset; 0 or beyond produced output is corrupt.
	LEAQ    2(DX), DI
	CMPQ    DI, BX
	JAE     lz4_snappy_corrupt
	MOVWQZX (DX), R8
	MOVQ    DI, DX
	TESTQ   R8, R8
	JZ      lz4_snappy_corrupt
	CMPQ    R8, SI
	JA      lz4_snappy_corrupt
	// Match length nibble 15 (+4 = 0x13): accumulate extension bytes.
	CMPQ    R9, $0x13
	JNE     lz4_snappy_ml_done

lz4_snappy_ml_loop:
	MOVBQZX (DX), DI
	INCQ    DX
	ADDQ    DI, R9
	CMPQ    DX, BX
	JAE     lz4_snappy_corrupt
	CMPQ    DI, $0xff
	JEQ     lz4_snappy_ml_loop

lz4_snappy_ml_done:
	ADDQ R9, SI

	// emitCopy
	// Long matches are emitted as repeated 3-byte copy-2 ops of length 60 (0xee).
two_byte_offset_lz4_s2:
	CMPL R9, $0x40
	JBE  two_byte_offset_short_lz4_s2
	MOVB $0xee, (AX)
	MOVW R8, 1(AX)
	LEAL -60(R9), R9
	ADDQ $0x03, AX
	CMPQ AX, CX
	// NOTE(review): when dst space runs out this jumps back to lz4_snappy_loop,
	// which re-checks AX vs CX and exits via lz4_snappy_dstfull (or via
	// lz4_snappy_corrupt if src is also exhausted) — confirm this indirect exit
	// is intended rather than jumping to lz4_snappy_dstfull directly.
	JAE  lz4_snappy_loop
	JMP  two_byte_offset_lz4_s2

two_byte_offset_short_lz4_s2:
	// 1-byte-offset copy is only valid for length < 12 and offset < 2048.
	MOVL R9, DI
	SHLL $0x02, DI
	CMPL R9, $0x0c
	JAE  emit_copy_three_lz4_s2
	CMPL R8, $0x00000800
	JAE  emit_copy_three_lz4_s2
	LEAL -15(DI), DI
	MOVB R8, 1(AX)
	SHRL $0x08, R8
	SHLL $0x05, R8
	ORL  R8, DI
	MOVB DI, (AX)
	ADDQ $0x02, AX
	JMP  lz4_snappy_loop

emit_copy_three_lz4_s2:
	LEAL -2(DI), DI
	MOVB DI, (AX)
	MOVW R8, 1(AX)
	ADDQ $0x03, AX
	JMP  lz4_snappy_loop

lz4_snappy_done:
	// Success: return bytes represented (SI) and dst bytes used (AX - dst_base).
	MOVQ dst_base+0(FP), CX
	SUBQ CX, AX
	MOVQ SI, uncompressed+48(FP)
	MOVQ AX, dstUsed+56(FP)
	RET

lz4_snappy_corrupt:
	// uncompressed = -1 signals corrupt input.
	XORQ AX, AX
	LEAQ -1(AX), SI
	MOVQ SI, uncompressed+48(FP)
	RET

lz4_snappy_dstfull:
	// uncompressed = -2 signals dst too small.
	XORQ AX, AX
	LEAQ -2(AX), SI
	MOVQ SI, uncompressed+48(FP)
	RET
20902
// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// Requires: SSE2
//
// Converts an LZ4s block in src into Snappy-encoded output in dst.
// Identical structure to cvtLZ4BlockSnappyAsm except for the LZ4s match
// length encoding: the token's low nibble is the match length minus 3
// (ADDQ $0x03 below), a value of 3 means "no match", and 0x12 marks the
// extended-length case.
//
// Register roles: AX = dst write pos, CX = dst limit (-10 margin),
// DX = src read pos, BX = src end, SI = uncompressed bytes represented.
// Error returns: uncompressed = -1 corrupt, -2 dst too small.
TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64
	XORQ SI, SI
	MOVQ dst_base+0(FP), AX
	MOVQ dst_len+8(FP), CX
	MOVQ src_base+24(FP), DX
	MOVQ src_len+32(FP), BX
	LEAQ (DX)(BX*1), BX
	LEAQ -10(AX)(CX*1), CX

	// Main loop: one LZ4s sequence per iteration.
lz4s_snappy_loop:
	CMPQ DX, BX
	JAE  lz4s_snappy_corrupt
	CMPQ AX, CX
	JAE  lz4s_snappy_dstfull
	MOVBQZX (DX), DI
	MOVQ    DI, R8
	MOVQ    DI, R9
	SHRQ    $0x04, R8
	ANDQ    $0x0f, R9
	CMPQ    DI, $0xf0
	JB      lz4s_snappy_ll_end

	// Literal length 15: accumulate 0xff extension bytes.
lz4s_snappy_ll_loop:
	INCQ DX
	CMPQ DX, BX
	JAE  lz4s_snappy_corrupt
	MOVBQZX (DX), DI
	ADDQ    DI, R8
	CMPQ    DI, $0xff
	JEQ     lz4s_snappy_ll_loop

lz4s_snappy_ll_end:
	// LZ4s: match length = low nibble + 3 (vs +4 for plain LZ4).
	LEAQ (DX)(R8*1), DI
	ADDQ $0x03, R9
	CMPQ DI, BX
	JAE  lz4s_snappy_corrupt
	INCQ DX
	INCQ DI
	TESTQ R8, R8
	JZ    lz4s_snappy_lits_done
	LEAQ  (AX)(R8*1), R10
	CMPQ  R10, CX
	JAE   lz4s_snappy_dstfull
	ADDQ  R8, SI
	// emitLiteral: choose Snappy literal tag size by length-1.
	LEAL  -1(R8), R10
	CMPL  R10, $0x3c
	JB    one_byte_lz4s_snappy
	CMPL  R10, $0x00000100
	JB    two_bytes_lz4s_snappy
	CMPL  R10, $0x00010000
	JB    three_bytes_lz4s_snappy
	CMPL  R10, $0x01000000
	JB    four_bytes_lz4s_snappy
	MOVB  $0xfc, (AX)
	MOVL  R10, 1(AX)
	ADDQ  $0x05, AX
	JMP   memmove_long_lz4s_snappy

four_bytes_lz4s_snappy:
	MOVL R10, R11
	SHRL $0x10, R11
	MOVB $0xf8, (AX)
	MOVW R10, 1(AX)
	MOVB R11, 3(AX)
	ADDQ $0x04, AX
	JMP  memmove_long_lz4s_snappy

three_bytes_lz4s_snappy:
	MOVB $0xf4, (AX)
	MOVW R10, 1(AX)
	ADDQ $0x03, AX
	JMP  memmove_long_lz4s_snappy

two_bytes_lz4s_snappy:
	MOVB $0xf0, (AX)
	MOVB R10, 1(AX)
	ADDQ $0x02, AX
	CMPL R10, $0x40
	JB   memmove_lz4s_snappy
	JMP  memmove_long_lz4s_snappy

one_byte_lz4s_snappy:
	SHLB $0x02, R10
	MOVB R10, (AX)
	ADDQ $0x01, AX

	// Copy literal bytes (short path, <= 64 bytes).
memmove_lz4s_snappy:
	LEAQ (AX)(R8*1), R10

	// genMemMoveShort
	CMPQ R8, $0x08
	JBE  emit_lit_memmove_lz4s_snappy_memmove_move_8
	CMPQ R8, $0x10
	JBE  emit_lit_memmove_lz4s_snappy_memmove_move_8through16
	CMPQ R8, $0x20
	JBE  emit_lit_memmove_lz4s_snappy_memmove_move_17through32
	JMP  emit_lit_memmove_lz4s_snappy_memmove_move_33through64

emit_lit_memmove_lz4s_snappy_memmove_move_8:
	MOVQ (DX), R11
	MOVQ R11, (AX)
	JMP  memmove_end_copy_lz4s_snappy

emit_lit_memmove_lz4s_snappy_memmove_move_8through16:
	MOVQ (DX), R11
	MOVQ -8(DX)(R8*1), DX
	MOVQ R11, (AX)
	MOVQ DX, -8(AX)(R8*1)
	JMP  memmove_end_copy_lz4s_snappy

emit_lit_memmove_lz4s_snappy_memmove_move_17through32:
	MOVOU (DX), X0
	MOVOU -16(DX)(R8*1), X1
	MOVOU X0, (AX)
	MOVOU X1, -16(AX)(R8*1)
	JMP   memmove_end_copy_lz4s_snappy

emit_lit_memmove_lz4s_snappy_memmove_move_33through64:
	MOVOU (DX), X0
	MOVOU 16(DX), X1
	MOVOU -32(DX)(R8*1), X2
	MOVOU -16(DX)(R8*1), X3
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(R8*1)
	MOVOU X3, -16(AX)(R8*1)

memmove_end_copy_lz4s_snappy:
	MOVQ R10, AX
	JMP  lz4s_snappy_lits_emit_done

	// Long literal copy (> 64 bytes): 32-byte aligned SSE loop.
memmove_long_lz4s_snappy:
	LEAQ (AX)(R8*1), R10

	// genMemMoveLong
	MOVOU (DX), X0
	MOVOU 16(DX), X1
	MOVOU -32(DX)(R8*1), X2
	MOVOU -16(DX)(R8*1), X3
	MOVQ  R8, R12
	SHRQ  $0x05, R12
	MOVQ  AX, R11
	ANDL  $0x0000001f, R11
	MOVQ  $0x00000040, R13
	SUBQ  R11, R13
	DECQ  R12
	JA    emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32
	LEAQ  -32(DX)(R13*1), R11
	LEAQ  -32(AX)(R13*1), R14

emit_lit_memmove_long_lz4s_snappylarge_big_loop_back:
	MOVOU (R11), X4
	MOVOU 16(R11), X5
	MOVOA X4, (R14)
	MOVOA X5, 16(R14)
	ADDQ  $0x20, R14
	ADDQ  $0x20, R11
	ADDQ  $0x20, R13
	DECQ  R12
	JNA   emit_lit_memmove_long_lz4s_snappylarge_big_loop_back

emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32:
	MOVOU -32(DX)(R13*1), X4
	MOVOU -16(DX)(R13*1), X5
	MOVOA X4, -32(AX)(R13*1)
	MOVOA X5, -16(AX)(R13*1)
	ADDQ  $0x20, R13
	CMPQ  R8, R13
	JAE   emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32
	MOVOU X0, (AX)
	MOVOU X1, 16(AX)
	MOVOU X2, -32(AX)(R8*1)
	MOVOU X3, -16(AX)(R8*1)
	MOVQ  R10, AX

lz4s_snappy_lits_emit_done:
	MOVQ DI, DX

lz4s_snappy_lits_done:
	// End of input is only valid when the token carried match length 3
	// (LZ4s "no match" marker).
	CMPQ DX, BX
	JNE  lz4s_snappy_match
	CMPQ R9, $0x03
	JEQ  lz4s_snappy_done
	JMP  lz4s_snappy_corrupt

lz4s_snappy_match:
	// LZ4s: match length 3 means literals only — loop without emitting a copy.
	CMPQ    R9, $0x03
	JEQ     lz4s_snappy_loop
	LEAQ    2(DX), DI
	CMPQ    DI, BX
	JAE     lz4s_snappy_corrupt
	MOVWQZX (DX), R8
	MOVQ    DI, DX
	TESTQ   R8, R8
	JZ      lz4s_snappy_corrupt
	CMPQ    R8, SI
	JA      lz4s_snappy_corrupt
	// Extended match length marker for LZ4s is 0x12 (15 + 3).
	CMPQ    R9, $0x12
	JNE     lz4s_snappy_ml_done

lz4s_snappy_ml_loop:
	MOVBQZX (DX), DI
	INCQ    DX
	ADDQ    DI, R9
	CMPQ    DX, BX
	JAE     lz4s_snappy_corrupt
	CMPQ    DI, $0xff
	JEQ     lz4s_snappy_ml_loop

lz4s_snappy_ml_done:
	ADDQ R9, SI

	// emitCopy
	// Long matches become repeated 3-byte copy-2 ops of length 60 (0xee).
two_byte_offset_lz4_s2:
	CMPL R9, $0x40
	JBE  two_byte_offset_short_lz4_s2
	MOVB $0xee, (AX)
	MOVW R8, 1(AX)
	LEAL -60(R9), R9
	ADDQ $0x03, AX
	CMPQ AX, CX
	// NOTE(review): dst-space exhaustion exits indirectly via lz4s_snappy_loop
	// (which re-checks AX vs CX and takes lz4s_snappy_dstfull) — confirm this
	// is intended rather than a direct jump to lz4s_snappy_dstfull.
	JAE  lz4s_snappy_loop
	JMP  two_byte_offset_lz4_s2

two_byte_offset_short_lz4_s2:
	// 1-byte-offset copy only for length < 12 and offset < 2048.
	MOVL R9, DI
	SHLL $0x02, DI
	CMPL R9, $0x0c
	JAE  emit_copy_three_lz4_s2
	CMPL R8, $0x00000800
	JAE  emit_copy_three_lz4_s2
	LEAL -15(DI), DI
	MOVB R8, 1(AX)
	SHRL $0x08, R8
	SHLL $0x05, R8
	ORL  R8, DI
	MOVB DI, (AX)
	ADDQ $0x02, AX
	JMP  lz4s_snappy_loop

emit_copy_three_lz4_s2:
	LEAL -2(DI), DI
	MOVB DI, (AX)
	MOVW R8, 1(AX)
	ADDQ $0x03, AX
	JMP  lz4s_snappy_loop

lz4s_snappy_done:
	// Success: bytes represented (SI) and dst bytes used (AX - dst_base).
	MOVQ dst_base+0(FP), CX
	SUBQ CX, AX
	MOVQ SI, uncompressed+48(FP)
	MOVQ AX, dstUsed+56(FP)
	RET

lz4s_snappy_corrupt:
	// uncompressed = -1 signals corrupt input.
	XORQ AX, AX
	LEAQ -1(AX), SI
	MOVQ SI, uncompressed+48(FP)
	RET

lz4s_snappy_dstfull:
	// uncompressed = -2 signals dst too small.
	XORQ AX, AX
	LEAQ -2(AX), SI
	MOVQ SI, uncompressed+48(FP)
	RET
diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go
new file mode 100644
index 0000000..18a4f7a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/index.go
@@ -0,0 +1,596 @@
1// Copyright (c) 2022+ Klaus Post. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package s2
6
7import (
8 "bytes"
9 "encoding/binary"
10 "encoding/json"
11 "fmt"
12 "io"
13 "sort"
14)
15
const (
	// S2IndexHeader is the magic string written immediately after the
	// skippable chunk header of a serialized index (see appendTo/Load).
	S2IndexHeader = "s2idx\x00"
	// S2IndexTrailer is the reversed magic that terminates a serialized
	// index; LoadStream locates an index by finding it at the stream end.
	S2IndexTrailer = "\x00xdi2s"
	// maxIndexEntries caps the number of block entries an index may hold;
	// reduce() merges entries to stay below this.
	maxIndexEntries = 1 << 16
)
21
// Index represents an S2/Snappy index.
type Index struct {
	TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown.
	TotalCompressed   int64 // Total Compressed size if known. Will be -1 if unknown.
	// info holds one entry per indexed block, in increasing offset order.
	info []struct {
		compressedOffset   int64 // offset in the compressed stream where the block's chunk starts
		uncompressedOffset int64 // uncompressed offset the block decodes to
	}
	// estBlockUncomp is the estimated uncompressed size per block; it is also
	// the implicit per-entry delta when uncompressed offsets are not serialized.
	estBlockUncomp int64
}
32
33func (i *Index) reset(maxBlock int) {
34 i.estBlockUncomp = int64(maxBlock)
35 i.TotalCompressed = -1
36 i.TotalUncompressed = -1
37 if len(i.info) > 0 {
38 i.info = i.info[:0]
39 }
40}
41
42// allocInfos will allocate an empty slice of infos.
43func (i *Index) allocInfos(n int) {
44 if n > maxIndexEntries {
45 panic("n > maxIndexEntries")
46 }
47 i.info = make([]struct {
48 compressedOffset int64
49 uncompressedOffset int64
50 }, 0, n)
51}
52
53// add an uncompressed and compressed pair.
54// Entries must be sent in order.
55func (i *Index) add(compressedOffset, uncompressedOffset int64) error {
56 if i == nil {
57 return nil
58 }
59 lastIdx := len(i.info) - 1
60 if lastIdx >= 0 {
61 latest := i.info[lastIdx]
62 if latest.uncompressedOffset == uncompressedOffset {
63 // Uncompressed didn't change, don't add entry,
64 // but update start index.
65 latest.compressedOffset = compressedOffset
66 i.info[lastIdx] = latest
67 return nil
68 }
69 if latest.uncompressedOffset > uncompressedOffset {
70 return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
71 }
72 if latest.compressedOffset > compressedOffset {
73 return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
74 }
75 }
76 i.info = append(i.info, struct {
77 compressedOffset int64
78 uncompressedOffset int64
79 }{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset})
80 return nil
81}
82
83// Find the offset at or before the wanted (uncompressed) offset.
84// If offset is 0 or positive it is the offset from the beginning of the file.
85// If the uncompressed size is known, the offset must be within the file.
86// If an offset outside the file is requested io.ErrUnexpectedEOF is returned.
87// If the offset is negative, it is interpreted as the distance from the end of the file,
88// where -1 represents the last byte.
89// If offset from the end of the file is requested, but size is unknown,
90// ErrUnsupported will be returned.
91func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) {
92 if i.TotalUncompressed < 0 {
93 return 0, 0, ErrCorrupt
94 }
95 if offset < 0 {
96 offset = i.TotalUncompressed + offset
97 if offset < 0 {
98 return 0, 0, io.ErrUnexpectedEOF
99 }
100 }
101 if offset > i.TotalUncompressed {
102 return 0, 0, io.ErrUnexpectedEOF
103 }
104 if len(i.info) > 200 {
105 n := sort.Search(len(i.info), func(n int) bool {
106 return i.info[n].uncompressedOffset > offset
107 })
108 if n == 0 {
109 n = 1
110 }
111 return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil
112 }
113 for _, info := range i.info {
114 if info.uncompressedOffset > offset {
115 break
116 }
117 compressedOff = info.compressedOffset
118 uncompressedOff = info.uncompressedOffset
119 }
120 return compressedOff, uncompressedOff, nil
121}
122
// reduce to stay below maxIndexEntries
// Merges runs of neighbouring entries (keeping the first of each run) and
// scales estBlockUncomp up accordingly.
func (i *Index) reduce() {
	// Nothing to do if we are under the entry cap and blocks are already >= 1MB.
	if len(i.info) < maxIndexEntries && i.estBlockUncomp >= 1<<20 {
		return
	}

	// Algorithm, keep 1, remove removeN entries...
	removeN := (len(i.info) + 1) / maxIndexEntries
	src := i.info
	j := 0

	// Each block should be at least 1MB, but don't reduce below 1000 entries.
	for i.estBlockUncomp*(int64(removeN)+1) < 1<<20 && len(i.info)/(removeN+1) > 1000 {
		removeN++
	}
	// Keep one entry, skip removeN: idx advances by removeN+1 per iteration
	// (once in the loop post-statement, removeN more in the body).
	for idx := 0; idx < len(src); idx++ {
		i.info[j] = src[idx]
		j++
		idx += removeN
	}
	i.info = i.info[:j]
	// Update maxblock estimate: each kept entry now spans removeN+1 old blocks.
	i.estBlockUncomp += i.estBlockUncomp * int64(removeN)
}
147
// appendTo serializes the index and appends it to b, returning the result.
// uncompTotal/compTotal are the stream totals to record. The layout is:
// skippable chunk header (ChunkTypeIndex + 24-bit length), S2IndexHeader,
// varints (uncompTotal, compTotal, estBlockUncomp, entry count), a
// hasUncompressed flag, optional uncompressed-offset deltas, compressed-offset
// deltas, a fixed 4-byte total size, and S2IndexTrailer. Load mirrors this.
func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte {
	i.reduce()
	var tmp [binary.MaxVarintLen64]byte

	initSize := len(b)
	// We make the start a skippable header+size.
	// The 24-bit chunk length is patched in at the end.
	b = append(b, ChunkTypeIndex, 0, 0, 0)
	b = append(b, []byte(S2IndexHeader)...)
	// Total Uncompressed size
	n := binary.PutVarint(tmp[:], uncompTotal)
	b = append(b, tmp[:n]...)
	// Total Compressed size
	n = binary.PutVarint(tmp[:], compTotal)
	b = append(b, tmp[:n]...)
	// Put EstBlockUncomp size
	n = binary.PutVarint(tmp[:], i.estBlockUncomp)
	b = append(b, tmp[:n]...)
	// Put length
	n = binary.PutVarint(tmp[:], int64(len(i.info)))
	b = append(b, tmp[:n]...)

	// Check if we should add uncompressed offsets.
	// They are omitted entirely when every entry is exactly estBlockUncomp
	// after the previous one (Load reconstructs them from the estimate).
	var hasUncompressed byte
	for idx, info := range i.info {
		if idx == 0 {
			if info.uncompressedOffset != 0 {
				hasUncompressed = 1
				break
			}
			continue
		}
		if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp {
			hasUncompressed = 1
			break
		}
	}
	b = append(b, hasUncompressed)

	// Add each entry
	if hasUncompressed == 1 {
		for idx, info := range i.info {
			// Stored as the delta from the predicted offset
			// (previous + estBlockUncomp), keeping varints small.
			uOff := info.uncompressedOffset
			if idx > 0 {
				prev := i.info[idx-1]
				uOff -= prev.uncompressedOffset + (i.estBlockUncomp)
			}
			n = binary.PutVarint(tmp[:], uOff)
			b = append(b, tmp[:n]...)
		}
	}

	// Initial compressed size estimate.
	cPredict := i.estBlockUncomp / 2

	for idx, info := range i.info {
		// Compressed offsets are stored as the delta from a running prediction.
		cOff := info.compressedOffset
		if idx > 0 {
			prev := i.info[idx-1]
			cOff -= prev.compressedOffset + cPredict
			// Update compressed size prediction, with half the error.
			cPredict += cOff / 2
		}
		n = binary.PutVarint(tmp[:], cOff)
		b = append(b, tmp[:n]...)
	}

	// Add Total Size.
	// Stored as fixed size for easier reading.
	// (LoadStream reads this from the end to seek back to the index start.)
	binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer)))
	b = append(b, tmp[:4]...)
	// Trailer
	b = append(b, []byte(S2IndexTrailer)...)

	// Update size
	chunkLen := len(b) - initSize - skippableFrameHeader
	b[initSize+1] = uint8(chunkLen >> 0)
	b[initSize+2] = uint8(chunkLen >> 8)
	b[initSize+3] = uint8(chunkLen >> 16)
	//fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal)
	return b
}
229
// Load a binary index.
// A zero value Index can be used or a previous one can be reused.
// b must start at the index chunk (as written by appendTo). On success the
// remainder of b after the index is returned; on failure b is returned at the
// point of the error together with ErrCorrupt, ErrUnsupported or
// io.ErrUnexpectedEOF.
func (i *Index) Load(b []byte) ([]byte, error) {
	if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) {
		return b, io.ErrUnexpectedEOF
	}
	if b[0] != ChunkTypeIndex {
		return b, ErrCorrupt
	}
	// 24-bit little-endian chunk length from the skippable header.
	chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
	b = b[4:]

	// Validate we have enough...
	if len(b) < chunkLen {
		return b, io.ErrUnexpectedEOF
	}
	if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) {
		return b, ErrUnsupported
	}
	b = b[len(S2IndexHeader):]

	// Total Uncompressed
	if v, n := binary.Varint(b); n <= 0 || v < 0 {
		return b, ErrCorrupt
	} else {
		i.TotalUncompressed = v
		b = b[n:]
	}

	// Total Compressed
	// (may legitimately be negative, meaning unknown, so no v < 0 check here)
	if v, n := binary.Varint(b); n <= 0 {
		return b, ErrCorrupt
	} else {
		i.TotalCompressed = v
		b = b[n:]
	}

	// Read EstBlockUncomp
	if v, n := binary.Varint(b); n <= 0 {
		return b, ErrCorrupt
	} else {
		if v < 0 {
			return b, ErrCorrupt
		}
		i.estBlockUncomp = v
		b = b[n:]
	}

	var entries int
	if v, n := binary.Varint(b); n <= 0 {
		return b, ErrCorrupt
	} else {
		if v < 0 || v > maxIndexEntries {
			return b, ErrCorrupt
		}
		entries = int(v)
		b = b[n:]
	}
	if cap(i.info) < entries {
		i.allocInfos(entries)
	}
	i.info = i.info[:entries]

	if len(b) < 1 {
		return b, io.ErrUnexpectedEOF
	}
	// Flag byte: 1 if per-entry uncompressed deltas follow, 0 if offsets
	// are implicit multiples of estBlockUncomp. Any other value is corrupt.
	hasUncompressed := b[0]
	b = b[1:]
	if hasUncompressed&1 != hasUncompressed {
		return b, ErrCorrupt
	}

	// Add each uncompressed entry
	for idx := range i.info {
		var uOff int64
		if hasUncompressed != 0 {
			// Load delta
			if v, n := binary.Varint(b); n <= 0 {
				return b, ErrCorrupt
			} else {
				uOff = v
				b = b[n:]
			}
		}

		if idx > 0 {
			// Reconstruct the absolute offset: previous + estimate + delta.
			// Offsets must be strictly increasing.
			prev := i.info[idx-1].uncompressedOffset
			uOff += prev + (i.estBlockUncomp)
			if uOff <= prev {
				return b, ErrCorrupt
			}
		}
		if uOff < 0 {
			return b, ErrCorrupt
		}
		i.info[idx].uncompressedOffset = uOff
	}

	// Initial compressed size estimate.
	cPredict := i.estBlockUncomp / 2

	// Add each compressed entry
	for idx := range i.info {
		var cOff int64
		if v, n := binary.Varint(b); n <= 0 {
			return b, ErrCorrupt
		} else {
			cOff = v
			b = b[n:]
		}

		if idx > 0 {
			// Update compressed size prediction, with half the error.
			// Mirrors the predictor in appendTo so the deltas round-trip.
			cPredictNew := cPredict + cOff/2

			prev := i.info[idx-1].compressedOffset
			cOff += prev + cPredict
			if cOff <= prev {
				return b, ErrCorrupt
			}
			cPredict = cPredictNew
		}
		if cOff < 0 {
			return b, ErrCorrupt
		}
		i.info[idx].compressedOffset = cOff
	}
	if len(b) < 4+len(S2IndexTrailer) {
		return b, io.ErrUnexpectedEOF
	}
	// Skip size...
	b = b[4:]

	// Check trailer...
	if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) {
		return b, ErrCorrupt
	}
	return b[len(S2IndexTrailer):], nil
}
369
370// LoadStream will load an index from the end of the supplied stream.
371// ErrUnsupported will be returned if the signature cannot be found.
372// ErrCorrupt will be returned if unexpected values are found.
373// io.ErrUnexpectedEOF is returned if there are too few bytes.
374// IO errors are returned as-is.
375func (i *Index) LoadStream(rs io.ReadSeeker) error {
376 // Go to end.
377 _, err := rs.Seek(-10, io.SeekEnd)
378 if err != nil {
379 return err
380 }
381 var tmp [10]byte
382 _, err = io.ReadFull(rs, tmp[:])
383 if err != nil {
384 return err
385 }
386 // Check trailer...
387 if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) {
388 return ErrUnsupported
389 }
390 sz := binary.LittleEndian.Uint32(tmp[:4])
391 if sz > maxChunkSize+skippableFrameHeader {
392 return ErrCorrupt
393 }
394 _, err = rs.Seek(-int64(sz), io.SeekEnd)
395 if err != nil {
396 return err
397 }
398
399 // Read index.
400 buf := make([]byte, sz)
401 _, err = io.ReadFull(rs, buf)
402 if err != nil {
403 return err
404 }
405 _, err = i.Load(buf)
406 return err
407}
408
// IndexStream will return an index for a stream.
// The stream structure will be checked, but
// data within blocks is not verified.
// The returned index can either be appended to the end of the stream
// or stored separately.
func IndexStream(r io.Reader) ([]byte, error) {
	var i Index
	var buf [maxChunkSize]byte
	var readHeader bool
	for {
		// Read the 4-byte chunk header: 1 type byte + 24-bit LE length.
		_, err := io.ReadFull(r, buf[:4])
		if err != nil {
			if err == io.EOF {
				// Clean end of stream: emit the collected index.
				return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil
			}
			return nil, err
		}
		// Start of this chunk.
		startChunk := i.TotalCompressed
		i.TotalCompressed += 4

		chunkType := buf[0]
		if !readHeader {
			// The very first chunk must be the stream identifier.
			if chunkType != chunkTypeStreamIdentifier {
				return nil, ErrCorrupt
			}
			readHeader = true
		}
		chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16
		// Data chunks carry at least a checksum.
		if chunkLen < checksumSize {
			return nil, ErrCorrupt
		}

		i.TotalCompressed += int64(chunkLen)
		_, err = io.ReadFull(r, buf[:chunkLen])
		if err != nil {
			return nil, io.ErrUnexpectedEOF
		}
		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			// Section 4.2. Compressed data (chunk type 0x00).
			// Skip checksum.
			dLen, err := DecodedLen(buf[checksumSize:])
			if err != nil {
				return nil, err
			}
			if dLen > maxBlockSize {
				return nil, ErrCorrupt
			}
			if i.estBlockUncomp == 0 {
				// Use first block for estimate...
				i.estBlockUncomp = int64(dLen)
			}
			// Record offset pair before advancing past this block.
			err = i.add(startChunk, i.TotalUncompressed)
			if err != nil {
				return nil, err
			}
			i.TotalUncompressed += int64(dLen)
			continue
		case chunkTypeUncompressedData:
			// Section 4.3. Uncompressed data (chunk type 0x01).
			n2 := chunkLen - checksumSize
			if n2 > maxBlockSize {
				return nil, ErrCorrupt
			}
			if i.estBlockUncomp == 0 {
				// Use first block for estimate...
				i.estBlockUncomp = int64(n2)
			}
			err = i.add(startChunk, i.TotalUncompressed)
			if err != nil {
				return nil, err
			}
			i.TotalUncompressed += int64(n2)
			continue
		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				return nil, ErrCorrupt
			}

			// Both the S2 and the Snappy magic are accepted.
			if string(buf[:len(magicBody)]) != magicBody {
				if string(buf[:len(magicBody)]) != magicBodySnappy {
					return nil, ErrCorrupt
				}
			}

			continue
		}

		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			return nil, ErrUnsupported
		}
		if chunkLen > maxChunkSize {
			return nil, ErrUnsupported
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
	}
}
511
// JSON returns the index as JSON text.
func (i *Index) JSON() []byte {
	// Local mirror types expose the unexported index fields
	// with stable JSON field names.
	type offset struct {
		CompressedOffset   int64 `json:"compressed"`
		UncompressedOffset int64 `json:"uncompressed"`
	}
	x := struct {
		TotalUncompressed int64    `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown.
		TotalCompressed   int64    `json:"total_compressed"`   // Total Compressed size if known. Will be -1 if unknown.
		Offsets           []offset `json:"offsets"`
		EstBlockUncomp    int64    `json:"est_block_uncompressed"`
	}{
		TotalUncompressed: i.TotalUncompressed,
		TotalCompressed:   i.TotalCompressed,
		EstBlockUncomp:    i.estBlockUncomp,
	}
	for _, v := range i.info {
		x.Offsets = append(x.Offsets, offset{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset})
	}
	// Marshal error ignored: the value contains only ints and slices.
	b, _ := json.MarshalIndent(x, "", " ")
	return b
}
534
535// RemoveIndexHeaders will trim all headers and trailers from a given index.
536// This is expected to save 20 bytes.
537// These can be restored using RestoreIndexHeaders.
538// This removes a layer of security, but is the most compact representation.
539// Returns nil if headers contains errors.
540// The returned slice references the provided slice.
541func RemoveIndexHeaders(b []byte) []byte {
542 const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4
543 if len(b) <= save {
544 return nil
545 }
546 if b[0] != ChunkTypeIndex {
547 return nil
548 }
549 chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
550 b = b[4:]
551
552 // Validate we have enough...
553 if len(b) < chunkLen {
554 return nil
555 }
556 b = b[:chunkLen]
557
558 if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) {
559 return nil
560 }
561 b = b[len(S2IndexHeader):]
562 if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) {
563 return nil
564 }
565 b = bytes.TrimSuffix(b, []byte(S2IndexTrailer))
566
567 if len(b) < 4 {
568 return nil
569 }
570 return b[:len(b)-4]
571}
572
573// RestoreIndexHeaders will index restore headers removed by RemoveIndexHeaders.
574// No error checking is performed on the input.
575// If a 0 length slice is sent, it is returned without modification.
576func RestoreIndexHeaders(in []byte) []byte {
577 if len(in) == 0 {
578 return in
579 }
580 b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4)
581 b = append(b, ChunkTypeIndex, 0, 0, 0)
582 b = append(b, []byte(S2IndexHeader)...)
583 b = append(b, in...)
584
585 var tmp [4]byte
586 binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer)))
587 b = append(b, tmp[:4]...)
588 // Trailer
589 b = append(b, []byte(S2IndexTrailer)...)
590
591 chunkLen := len(b) - skippableFrameHeader
592 b[1] = uint8(chunkLen >> 0)
593 b[2] = uint8(chunkLen >> 8)
594 b[3] = uint8(chunkLen >> 16)
595 return b
596}
diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go
new file mode 100644
index 0000000..46ed908
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/lz4convert.go
@@ -0,0 +1,585 @@
1// Copyright (c) 2022 Klaus Post. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package s2
6
7import (
8 "encoding/binary"
9 "errors"
10 "fmt"
11)
12
// LZ4Converter provides conversion from LZ4 blocks as defined here:
// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md
// The zero value is ready for use; the converter carries no state.
type LZ4Converter struct {
}

// ErrDstTooSmall is returned when provided destination is too small.
var ErrDstTooSmall = errors.New("s2: destination too small")
20
// ConvertBlock will convert an LZ4 block and append it as an S2
// block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
	if len(src) == 0 {
		return dst, 0, nil
	}
	// debug enables trace output; inline selects the hand-inlined
	// emit paths below instead of emitRepeat16/emitCopy16.
	const debug = false
	const inline = true
	// LZ4 matches are at least 4 bytes long.
	const lz4MinMatch = 4

	// s is the read position in src, d the write position in dst.
	// Output is appended at len(dst); the full capacity is usable.
	s, d := 0, len(dst)
	dst = dst[:cap(dst)]
	// Use the assembly converter when available.
	if !debug && hasAmd64Asm {
		res, sz := cvtLZ4BlockAsm(dst[d:], src)
		if res < 0 {
			// Negative results are error codes from the assembly.
			const (
				errCorrupt     = -1
				errDstTooSmall = -2
			)
			switch res {
			case errCorrupt:
				return nil, 0, ErrCorrupt
			case errDstTooSmall:
				return nil, 0, ErrDstTooSmall
			default:
				return nil, 0, fmt.Errorf("unexpected result: %d", res)
			}
		}
		if d+sz > len(dst) {
			return nil, 0, ErrDstTooSmall
		}
		// res holds the uncompressed size, sz the bytes written.
		return dst[:d+sz], res, nil
	}

	// Keep spare bytes so a single emit never overruns dst.
	dLimit := len(dst) - 10
	var lastOffset uint16
	var uncompressed int
	if debug {
		fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
	}

	for {
		if s >= len(src) {
			return dst[:d], 0, ErrCorrupt
		}
		// Read literal info: upper nibble is the literal length,
		// lower nibble is the match length minus lz4MinMatch.
		token := src[s]
		ll := int(token >> 4)
		ml := int(lz4MinMatch + (token & 0xf))

		// If upper nibble is 15, literal length is extended
		// by following bytes, each adding up to 255.
		if token >= 0xf0 {
			for {
				s++
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return dst[:d], 0, ErrCorrupt
				}
				val := src[s]
				ll += int(val)
				if val != 255 {
					break
				}
			}
		}
		// Skip past token
		if s+ll >= len(src) {
			if debug {
				fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
			}
			return nil, 0, ErrCorrupt
		}
		s++
		if ll > 0 {
			if d+ll > dLimit {
				return nil, 0, ErrDstTooSmall
			}
			if debug {
				fmt.Printf("emit %d literals\n", ll)
			}
			// Literals pass through unchanged, re-encoded as S2 literals.
			d += emitLiteralGo(dst[d:], src[s:s+ll])
			s += ll
			uncompressed += ll
		}

		// Check if we are done: the final sequence is literals only,
		// signaled by a zero match-length nibble at end of input.
		if s == len(src) && ml == lz4MinMatch {
			break
		}
		// 2 byte offset
		if s >= len(src)-2 {
			if debug {
				fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
			}
			return nil, 0, ErrCorrupt
		}
		offset := binary.LittleEndian.Uint16(src[s:])
		s += 2
		if offset == 0 {
			if debug {
				fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
			}
			return nil, 0, ErrCorrupt
		}
		// A match cannot reach back before the produced output.
		if int(offset) > uncompressed {
			if debug {
				fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
			}
			return nil, 0, ErrCorrupt
		}

		// Lower nibble 15 means the match length is extended.
		if ml == lz4MinMatch+15 {
			for {
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				s++
				ml += int(val)
				if val != 255 {
					if s >= len(src) {
						if debug {
							fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
						}
						return nil, 0, ErrCorrupt
					}
					break
				}
			}
		}
		if offset == lastOffset {
			// Same offset as previous match: S2 can emit a cheaper repeat.
			if debug {
				fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
			}
			if !inline {
				d += emitRepeat16(dst[d:], offset, ml)
			} else {
				// Inlined emitRepeat16. Every branch breaks, so the
				// "loop" runs at most once; it only gates on space left.
				length := ml
				dst := dst[d:]
				for len(dst) > 5 {
					// Repeat offset, make length cheaper
					length -= 4
					if length <= 4 {
						dst[0] = uint8(length)<<2 | tagCopy1
						dst[1] = 0
						d += 2
						break
					}
					if length < 8 && offset < 2048 {
						// Encode WITH offset
						dst[1] = uint8(offset)
						dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
						d += 2
						break
					}
					if length < (1<<8)+4 {
						length -= 4
						dst[2] = uint8(length)
						dst[1] = 0
						dst[0] = 5<<2 | tagCopy1
						d += 3
						break
					}
					if length < (1<<16)+(1<<8) {
						length -= 1 << 8
						dst[3] = uint8(length >> 8)
						dst[2] = uint8(length >> 0)
						dst[1] = 0
						dst[0] = 6<<2 | tagCopy1
						d += 4
						break
					}
					const maxRepeat = (1 << 24) - 1
					length -= 1 << 16
					left := 0
					if length > maxRepeat {
						left = length - maxRepeat + 4
						length = maxRepeat - 4
					}
					dst[4] = uint8(length >> 16)
					dst[3] = uint8(length >> 8)
					dst[2] = uint8(length >> 0)
					dst[1] = 0
					dst[0] = 7<<2 | tagCopy1
					if left > 0 {
						d += 5 + emitRepeat16(dst[5:], offset, left)
						break
					}
					d += 5
					break
				}
			}
		} else {
			if debug {
				fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
			}
			if !inline {
				d += emitCopy16(dst[d:], offset, ml)
			} else {
				// Inlined emitCopy16; see emitCopy16 for the encoding.
				length := ml
				dst := dst[d:]
				for len(dst) > 5 {
					// Offset no more than 2 bytes.
					if length > 64 {
						off := 3
						if offset < 2048 {
							// emit 8 bytes as tagCopy1, rest as repeats.
							dst[1] = uint8(offset)
							dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
							length -= 8
							off = 2
						} else {
							// Emit a length 60 copy, encoded as 3 bytes.
							// Emit remaining as repeat value (minimum 4 bytes).
							dst[2] = uint8(offset >> 8)
							dst[1] = uint8(offset)
							dst[0] = 59<<2 | tagCopy2
							length -= 60
						}
						// Emit remaining as repeats, at least 4 bytes remain.
						d += off + emitRepeat16(dst[off:], offset, length)
						break
					}
					if length >= 12 || offset >= 2048 {
						// Emit the remaining copy, encoded as 3 bytes.
						dst[2] = uint8(offset >> 8)
						dst[1] = uint8(offset)
						dst[0] = uint8(length-1)<<2 | tagCopy2
						d += 3
						break
					}
					// Emit the remaining copy, encoded as 2 bytes.
					dst[1] = uint8(offset)
					dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
					d += 2
					break
				}
			}
			lastOffset = offset
		}
		uncompressed += ml
		if d > dLimit {
			return nil, 0, ErrDstTooSmall
		}
	}

	return dst[:d], uncompressed, nil
}
276
// ConvertBlockSnappy will convert an LZ4 block and append it
// as a Snappy block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
	if len(src) == 0 {
		return dst, 0, nil
	}
	const debug = false
	// LZ4 matches are at least 4 bytes long.
	const lz4MinMatch = 4

	// s is the read position in src, d the write position in dst.
	s, d := 0, len(dst)
	dst = dst[:cap(dst)]
	// Use assembly when possible
	if !debug && hasAmd64Asm {
		res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src)
		if res < 0 {
			// Negative results are error codes from the assembly.
			const (
				errCorrupt     = -1
				errDstTooSmall = -2
			)
			switch res {
			case errCorrupt:
				return nil, 0, ErrCorrupt
			case errDstTooSmall:
				return nil, 0, ErrDstTooSmall
			default:
				return nil, 0, fmt.Errorf("unexpected result: %d", res)
			}
		}
		if d+sz > len(dst) {
			return nil, 0, ErrDstTooSmall
		}
		// res holds the uncompressed size, sz the bytes written.
		return dst[:d+sz], res, nil
	}

	// Keep spare bytes so a single emit never overruns dst.
	dLimit := len(dst) - 10
	var uncompressed int
	if debug {
		fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
	}

	for {
		if s >= len(src) {
			return nil, 0, ErrCorrupt
		}
		// Read literal info: upper nibble is the literal length,
		// lower nibble is the match length minus lz4MinMatch.
		token := src[s]
		ll := int(token >> 4)
		ml := int(lz4MinMatch + (token & 0xf))

		// If upper nibble is 15, literal length is extended
		// by following bytes, each adding up to 255.
		if token >= 0xf0 {
			for {
				s++
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				ll += int(val)
				if val != 255 {
					break
				}
			}
		}
		// Skip past token
		if s+ll >= len(src) {
			if debug {
				fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
			}
			return nil, 0, ErrCorrupt
		}
		s++
		if ll > 0 {
			if d+ll > dLimit {
				return nil, 0, ErrDstTooSmall
			}
			if debug {
				fmt.Printf("emit %d literals\n", ll)
			}
			d += emitLiteralGo(dst[d:], src[s:s+ll])
			s += ll
			uncompressed += ll
		}

		// Check if we are done: the final sequence is literals only,
		// signaled by a zero match-length nibble at end of input.
		if s == len(src) && ml == lz4MinMatch {
			break
		}
		// 2 byte offset
		if s >= len(src)-2 {
			if debug {
				fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
			}
			return nil, 0, ErrCorrupt
		}
		offset := binary.LittleEndian.Uint16(src[s:])
		s += 2
		if offset == 0 {
			if debug {
				fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
			}
			return nil, 0, ErrCorrupt
		}
		// A match cannot reach back before the produced output.
		if int(offset) > uncompressed {
			if debug {
				fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
			}
			return nil, 0, ErrCorrupt
		}

		// Lower nibble 15 means the match length is extended.
		if ml == lz4MinMatch+15 {
			for {
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				s++
				ml += int(val)
				if val != 255 {
					if s >= len(src) {
						if debug {
							fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
						}
						return nil, 0, ErrCorrupt
					}
					break
				}
			}
		}
		if debug {
			fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
		}
		// Snappy has no repeat codes, so long matches are split
		// into a run of plain copies.
		length := ml
		// d += emitCopyNoRepeat(dst[d:], int(offset), ml)
		for length > 0 {
			if d >= dLimit {
				return nil, 0, ErrDstTooSmall
			}

			// Offset no more than 2 bytes.
			if length > 64 {
				// Emit a length 64 copy, encoded as 3 bytes.
				dst[d+2] = uint8(offset >> 8)
				dst[d+1] = uint8(offset)
				dst[d+0] = 63<<2 | tagCopy2
				length -= 64
				d += 3
				continue
			}
			// length < 4 can occur after splitting; it does not fit
			// tagCopy1, so it is emitted as tagCopy2 as well.
			if length >= 12 || offset >= 2048 || length < 4 {
				// Emit the remaining copy, encoded as 3 bytes.
				dst[d+2] = uint8(offset >> 8)
				dst[d+1] = uint8(offset)
				dst[d+0] = uint8(length-1)<<2 | tagCopy2
				d += 3
				break
			}
			// Emit the remaining copy, encoded as 2 bytes.
			dst[d+1] = uint8(offset)
			dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
			d += 2
			break
		}
		uncompressed += ml
		if d > dLimit {
			return nil, 0, ErrDstTooSmall
		}
	}

	return dst[:d], uncompressed, nil
}
455
456// emitRepeat writes a repeat chunk and returns the number of bytes written.
457// Length must be at least 4 and < 1<<24
458func emitRepeat16(dst []byte, offset uint16, length int) int {
459 // Repeat offset, make length cheaper
460 length -= 4
461 if length <= 4 {
462 dst[0] = uint8(length)<<2 | tagCopy1
463 dst[1] = 0
464 return 2
465 }
466 if length < 8 && offset < 2048 {
467 // Encode WITH offset
468 dst[1] = uint8(offset)
469 dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
470 return 2
471 }
472 if length < (1<<8)+4 {
473 length -= 4
474 dst[2] = uint8(length)
475 dst[1] = 0
476 dst[0] = 5<<2 | tagCopy1
477 return 3
478 }
479 if length < (1<<16)+(1<<8) {
480 length -= 1 << 8
481 dst[3] = uint8(length >> 8)
482 dst[2] = uint8(length >> 0)
483 dst[1] = 0
484 dst[0] = 6<<2 | tagCopy1
485 return 4
486 }
487 const maxRepeat = (1 << 24) - 1
488 length -= 1 << 16
489 left := 0
490 if length > maxRepeat {
491 left = length - maxRepeat + 4
492 length = maxRepeat - 4
493 }
494 dst[4] = uint8(length >> 16)
495 dst[3] = uint8(length >> 8)
496 dst[2] = uint8(length >> 0)
497 dst[1] = 0
498 dst[0] = 7<<2 | tagCopy1
499 if left > 0 {
500 return 5 + emitRepeat16(dst[5:], offset, left)
501 }
502 return 5
503}
504
505// emitCopy writes a copy chunk and returns the number of bytes written.
506//
507// It assumes that:
508//
509// dst is long enough to hold the encoded bytes
510// 1 <= offset && offset <= math.MaxUint16
511// 4 <= length && length <= math.MaxUint32
512func emitCopy16(dst []byte, offset uint16, length int) int {
513 // Offset no more than 2 bytes.
514 if length > 64 {
515 off := 3
516 if offset < 2048 {
517 // emit 8 bytes as tagCopy1, rest as repeats.
518 dst[1] = uint8(offset)
519 dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
520 length -= 8
521 off = 2
522 } else {
523 // Emit a length 60 copy, encoded as 3 bytes.
524 // Emit remaining as repeat value (minimum 4 bytes).
525 dst[2] = uint8(offset >> 8)
526 dst[1] = uint8(offset)
527 dst[0] = 59<<2 | tagCopy2
528 length -= 60
529 }
530 // Emit remaining as repeats, at least 4 bytes remain.
531 return off + emitRepeat16(dst[off:], offset, length)
532 }
533 if length >= 12 || offset >= 2048 {
534 // Emit the remaining copy, encoded as 3 bytes.
535 dst[2] = uint8(offset >> 8)
536 dst[1] = uint8(offset)
537 dst[0] = uint8(length-1)<<2 | tagCopy2
538 return 3
539 }
540 // Emit the remaining copy, encoded as 2 bytes.
541 dst[1] = uint8(offset)
542 dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
543 return 2
544}
545
546// emitLiteral writes a literal chunk and returns the number of bytes written.
547//
548// It assumes that:
549//
550// dst is long enough to hold the encoded bytes
551// 0 <= len(lit) && len(lit) <= math.MaxUint32
552func emitLiteralGo(dst, lit []byte) int {
553 if len(lit) == 0 {
554 return 0
555 }
556 i, n := 0, uint(len(lit)-1)
557 switch {
558 case n < 60:
559 dst[0] = uint8(n)<<2 | tagLiteral
560 i = 1
561 case n < 1<<8:
562 dst[1] = uint8(n)
563 dst[0] = 60<<2 | tagLiteral
564 i = 2
565 case n < 1<<16:
566 dst[2] = uint8(n >> 8)
567 dst[1] = uint8(n)
568 dst[0] = 61<<2 | tagLiteral
569 i = 3
570 case n < 1<<24:
571 dst[3] = uint8(n >> 16)
572 dst[2] = uint8(n >> 8)
573 dst[1] = uint8(n)
574 dst[0] = 62<<2 | tagLiteral
575 i = 4
576 default:
577 dst[4] = uint8(n >> 24)
578 dst[3] = uint8(n >> 16)
579 dst[2] = uint8(n >> 8)
580 dst[1] = uint8(n)
581 dst[0] = 63<<2 | tagLiteral
582 i = 5
583 }
584 return i + copy(dst[i:], lit)
585}
diff --git a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go
new file mode 100644
index 0000000..000f397
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go
@@ -0,0 +1,467 @@
1// Copyright (c) 2022 Klaus Post. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package s2
6
7import (
8 "encoding/binary"
9 "fmt"
10)
11
// LZ4sConverter provides conversion from LZ4s.
// (Intel modified LZ4 Blocks)
// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf
// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format.
// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData.
// The LZ4s block returned by the Intel® QAT hardware can be used by an external
// software post-processing to generate other compressed data formats.
// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses
// the same high-level formatting as LZ4 block format with the following encoding changes:
// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte.
// ONLY "Min match of 4 bytes" is supported.
// The zero value is ready for use; the converter carries no state.
type LZ4sConverter struct {
}
25
// ConvertBlock will convert an LZ4s block and append it as an S2
// block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
	if len(src) == 0 {
		return dst, 0, nil
	}
	// debug enables trace output; inline selects the hand-inlined
	// emit paths below instead of emitRepeat16/emitCopy16.
	const debug = false
	const inline = true
	// In LZ4s the match-length nibble is biased by 3, not 4;
	// a nibble of 0 means "no match" (see the done-check below).
	const lz4MinMatch = 3

	// s is the read position in src, d the write position in dst.
	// Output is appended at len(dst); the full capacity is usable.
	s, d := 0, len(dst)
	dst = dst[:cap(dst)]
	// Use the assembly converter when available.
	if !debug && hasAmd64Asm {
		res, sz := cvtLZ4sBlockAsm(dst[d:], src)
		if res < 0 {
			// Negative results are error codes from the assembly.
			const (
				errCorrupt     = -1
				errDstTooSmall = -2
			)
			switch res {
			case errCorrupt:
				return nil, 0, ErrCorrupt
			case errDstTooSmall:
				return nil, 0, ErrDstTooSmall
			default:
				return nil, 0, fmt.Errorf("unexpected result: %d", res)
			}
		}
		if d+sz > len(dst) {
			return nil, 0, ErrDstTooSmall
		}
		// res holds the uncompressed size, sz the bytes written.
		return dst[:d+sz], res, nil
	}

	// Keep spare bytes so a single emit never overruns dst.
	dLimit := len(dst) - 10
	var lastOffset uint16
	var uncompressed int
	if debug {
		fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
	}

	for {
		if s >= len(src) {
			return dst[:d], 0, ErrCorrupt
		}
		// Read literal info: upper nibble is the literal length,
		// lower nibble is the match length minus lz4MinMatch.
		token := src[s]
		ll := int(token >> 4)
		ml := int(lz4MinMatch + (token & 0xf))

		// If upper nibble is 15, literal length is extended
		// by following bytes, each adding up to 255.
		if token >= 0xf0 {
			for {
				s++
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return dst[:d], 0, ErrCorrupt
				}
				val := src[s]
				ll += int(val)
				if val != 255 {
					break
				}
			}
		}
		// Skip past token
		if s+ll >= len(src) {
			if debug {
				fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
			}
			return nil, 0, ErrCorrupt
		}
		s++
		if ll > 0 {
			if d+ll > dLimit {
				return nil, 0, ErrDstTooSmall
			}
			if debug {
				fmt.Printf("emit %d literals\n", ll)
			}
			d += emitLiteralGo(dst[d:], src[s:s+ll])
			s += ll
			uncompressed += ll
		}

		// Check if we are done: a zero match-length nibble means
		// "literals only" in LZ4s — end of block if input is consumed,
		// otherwise continue with the next sequence.
		if ml == lz4MinMatch {
			if s == len(src) {
				break
			}
			// 0 bytes.
			continue
		}
		// 2 byte offset
		if s >= len(src)-2 {
			if debug {
				fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
			}
			return nil, 0, ErrCorrupt
		}
		offset := binary.LittleEndian.Uint16(src[s:])
		s += 2
		if offset == 0 {
			if debug {
				fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
			}
			return nil, 0, ErrCorrupt
		}
		// A match cannot reach back before the produced output.
		if int(offset) > uncompressed {
			if debug {
				fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
			}
			return nil, 0, ErrCorrupt
		}

		// Lower nibble 15 means the match length is extended.
		if ml == lz4MinMatch+15 {
			for {
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				s++
				ml += int(val)
				if val != 255 {
					if s >= len(src) {
						if debug {
							fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
						}
						return nil, 0, ErrCorrupt
					}
					break
				}
			}
		}
		if offset == lastOffset {
			// Same offset as previous match: S2 can emit a cheaper repeat.
			if debug {
				fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
			}
			if !inline {
				d += emitRepeat16(dst[d:], offset, ml)
			} else {
				// Inlined emitRepeat16. Every branch breaks, so the
				// "loop" runs at most once; it only gates on space left.
				length := ml
				dst := dst[d:]
				for len(dst) > 5 {
					// Repeat offset, make length cheaper
					length -= 4
					if length <= 4 {
						dst[0] = uint8(length)<<2 | tagCopy1
						dst[1] = 0
						d += 2
						break
					}
					if length < 8 && offset < 2048 {
						// Encode WITH offset
						dst[1] = uint8(offset)
						dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
						d += 2
						break
					}
					if length < (1<<8)+4 {
						length -= 4
						dst[2] = uint8(length)
						dst[1] = 0
						dst[0] = 5<<2 | tagCopy1
						d += 3
						break
					}
					if length < (1<<16)+(1<<8) {
						length -= 1 << 8
						dst[3] = uint8(length >> 8)
						dst[2] = uint8(length >> 0)
						dst[1] = 0
						dst[0] = 6<<2 | tagCopy1
						d += 4
						break
					}
					const maxRepeat = (1 << 24) - 1
					length -= 1 << 16
					left := 0
					if length > maxRepeat {
						left = length - maxRepeat + 4
						length = maxRepeat - 4
					}
					dst[4] = uint8(length >> 16)
					dst[3] = uint8(length >> 8)
					dst[2] = uint8(length >> 0)
					dst[1] = 0
					dst[0] = 7<<2 | tagCopy1
					if left > 0 {
						d += 5 + emitRepeat16(dst[5:], offset, left)
						break
					}
					d += 5
					break
				}
			}
		} else {
			if debug {
				fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
			}
			if !inline {
				d += emitCopy16(dst[d:], offset, ml)
			} else {
				// Inlined emitCopy16; see emitCopy16 for the encoding.
				length := ml
				dst := dst[d:]
				for len(dst) > 5 {
					// Offset no more than 2 bytes.
					if length > 64 {
						off := 3
						if offset < 2048 {
							// emit 8 bytes as tagCopy1, rest as repeats.
							dst[1] = uint8(offset)
							dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
							length -= 8
							off = 2
						} else {
							// Emit a length 60 copy, encoded as 3 bytes.
							// Emit remaining as repeat value (minimum 4 bytes).
							dst[2] = uint8(offset >> 8)
							dst[1] = uint8(offset)
							dst[0] = 59<<2 | tagCopy2
							length -= 60
						}
						// Emit remaining as repeats, at least 4 bytes remain.
						d += off + emitRepeat16(dst[off:], offset, length)
						break
					}
					if length >= 12 || offset >= 2048 {
						// Emit the remaining copy, encoded as 3 bytes.
						dst[2] = uint8(offset >> 8)
						dst[1] = uint8(offset)
						dst[0] = uint8(length-1)<<2 | tagCopy2
						d += 3
						break
					}
					// Emit the remaining copy, encoded as 2 bytes.
					dst[1] = uint8(offset)
					dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
					d += 2
					break
				}
			}
			lastOffset = offset
		}
		uncompressed += ml
		if d > dLimit {
			return nil, 0, ErrDstTooSmall
		}
	}

	return dst[:d], uncompressed, nil
}
285
// ConvertBlockSnappy will convert an LZ4s block and append it
// as a Snappy block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
	if len(src) == 0 {
		return dst, 0, nil
	}
	const debug = false
	// In LZ4s the match-length nibble is biased by 3, not 4;
	// a nibble of 0 means "no match" (see the done-check below).
	const lz4MinMatch = 3

	// s is the read position in src, d the write position in dst.
	s, d := 0, len(dst)
	dst = dst[:cap(dst)]
	// Use assembly when possible
	if !debug && hasAmd64Asm {
		res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src)
		if res < 0 {
			// Negative results are error codes from the assembly.
			const (
				errCorrupt     = -1
				errDstTooSmall = -2
			)
			switch res {
			case errCorrupt:
				return nil, 0, ErrCorrupt
			case errDstTooSmall:
				return nil, 0, ErrDstTooSmall
			default:
				return nil, 0, fmt.Errorf("unexpected result: %d", res)
			}
		}
		if d+sz > len(dst) {
			return nil, 0, ErrDstTooSmall
		}
		// res holds the uncompressed size, sz the bytes written.
		return dst[:d+sz], res, nil
	}

	// Keep spare bytes so a single emit never overruns dst.
	dLimit := len(dst) - 10
	var uncompressed int
	if debug {
		fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
	}

	for {
		if s >= len(src) {
			return nil, 0, ErrCorrupt
		}
		// Read literal info: upper nibble is the literal length,
		// lower nibble is the match length minus lz4MinMatch.
		token := src[s]
		ll := int(token >> 4)
		ml := int(lz4MinMatch + (token & 0xf))

		// If upper nibble is 15, literal length is extended
		// by following bytes, each adding up to 255.
		if token >= 0xf0 {
			for {
				s++
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				ll += int(val)
				if val != 255 {
					break
				}
			}
		}
		// Skip past token
		if s+ll >= len(src) {
			if debug {
				fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
			}
			return nil, 0, ErrCorrupt
		}
		s++
		if ll > 0 {
			if d+ll > dLimit {
				return nil, 0, ErrDstTooSmall
			}
			if debug {
				fmt.Printf("emit %d literals\n", ll)
			}
			d += emitLiteralGo(dst[d:], src[s:s+ll])
			s += ll
			uncompressed += ll
		}

		// Check if we are done: a zero match-length nibble means
		// "literals only" in LZ4s — end of block if input is consumed,
		// otherwise continue with the next sequence.
		if ml == lz4MinMatch {
			if s == len(src) {
				break
			}
			// 0 bytes.
			continue
		}
		// 2 byte offset
		if s >= len(src)-2 {
			if debug {
				fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
			}
			return nil, 0, ErrCorrupt
		}
		offset := binary.LittleEndian.Uint16(src[s:])
		s += 2
		if offset == 0 {
			if debug {
				fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
			}
			return nil, 0, ErrCorrupt
		}
		// A match cannot reach back before the produced output.
		if int(offset) > uncompressed {
			if debug {
				fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
			}
			return nil, 0, ErrCorrupt
		}

		// Lower nibble 15 means the match length is extended.
		if ml == lz4MinMatch+15 {
			for {
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				s++
				ml += int(val)
				if val != 255 {
					if s >= len(src) {
						if debug {
							fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
						}
						return nil, 0, ErrCorrupt
					}
					break
				}
			}
		}
		if debug {
			fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
		}
		// Snappy has no repeat codes, so long matches are split
		// into a run of plain copies.
		length := ml
		// d += emitCopyNoRepeat(dst[d:], int(offset), ml)
		for length > 0 {
			if d >= dLimit {
				return nil, 0, ErrDstTooSmall
			}

			// Offset no more than 2 bytes.
			if length > 64 {
				// Emit a length 64 copy, encoded as 3 bytes.
				dst[d+2] = uint8(offset >> 8)
				dst[d+1] = uint8(offset)
				dst[d+0] = 63<<2 | tagCopy2
				length -= 64
				d += 3
				continue
			}
			// length < 4 can occur after splitting; it does not fit
			// tagCopy1, so it is emitted as tagCopy2 as well.
			if length >= 12 || offset >= 2048 || length < 4 {
				// Emit the remaining copy, encoded as 3 bytes.
				dst[d+2] = uint8(offset >> 8)
				dst[d+1] = uint8(offset)
				dst[d+0] = uint8(length-1)<<2 | tagCopy2
				d += 3
				break
			}
			// Emit the remaining copy, encoded as 2 bytes.
			dst[d+1] = uint8(offset)
			dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
			d += 2
			break
		}
		uncompressed += ml
		if d > dLimit {
			return nil, 0, ErrDstTooSmall
		}
	}

	return dst[:d], uncompressed, nil
}
diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go
new file mode 100644
index 0000000..2f01a39
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/reader.go
@@ -0,0 +1,1062 @@
1// Copyright 2011 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019+ Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6package s2
7
8import (
9 "errors"
10 "fmt"
11 "io"
12 "io/ioutil"
13 "math"
14 "runtime"
15 "sync"
16)
17
// ErrCantSeek is the error returned when the stream does not support
// seeking. Reason describes why seeking is unavailable.
type ErrCantSeek struct {
	Reason string
}

// Error implements the error interface.
func (e ErrCantSeek) Error() string {
	return fmt.Sprint("s2: Can't seek because ", e.Reason)
}
27
28// NewReader returns a new Reader that decompresses from r, using the framing
29// format described at
30// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes.
31func NewReader(r io.Reader, opts ...ReaderOption) *Reader {
32 nr := Reader{
33 r: r,
34 maxBlock: maxBlockSize,
35 }
36 for _, opt := range opts {
37 if err := opt(&nr); err != nil {
38 nr.err = err
39 return &nr
40 }
41 }
42 nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize
43 if nr.lazyBuf > 0 {
44 nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize)
45 } else {
46 nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize)
47 }
48 nr.readHeader = nr.ignoreStreamID
49 nr.paramsOK = true
50 return &nr
51}
52
// ReaderOption is an option for creating a decoder.
// Options are applied in order by NewReader; a non-nil error is stored
// on the Reader and returned by later operations on it.
type ReaderOption func(*Reader) error
55
56// ReaderMaxBlockSize allows to control allocations if the stream
57// has been compressed with a smaller WriterBlockSize, or with the default 1MB.
58// Blocks must be this size or smaller to decompress,
59// otherwise the decoder will return ErrUnsupported.
60//
61// For streams compressed with Snappy this can safely be set to 64KB (64 << 10).
62//
63// Default is the maximum limit of 4MB.
64func ReaderMaxBlockSize(blockSize int) ReaderOption {
65 return func(r *Reader) error {
66 if blockSize > maxBlockSize || blockSize <= 0 {
67 return errors.New("s2: block size too large. Must be <= 4MB and > 0")
68 }
69 if r.lazyBuf == 0 && blockSize < defaultBlockSize {
70 r.lazyBuf = blockSize
71 }
72 r.maxBlock = blockSize
73 return nil
74 }
75}
76
77// ReaderAllocBlock allows to control upfront stream allocations
78// and not allocate for frames bigger than this initially.
79// If frames bigger than this is seen a bigger buffer will be allocated.
80//
81// Default is 1MB, which is default output size.
82func ReaderAllocBlock(blockSize int) ReaderOption {
83 return func(r *Reader) error {
84 if blockSize > maxBlockSize || blockSize < 1024 {
85 return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024")
86 }
87 r.lazyBuf = blockSize
88 return nil
89 }
90}
91
92// ReaderIgnoreStreamIdentifier will make the reader skip the expected
93// stream identifier at the beginning of the stream.
94// This can be used when serving a stream that has been forwarded to a specific point.
95func ReaderIgnoreStreamIdentifier() ReaderOption {
96 return func(r *Reader) error {
97 r.ignoreStreamID = true
98 return nil
99 }
100}
101
102// ReaderSkippableCB will register a callback for chuncks with the specified ID.
103// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
104// For each chunk with the ID, the callback is called with the content.
105// Any returned non-nil error will abort decompression.
106// Only one callback per ID is supported, latest sent will be used.
107func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
108 return func(r *Reader) error {
109 if id < 0x80 || id > 0xfd {
110 return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
111 }
112 r.skippableCB[id] = fn
113 return nil
114 }
115}
116
117// ReaderIgnoreCRC will make the reader skip CRC calculation and checks.
118func ReaderIgnoreCRC() ReaderOption {
119 return func(r *Reader) error {
120 r.ignoreCRC = true
121 return nil
122 }
123}
124
// Reader is an io.Reader that can read Snappy-compressed bytes.
// Use NewReader to create one; the zero value is not usable.
type Reader struct {
	r       io.Reader
	err     error
	decoded []byte
	buf     []byte
	// Callbacks for reserved skippable chunks, indexed by chunk ID minus 0x80.
	skippableCB [0x80]func(r io.Reader) error
	blockStart  int64 // Uncompressed offset at start of current.
	index       *Index

	// decoded[i:j] contains decoded bytes that have not yet been passed on.
	i, j int
	// maximum block size allowed.
	maxBlock int
	// maximum expected buffer size.
	maxBufSize int
	// alloc a buffer this size if > 0.
	lazyBuf        int
	readHeader     bool // stream identifier already consumed (or being skipped)
	paramsOK       bool // set by NewReader; guards Reset on zero-value readers
	snappyFrame    bool // stream used the Snappy magic rather than the S2 one
	ignoreStreamID bool
	ignoreCRC      bool
}
149
150// GetBufferCapacity returns the capacity of the internal buffer.
151// This might be useful to know when reusing the same reader in combination
152// with the lazy buffer option.
153func (r *Reader) GetBufferCapacity() int {
154 return cap(r.buf)
155}
156
157// ensureBufferSize will ensure that the buffer can take at least n bytes.
158// If false is returned the buffer exceeds maximum allowed size.
159func (r *Reader) ensureBufferSize(n int) bool {
160 if n > r.maxBufSize {
161 r.err = ErrCorrupt
162 return false
163 }
164 if cap(r.buf) >= n {
165 return true
166 }
167 // Realloc buffer.
168 r.buf = make([]byte, n)
169 return true
170}
171
172// Reset discards any buffered data, resets all state, and switches the Snappy
173// reader to read from r. This permits reusing a Reader rather than allocating
174// a new one.
175func (r *Reader) Reset(reader io.Reader) {
176 if !r.paramsOK {
177 return
178 }
179 r.index = nil
180 r.r = reader
181 r.err = nil
182 r.i = 0
183 r.j = 0
184 r.blockStart = 0
185 r.readHeader = r.ignoreStreamID
186}
187
188func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
189 if _, r.err = io.ReadFull(r.r, p); r.err != nil {
190 if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
191 r.err = ErrCorrupt
192 }
193 return false
194 }
195 return true
196}
197
198// skippable will skip n bytes.
199// If the supplied reader supports seeking that is used.
200// tmp is used as a temporary buffer for reading.
201// The supplied slice does not need to be the size of the read.
202func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) {
203 if id < 0x80 {
204 r.err = fmt.Errorf("interbal error: skippable id < 0x80")
205 return false
206 }
207 if fn := r.skippableCB[id-0x80]; fn != nil {
208 rd := io.LimitReader(r.r, int64(n))
209 r.err = fn(rd)
210 if r.err != nil {
211 return false
212 }
213 _, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp)
214 return r.err == nil
215 }
216 if rs, ok := r.r.(io.ReadSeeker); ok {
217 _, err := rs.Seek(int64(n), io.SeekCurrent)
218 if err == nil {
219 return true
220 }
221 if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
222 r.err = ErrCorrupt
223 return false
224 }
225 }
226 for n > 0 {
227 if n < len(tmp) {
228 tmp = tmp[:n]
229 }
230 if _, r.err = io.ReadFull(r.r, tmp); r.err != nil {
231 if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
232 r.err = ErrCorrupt
233 }
234 return false
235 }
236 n -= len(tmp)
237 }
238 return true
239}
240
// Read satisfies the io.Reader interface.
//
// Bytes are served from an internal buffer of decoded block data; when it
// is drained, the next frame chunk is read, checksummed and decoded before
// any bytes are handed out.
func (r *Reader) Read(p []byte) (int, error) {
	if r.err != nil {
		return 0, r.err
	}
	for {
		// Serve pending decoded bytes first.
		if r.i < r.j {
			n := copy(p, r.decoded[r.i:r.j])
			r.i += n
			return n, nil
		}
		// Read the 4-byte chunk header: type byte + 24-bit little-endian length.
		if !r.readFull(r.buf[:4], true) {
			return 0, r.err
		}
		chunkType := r.buf[0]
		if !r.readHeader {
			// The first chunk must be the stream identifier unless
			// ReaderIgnoreStreamIdentifier was used.
			if chunkType != chunkTypeStreamIdentifier {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.readHeader = true
		}
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			r.blockStart += int64(r.j)
			// Section 4.2. Compressed data (chunk type 0x00).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.ensureBufferSize(chunkLen) {
				if r.err == nil {
					r.err = ErrUnsupported
				}
				return 0, r.err
			}
			buf := r.buf[:chunkLen]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			// Chunk layout: 4-byte little-endian CRC, then compressed payload.
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[checksumSize:]

			n, err := DecodedLen(buf)
			if err != nil {
				r.err = err
				return 0, r.err
			}
			// Snappy frames have a lower block size limit than S2.
			if r.snappyFrame && n > maxSnappyBlockSize {
				r.err = ErrCorrupt
				return 0, r.err
			}

			if n > len(r.decoded) {
				if n > r.maxBlock {
					r.err = ErrCorrupt
					return 0, r.err
				}
				r.decoded = make([]byte, n)
			}
			if _, err := Decode(r.decoded, buf); err != nil {
				r.err = err
				return 0, r.err
			}
			// The CRC covers the decompressed bytes.
			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
				r.err = ErrCRC
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeUncompressedData:
			r.blockStart += int64(r.j)
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.ensureBufferSize(chunkLen) {
				if r.err == nil {
					r.err = ErrUnsupported
				}
				return 0, r.err
			}
			buf := r.buf[:checksumSize]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read directly into r.decoded instead of via r.buf.
			n := chunkLen - checksumSize
			if r.snappyFrame && n > maxSnappyBlockSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if n > len(r.decoded) {
				if n > r.maxBlock {
					r.err = ErrCorrupt
					return 0, r.err
				}
				r.decoded = make([]byte, n)
			}
			if !r.readFull(r.decoded[:n], false) {
				return 0, r.err
			}
			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
				r.err = ErrCRC
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.readFull(r.buf[:len(magicBody)], false) {
				return 0, r.err
			}
			// Accept both the S2 and the Snappy magic; remember which one
			// we saw so block size limits can be enforced accordingly.
			if string(r.buf[:len(magicBody)]) != magicBody {
				if string(r.buf[:len(magicBody)]) != magicBodySnappy {
					r.err = ErrCorrupt
					return 0, r.err
				} else {
					r.snappyFrame = true
				}
			} else {
				r.snappyFrame = false
			}
			continue
		}

		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
			r.err = ErrUnsupported
			return 0, r.err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if chunkLen > maxChunkSize {
			// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
			r.err = ErrUnsupported
			return 0, r.err
		}

		// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
		if !r.skippable(r.buf, chunkLen, false, chunkType) {
			return 0, r.err
		}
	}
}
399
400// DecodeConcurrent will decode the full stream to w.
401// This function should not be combined with reading, seeking or other operations.
402// Up to 'concurrent' goroutines will be used.
403// If <= 0, runtime.NumCPU will be used.
404// On success the number of bytes decompressed nil and is returned.
405// This is mainly intended for bigger streams.
406func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) {
407 if r.i > 0 || r.j > 0 || r.blockStart > 0 {
408 return 0, errors.New("DecodeConcurrent called after ")
409 }
410 if concurrent <= 0 {
411 concurrent = runtime.NumCPU()
412 }
413
414 // Write to output
415 var errMu sync.Mutex
416 var aErr error
417 setErr := func(e error) (ok bool) {
418 errMu.Lock()
419 defer errMu.Unlock()
420 if e == nil {
421 return aErr == nil
422 }
423 if aErr == nil {
424 aErr = e
425 }
426 return false
427 }
428 hasErr := func() (ok bool) {
429 errMu.Lock()
430 v := aErr != nil
431 errMu.Unlock()
432 return v
433 }
434
435 var aWritten int64
436 toRead := make(chan []byte, concurrent)
437 writtenBlocks := make(chan []byte, concurrent)
438 queue := make(chan chan []byte, concurrent)
439 reUse := make(chan chan []byte, concurrent)
440 for i := 0; i < concurrent; i++ {
441 toRead <- make([]byte, 0, r.maxBufSize)
442 writtenBlocks <- make([]byte, 0, r.maxBufSize)
443 reUse <- make(chan []byte, 1)
444 }
445 // Writer
446 var wg sync.WaitGroup
447 wg.Add(1)
448 go func() {
449 defer wg.Done()
450 for toWrite := range queue {
451 entry := <-toWrite
452 reUse <- toWrite
453 if hasErr() {
454 writtenBlocks <- entry
455 continue
456 }
457 n, err := w.Write(entry)
458 want := len(entry)
459 writtenBlocks <- entry
460 if err != nil {
461 setErr(err)
462 continue
463 }
464 if n != want {
465 setErr(io.ErrShortWrite)
466 continue
467 }
468 aWritten += int64(n)
469 }
470 }()
471
472 // Reader
473 defer func() {
474 close(queue)
475 if r.err != nil {
476 err = r.err
477 setErr(r.err)
478 }
479 wg.Wait()
480 if err == nil {
481 err = aErr
482 }
483 written = aWritten
484 }()
485
486 for !hasErr() {
487 if !r.readFull(r.buf[:4], true) {
488 if r.err == io.EOF {
489 r.err = nil
490 }
491 return 0, r.err
492 }
493 chunkType := r.buf[0]
494 if !r.readHeader {
495 if chunkType != chunkTypeStreamIdentifier {
496 r.err = ErrCorrupt
497 return 0, r.err
498 }
499 r.readHeader = true
500 }
501 chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
502
503 // The chunk types are specified at
504 // https://github.com/google/snappy/blob/master/framing_format.txt
505 switch chunkType {
506 case chunkTypeCompressedData:
507 r.blockStart += int64(r.j)
508 // Section 4.2. Compressed data (chunk type 0x00).
509 if chunkLen < checksumSize {
510 r.err = ErrCorrupt
511 return 0, r.err
512 }
513 if chunkLen > r.maxBufSize {
514 r.err = ErrCorrupt
515 return 0, r.err
516 }
517 orgBuf := <-toRead
518 buf := orgBuf[:chunkLen]
519
520 if !r.readFull(buf, false) {
521 return 0, r.err
522 }
523
524 checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
525 buf = buf[checksumSize:]
526
527 n, err := DecodedLen(buf)
528 if err != nil {
529 r.err = err
530 return 0, r.err
531 }
532 if r.snappyFrame && n > maxSnappyBlockSize {
533 r.err = ErrCorrupt
534 return 0, r.err
535 }
536
537 if n > r.maxBlock {
538 r.err = ErrCorrupt
539 return 0, r.err
540 }
541 wg.Add(1)
542
543 decoded := <-writtenBlocks
544 entry := <-reUse
545 queue <- entry
546 go func() {
547 defer wg.Done()
548 decoded = decoded[:n]
549 _, err := Decode(decoded, buf)
550 toRead <- orgBuf
551 if err != nil {
552 writtenBlocks <- decoded
553 setErr(err)
554 return
555 }
556 if !r.ignoreCRC && crc(decoded) != checksum {
557 writtenBlocks <- decoded
558 setErr(ErrCRC)
559 return
560 }
561 entry <- decoded
562 }()
563 continue
564
565 case chunkTypeUncompressedData:
566
567 // Section 4.3. Uncompressed data (chunk type 0x01).
568 if chunkLen < checksumSize {
569 r.err = ErrCorrupt
570 return 0, r.err
571 }
572 if chunkLen > r.maxBufSize {
573 r.err = ErrCorrupt
574 return 0, r.err
575 }
576 // Grab write buffer
577 orgBuf := <-writtenBlocks
578 buf := orgBuf[:checksumSize]
579 if !r.readFull(buf, false) {
580 return 0, r.err
581 }
582 checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
583 // Read content.
584 n := chunkLen - checksumSize
585
586 if r.snappyFrame && n > maxSnappyBlockSize {
587 r.err = ErrCorrupt
588 return 0, r.err
589 }
590 if n > r.maxBlock {
591 r.err = ErrCorrupt
592 return 0, r.err
593 }
594 // Read uncompressed
595 buf = orgBuf[:n]
596 if !r.readFull(buf, false) {
597 return 0, r.err
598 }
599
600 if !r.ignoreCRC && crc(buf) != checksum {
601 r.err = ErrCRC
602 return 0, r.err
603 }
604 entry := <-reUse
605 queue <- entry
606 entry <- buf
607 continue
608
609 case chunkTypeStreamIdentifier:
610 // Section 4.1. Stream identifier (chunk type 0xff).
611 if chunkLen != len(magicBody) {
612 r.err = ErrCorrupt
613 return 0, r.err
614 }
615 if !r.readFull(r.buf[:len(magicBody)], false) {
616 return 0, r.err
617 }
618 if string(r.buf[:len(magicBody)]) != magicBody {
619 if string(r.buf[:len(magicBody)]) != magicBodySnappy {
620 r.err = ErrCorrupt
621 return 0, r.err
622 } else {
623 r.snappyFrame = true
624 }
625 } else {
626 r.snappyFrame = false
627 }
628 continue
629 }
630
631 if chunkType <= 0x7f {
632 // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
633 // fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
634 r.err = ErrUnsupported
635 return 0, r.err
636 }
637 // Section 4.4 Padding (chunk type 0xfe).
638 // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
639 if chunkLen > maxChunkSize {
640 // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
641 r.err = ErrUnsupported
642 return 0, r.err
643 }
644
645 // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
646 if !r.skippable(r.buf, chunkLen, false, chunkType) {
647 return 0, r.err
648 }
649 }
650 return 0, r.err
651}
652
653// Skip will skip n bytes forward in the decompressed output.
654// For larger skips this consumes less CPU and is faster than reading output and discarding it.
655// CRC is not checked on skipped blocks.
656// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped.
657// If a decoding error is encountered subsequent calls to Read will also fail.
658func (r *Reader) Skip(n int64) error {
659 if n < 0 {
660 return errors.New("attempted negative skip")
661 }
662 if r.err != nil {
663 return r.err
664 }
665
666 for n > 0 {
667 if r.i < r.j {
668 // Skip in buffer.
669 // decoded[i:j] contains decoded bytes that have not yet been passed on.
670 left := int64(r.j - r.i)
671 if left >= n {
672 tmp := int64(r.i) + n
673 if tmp > math.MaxInt32 {
674 return errors.New("s2: internal overflow in skip")
675 }
676 r.i = int(tmp)
677 return nil
678 }
679 n -= int64(r.j - r.i)
680 r.i = r.j
681 }
682
683 // Buffer empty; read blocks until we have content.
684 if !r.readFull(r.buf[:4], true) {
685 if r.err == io.EOF {
686 r.err = io.ErrUnexpectedEOF
687 }
688 return r.err
689 }
690 chunkType := r.buf[0]
691 if !r.readHeader {
692 if chunkType != chunkTypeStreamIdentifier {
693 r.err = ErrCorrupt
694 return r.err
695 }
696 r.readHeader = true
697 }
698 chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
699
700 // The chunk types are specified at
701 // https://github.com/google/snappy/blob/master/framing_format.txt
702 switch chunkType {
703 case chunkTypeCompressedData:
704 r.blockStart += int64(r.j)
705 // Section 4.2. Compressed data (chunk type 0x00).
706 if chunkLen < checksumSize {
707 r.err = ErrCorrupt
708 return r.err
709 }
710 if !r.ensureBufferSize(chunkLen) {
711 if r.err == nil {
712 r.err = ErrUnsupported
713 }
714 return r.err
715 }
716 buf := r.buf[:chunkLen]
717 if !r.readFull(buf, false) {
718 return r.err
719 }
720 checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
721 buf = buf[checksumSize:]
722
723 dLen, err := DecodedLen(buf)
724 if err != nil {
725 r.err = err
726 return r.err
727 }
728 if dLen > r.maxBlock {
729 r.err = ErrCorrupt
730 return r.err
731 }
732 // Check if destination is within this block
733 if int64(dLen) > n {
734 if len(r.decoded) < dLen {
735 r.decoded = make([]byte, dLen)
736 }
737 if _, err := Decode(r.decoded, buf); err != nil {
738 r.err = err
739 return r.err
740 }
741 if crc(r.decoded[:dLen]) != checksum {
742 r.err = ErrCorrupt
743 return r.err
744 }
745 } else {
746 // Skip block completely
747 n -= int64(dLen)
748 r.blockStart += int64(dLen)
749 dLen = 0
750 }
751 r.i, r.j = 0, dLen
752 continue
753 case chunkTypeUncompressedData:
754 r.blockStart += int64(r.j)
755 // Section 4.3. Uncompressed data (chunk type 0x01).
756 if chunkLen < checksumSize {
757 r.err = ErrCorrupt
758 return r.err
759 }
760 if !r.ensureBufferSize(chunkLen) {
761 if r.err != nil {
762 r.err = ErrUnsupported
763 }
764 return r.err
765 }
766 buf := r.buf[:checksumSize]
767 if !r.readFull(buf, false) {
768 return r.err
769 }
770 checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
771 // Read directly into r.decoded instead of via r.buf.
772 n2 := chunkLen - checksumSize
773 if n2 > len(r.decoded) {
774 if n2 > r.maxBlock {
775 r.err = ErrCorrupt
776 return r.err
777 }
778 r.decoded = make([]byte, n2)
779 }
780 if !r.readFull(r.decoded[:n2], false) {
781 return r.err
782 }
783 if int64(n2) < n {
784 if crc(r.decoded[:n2]) != checksum {
785 r.err = ErrCorrupt
786 return r.err
787 }
788 }
789 r.i, r.j = 0, n2
790 continue
791 case chunkTypeStreamIdentifier:
792 // Section 4.1. Stream identifier (chunk type 0xff).
793 if chunkLen != len(magicBody) {
794 r.err = ErrCorrupt
795 return r.err
796 }
797 if !r.readFull(r.buf[:len(magicBody)], false) {
798 return r.err
799 }
800 if string(r.buf[:len(magicBody)]) != magicBody {
801 if string(r.buf[:len(magicBody)]) != magicBodySnappy {
802 r.err = ErrCorrupt
803 return r.err
804 }
805 }
806
807 continue
808 }
809
810 if chunkType <= 0x7f {
811 // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
812 r.err = ErrUnsupported
813 return r.err
814 }
815 if chunkLen > maxChunkSize {
816 r.err = ErrUnsupported
817 return r.err
818 }
819 // Section 4.4 Padding (chunk type 0xfe).
820 // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
821 if !r.skippable(r.buf, chunkLen, false, chunkType) {
822 return r.err
823 }
824 }
825 return nil
826}
827
// ReadSeeker provides random or forward seeking in compressed content.
// See Reader.ReadSeeker
type ReadSeeker struct {
	*Reader
	// readAtMu serializes ReadAt calls, since each ReadAt performs a
	// Seek followed by Reads on the shared embedded Reader state.
	readAtMu sync.Mutex
}
834
// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
// compatible version of the reader.
// If 'random' is specified the returned io.Seeker can be used for
// random seeking, otherwise only forward seeking is supported.
// Enabling random seeking requires the original input to support
// the io.Seeker interface.
// A custom index can be specified which will be used if supplied.
// When using a custom index, it will not be read from the input stream.
// The ReadAt position will affect regular reads and the current position of Seek.
// So using Read after ReadAt will continue from where the ReadAt stopped.
// No functions should be used concurrently.
// The returned ReadSeeker contains a shallow reference to the existing Reader,
// meaning changes performed to one is reflected in the other.
func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
	// Read index if provided.
	if len(index) != 0 {
		if r.index == nil {
			r.index = &Index{}
		}
		if _, err := r.index.Load(index); err != nil {
			return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()}
		}
	}

	// Check if input is seekable
	rs, ok := r.r.(io.ReadSeeker)
	if !ok {
		if !random {
			// Forward-only seeking works via Skip and needs no seekable input.
			return &ReadSeeker{Reader: r}, nil
		}
		return nil, ErrCantSeek{Reason: "input stream isn't seekable"}
	}

	if r.index != nil {
		// Seekable and index, ok...
		return &ReadSeeker{Reader: r}, nil
	}

	// No index supplied: try to load one from the end of the stream.
	r.index = &Index{}

	// Read current position so the stream can be restored afterwards.
	pos, err := rs.Seek(0, io.SeekCurrent)
	if err != nil {
		return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()}
	}
	err = r.index.LoadStream(rs)
	if err != nil {
		if err == ErrUnsupported {
			// If we don't require random seeking, reset input and return.
			if !random {
				_, err = rs.Seek(pos, io.SeekStart)
				if err != nil {
					return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()}
				}
				r.index = nil
				return &ReadSeeker{Reader: r}, nil
			}
			return nil, ErrCantSeek{Reason: "input stream does not contain an index"}
		}
		return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()}
	}

	// reset position.
	_, err = rs.Seek(pos, io.SeekStart)
	if err != nil {
		return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()}
	}
	return &ReadSeeker{Reader: r}, nil
}
905
// Seek allows seeking in compressed data.
//
// The offset is interpreted in uncompressed bytes. Backward (random)
// seeking uses the loaded index and a seekable input; without them,
// only forward movement (via Skip) is possible.
func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
	if r.err != nil {
		if !errors.Is(r.err, io.EOF) {
			return 0, r.err
		}
		// Reset on EOF
		r.err = nil
	}

	// Calculate absolute offset.
	absOffset := offset

	switch whence {
	case io.SeekStart:
	case io.SeekCurrent:
		absOffset = r.blockStart + int64(r.i) + offset
	case io.SeekEnd:
		// End-relative seeks need the total uncompressed size,
		// which only an index can provide.
		if r.index == nil {
			return 0, ErrUnsupported
		}
		absOffset = r.index.TotalUncompressed + offset
	default:
		r.err = ErrUnsupported
		return 0, r.err
	}

	if absOffset < 0 {
		return 0, errors.New("seek before start of file")
	}

	if !r.readHeader {
		// Make sure we read the header.
		_, r.err = r.Read([]byte{})
		if r.err != nil {
			return 0, r.err
		}
	}

	// If we are inside current block no need to seek.
	// This includes no offset changes.
	if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) {
		r.i = int(absOffset - r.blockStart)
		return r.blockStart + int64(r.i), nil
	}

	rs, ok := r.r.(io.ReadSeeker)
	if r.index == nil || !ok {
		// No random access available: forward movement only, via Skip.
		currOffset := r.blockStart + int64(r.i)
		if absOffset >= currOffset {
			err := r.Skip(absOffset - currOffset)
			return r.blockStart + int64(r.i), err
		}
		return 0, ErrUnsupported
	}

	// We can seek and we have an index.
	c, u, err := r.index.Find(absOffset)
	if err != nil {
		return r.blockStart + int64(r.i), err
	}

	// Seek to next block
	_, err = rs.Seek(c, io.SeekStart)
	if err != nil {
		return 0, err
	}

	r.i = r.j                     // Remove rest of current block.
	r.blockStart = u - int64(r.j) // Adjust current block start for accounting.
	if u < absOffset {
		// Forward inside block
		// NOTE(review): absOffset is returned even if Skip fails;
		// callers must check the error before trusting the offset.
		return absOffset, r.Skip(absOffset - u)
	}
	if u > absOffset {
		return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset)
	}
	return absOffset, nil
}
985
986// ReadAt reads len(p) bytes into p starting at offset off in the
987// underlying input source. It returns the number of bytes
988// read (0 <= n <= len(p)) and any error encountered.
989//
990// When ReadAt returns n < len(p), it returns a non-nil error
991// explaining why more bytes were not returned. In this respect,
992// ReadAt is stricter than Read.
993//
994// Even if ReadAt returns n < len(p), it may use all of p as scratch
995// space during the call. If some data is available but not len(p) bytes,
996// ReadAt blocks until either all the data is available or an error occurs.
997// In this respect ReadAt is different from Read.
998//
999// If the n = len(p) bytes returned by ReadAt are at the end of the
1000// input source, ReadAt may return either err == EOF or err == nil.
1001//
1002// If ReadAt is reading from an input source with a seek offset,
1003// ReadAt should not affect nor be affected by the underlying
1004// seek offset.
1005//
1006// Clients of ReadAt can execute parallel ReadAt calls on the
1007// same input source. This is however not recommended.
1008func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) {
1009 r.readAtMu.Lock()
1010 defer r.readAtMu.Unlock()
1011 _, err := r.Seek(offset, io.SeekStart)
1012 if err != nil {
1013 return 0, err
1014 }
1015 n := 0
1016 for n < len(p) {
1017 n2, err := r.Read(p[n:])
1018 if err != nil {
1019 // This will include io.EOF
1020 return n + n2, err
1021 }
1022 n += n2
1023 }
1024 return n, nil
1025}
1026
1027// ReadByte satisfies the io.ByteReader interface.
1028func (r *Reader) ReadByte() (byte, error) {
1029 if r.err != nil {
1030 return 0, r.err
1031 }
1032 if r.i < r.j {
1033 c := r.decoded[r.i]
1034 r.i++
1035 return c, nil
1036 }
1037 var tmp [1]byte
1038 for i := 0; i < 10; i++ {
1039 n, err := r.Read(tmp[:])
1040 if err != nil {
1041 return 0, err
1042 }
1043 if n == 1 {
1044 return tmp[0], nil
1045 }
1046 }
1047 return 0, io.ErrNoProgress
1048}
1049
1050// SkippableCB will register a callback for chunks with the specified ID.
1051// ID must be a Reserved skippable chunks ID, 0x80-0xfe (inclusive).
1052// For each chunk with the ID, the callback is called with the content.
1053// Any returned non-nil error will abort decompression.
1054// Only one callback per ID is supported, latest sent will be used.
1055// Sending a nil function will disable previous callbacks.
1056func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
1057 if id < 0x80 || id > chunkTypePadding {
1058 return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)")
1059 }
1060 r.skippableCB[id] = fn
1061 return nil
1062}
diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go
new file mode 100644
index 0000000..dae3f73
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/s2.go
@@ -0,0 +1,143 @@
1// Copyright 2011 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019 Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6// Package s2 implements the S2 compression format.
7//
8// S2 is an extension of Snappy. Similar to Snappy S2 is aimed for high throughput,
9// which is why it features concurrent compression for bigger payloads.
10//
11// Decoding is compatible with Snappy compressed content,
12// but content compressed with S2 cannot be decompressed by Snappy.
13//
14// For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2
15//
16// There are actually two S2 formats: block and stream. They are related,
17// but different: trying to decompress block-compressed data as a S2 stream
18// will fail, and vice versa. The block format is the Decode and Encode
19// functions and the stream format is the Reader and Writer types.
20//
// A "better" compression option is available. This will trade some compression
// speed for a better compression ratio.
//
24// The block format, the more common case, is used when the complete size (the
25// number of bytes) of the original data is known upfront, at the time
26// compression starts. The stream format, also known as the framing format, is
27// for when that isn't always true.
28//
// Blocks do not offer much data protection, so it is up to you to
// add data validation of decompressed blocks.
31//
32// Streams perform CRC validation of the decompressed data.
33// Stream compression will also be performed on multiple CPU cores concurrently
34// significantly improving throughput.
35package s2
36
37import (
38 "bytes"
39 "hash/crc32"
40)
41
42/*
43Each encoded block begins with the varint-encoded length of the decoded data,
44followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
45first byte of each chunk is broken into its 2 least and 6 most significant bits
46called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
47Zero means a literal tag. All other values mean a copy tag.
48
49For literal tags:
50 - If m < 60, the next 1 + m bytes are literal bytes.
51 - Otherwise, let n be the little-endian unsigned integer denoted by the next
52 m - 59 bytes. The next 1 + n bytes after that are literal bytes.
53
54For copy tags, length bytes are copied from offset bytes ago, in the style of
55Lempel-Ziv compression algorithms. In particular:
56 - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
57 The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
58 of the offset. The next byte is bits 0-7 of the offset.
59 - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
60 The length is 1 + m. The offset is the little-endian unsigned integer
61 denoted by the next 2 bytes.
62 - For l == 3, the offset ranges in [0, 1<<32) and the length in
63 [1, 65). The length is 1 + m. The offset is the little-endian unsigned
64 integer denoted by the next 4 bytes.
65*/
const (
	// Block-format chunk tags; the 2 least significant bits of the
	// first byte of each chunk (see the format comment above).
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

const (
	// checksumSize is the size of the CRC stored in each data chunk.
	checksumSize = 4
	// chunkHeaderSize is the type byte plus 3-byte little-endian length.
	chunkHeaderSize = 4
	// magicChunk / magicChunkSnappy are the stream identifier chunks
	// written at the start of S2 and Snappy-compatible streams.
	magicChunk       = "\xff\x06\x00\x00" + magicBody
	magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
	magicBodySnappy  = "sNaPpY"
	magicBody        = "S2sTwO"

	// maxBlockSize is the maximum size of the input to encodeBlock.
	//
	// For the framing format (Writer type instead of Encode function),
	// this is the maximum uncompressed size of a block.
	maxBlockSize = 4 << 20

	// minBlockSize is the minimum size of block setting when creating a writer.
	minBlockSize = 4 << 10

	// skippableFrameHeader is the chunk header size of a skippable frame.
	skippableFrameHeader = 4
	// maxChunkSize is the largest chunk body expressible in the
	// 3-byte chunk length field.
	maxChunkSize = 1<<24 - 1 // 16777215

	// Default block size
	defaultBlockSize = 1 << 20

	// maxSnappyBlockSize is the maximum snappy block size.
	maxSnappyBlockSize = 1 << 16

	// obufHeaderLen is the header space reserved at the start of each
	// output buffer: chunk header followed by the CRC.
	obufHeaderLen = checksumSize + chunkHeaderSize
)

const (
	// Chunk type bytes of the framing format. ChunkTypeIndex is
	// exported because index chunks may be consumed externally.
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	ChunkTypeIndex            = 0x99
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)
109
// crcTable is the CRC-32 (Castagnoli) table used for chunk checksums.
var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	// Masked CRC: rotate right by 15 bits, then add the magic offset.
	// (In Go, "|" and "+" share precedence, so the original unparenthesized
	// expression evaluated the same way; the parentheses only add clarity.)
	return (c>>15 | c<<17) + 0xa282ead8
}
118
// literalExtraSize returns the extra size of encoding n literals,
// i.e. the tag byte plus any extra length bytes.
// n should be >= 0 and <= math.MaxUint32.
func literalExtraSize(n int64) int64 {
	switch {
	case n == 0:
		return 0
	case n < 60:
		return 1 // length fits in the tag byte itself
	case n < 1<<8:
		return 2 // tag + 1 length byte
	case n < 1<<16:
		return 3 // tag + 2 length bytes
	case n < 1<<24:
		return 4 // tag + 3 length bytes
	default:
		return 5 // tag + 4 length bytes
	}
}
138
// byter is implemented by in-memory sources (such as *bytes.Buffer)
// that can expose their contents without copying.
type byter interface {
	Bytes() []byte
}

// Compile-time check that *bytes.Buffer satisfies byter.
var _ byter = (*bytes.Buffer)(nil)
diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go
new file mode 100644
index 0000000..089cd36
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/writer.go
@@ -0,0 +1,1020 @@
1// Copyright 2011 The Snappy-Go Authors. All rights reserved.
2// Copyright (c) 2019+ Klaus Post. All rights reserved.
3// Use of this source code is governed by a BSD-style
4// license that can be found in the LICENSE file.
5
6package s2
7
8import (
9 "crypto/rand"
10 "encoding/binary"
11 "errors"
12 "fmt"
13 "io"
14 "runtime"
15 "sync"
16)
17
const (
	// Compression levels used by the stream writer options below.
	// Zero is deliberately unused so the zero value is invalid.
	levelUncompressed = iota + 1 // store blocks without compression
	levelFast                    // default: fastest compression
	levelBetter                  // slower, better ratio
	levelBest                    // slowest, best ratio
)
24
// NewWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// Users must call Close to guarantee all data has been forwarded to
// the underlying io.Writer and that resources are released.
// They may also call Flush zero or more times before calling Close.
func NewWriter(w io.Writer, opts ...WriterOption) *Writer {
	w2 := Writer{
		blockSize:   defaultBlockSize,
		concurrency: runtime.GOMAXPROCS(0),
		randSrc:     rand.Reader,
		level:       levelFast,
	}
	for _, opt := range opts {
		if err := opt(&w2); err != nil {
			// Option failed: return a writer whose sticky error is set,
			// so every subsequent call reports it. paramsOK stays false.
			w2.errState = err
			return &w2
		}
	}
	// Output buffers must hold header space plus a worst-case encoded block.
	w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize)
	w2.paramsOK = true
	w2.ibuf = make([]byte, 0, w2.blockSize)
	w2.buffers.New = func() interface{} {
		return make([]byte, w2.obufLen)
	}
	w2.Reset(w)
	return &w2
}
54
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
	errMu    sync.Mutex // guards errState
	errState error      // first error seen; sticky until Reset

	// ibuf is a buffer for the incoming (uncompressed) bytes.
	ibuf []byte

	blockSize     int              // maximum uncompressed bytes per block
	obufLen       int              // size of pooled output buffers
	concurrency   int              // number of concurrent block encoders
	written       int64            // compressed bytes written to the underlying writer
	uncompWritten int64            // Bytes sent to compression
	output        chan chan result // ordered queue of pending writes; nil when concurrency == 1
	buffers       sync.Pool        // reusable []byte of obufLen
	pad           int              // pad output to a multiple of this on Close; <= 1 disables

	writer    io.Writer
	randSrc   io.Reader      // source of padding bytes
	writerWg  sync.WaitGroup // waits for the output goroutine started in Reset
	index     Index
	customEnc func(dst, src []byte) int // optional user-supplied block encoder

	// wroteStreamHeader is whether we have written the stream header.
	wroteStreamHeader bool
	paramsOK          bool // options validated successfully in NewWriter
	snappy            bool // emit Snappy-compatible output
	flushOnWrite      bool // compress on every Write call
	appendIndex       bool // append index chunk on Close
	level             uint8
}
86
// result is a finished (encoded) chunk handed to the ordered output
// goroutine started in Reset.
type result struct {
	b []byte
	// Uncompressed start offset
	startOffset int64
}
92
93// err returns the previously set error.
94// If no error has been set it is set to err if not nil.
95func (w *Writer) err(err error) error {
96 w.errMu.Lock()
97 errSet := w.errState
98 if errSet == nil && err != nil {
99 w.errState = err
100 errSet = err
101 }
102 w.errMu.Unlock()
103 return errSet
104}
105
// Reset discards the writer's state and switches the Snappy writer to write to w.
// This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
	// Writers whose options failed validation are unusable.
	if !w.paramsOK {
		return
	}
	// Close previous writer, if any.
	if w.output != nil {
		close(w.output)
		w.writerWg.Wait()
		w.output = nil
	}
	w.errState = nil
	w.ibuf = w.ibuf[:0]
	w.wroteStreamHeader = false
	w.written = 0
	w.writer = writer
	w.uncompWritten = 0
	w.index.reset(w.blockSize)

	// If we didn't get a writer, stop here.
	if writer == nil {
		return
	}
	// If no concurrency requested, don't spin up writer goroutine.
	if w.concurrency == 1 {
		return
	}

	toWrite := make(chan chan result, w.concurrency)
	w.output = toWrite
	w.writerWg.Add(1)

	// Start a writer goroutine that will write all output in order.
	go func() {
		defer w.writerWg.Done()

		// Get a queued write.
		for write := range toWrite {
			// Wait for the data to be available.
			input := <-write
			in := input.b
			if len(in) > 0 {
				if w.err(nil) == nil {
					// Don't expose data from previous buffers.
					toWrite := in[:len(in):len(in)]
					// Write to output.
					n, err := writer.Write(toWrite)
					if err == nil && n != len(toWrite) {
						// NOTE(review): io.ErrShortWrite would be the
						// conventional error for a short write — confirm.
						err = io.ErrShortBuffer
					}
					_ = w.err(err)
					w.err(w.index.add(w.written, input.startOffset))
					w.written += int64(n)
				}
			}
			// Recycle only buffers that came from the pool.
			if cap(in) >= w.obufLen {
				w.buffers.Put(in)
			}
			// close the incoming write request.
			// This can be used for synchronizing flushes.
			close(write)
		}
	}()
}
171
// Write satisfies the io.Writer interface.
// Input is buffered in ibuf until a full block is available.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	if w.flushOnWrite {
		// Bypass buffering entirely; compress this write as-is.
		return w.write(p)
	}
	// If we exceed the input buffer size, start writing
	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil {
		var n int
		if len(w.ibuf) == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			n, _ = w.write(p)
		} else {
			// Top up ibuf to capacity, flush it, then continue with
			// the remainder of p. Errors surface via w.err below.
			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
			w.ibuf = w.ibuf[:len(w.ibuf)+n]
			w.write(w.ibuf)
			w.ibuf = w.ibuf[:0]
		}
		nRet += n
		p = p[n:]
	}
	if err := w.err(nil); err != nil {
		return nRet, err
	}
	// p should always be able to fit into w.ibuf now.
	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
	w.ibuf = w.ibuf[:len(w.ibuf)+n]
	nRet += n
	return nRet, nil
}
205
// ReadFrom implements the io.ReaderFrom interface.
// Using this is typically more efficient since it avoids a memory copy.
// ReadFrom reads data from r until EOF or error.
// The return value n is the number of bytes read.
// Any error except io.EOF encountered during the read is also returned.
func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	// Flush buffered data first so blocks stay in stream order.
	if len(w.ibuf) > 0 {
		err := w.Flush()
		if err != nil {
			return 0, err
		}
	}
	// Fast path: sources exposing their bytes (e.g. *bytes.Buffer)
	// can be encoded without an intermediate copy.
	if br, ok := r.(byter); ok {
		buf := br.Bytes()
		if err := w.EncodeBuffer(buf); err != nil {
			return 0, err
		}
		return int64(len(buf)), w.Flush()
	}
	for {
		// Read one block's worth into a pooled buffer, leaving room
		// for the chunk header at the front.
		inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen]
		n2, err := io.ReadFull(r, inbuf[obufHeaderLen:])
		if err != nil {
			if err == io.ErrUnexpectedEOF {
				// Partial final block; treat as EOF after writing it.
				err = io.EOF
			}
			if err != io.EOF {
				return n, w.err(err)
			}
		}
		if n2 == 0 {
			break
		}
		n += int64(n2)
		err2 := w.writeFull(inbuf[:n2+obufHeaderLen])
		if w.err(err2) != nil {
			break
		}

		if err != nil {
			// We got EOF and wrote everything
			break
		}
	}

	return n, w.err(nil)
}
256
257// AddSkippableBlock will add a skippable block to the stream.
258// The ID must be 0x80-0xfe (inclusive).
259// Length of the skippable block must be <= 16777215 bytes.
260func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
261 if err := w.err(nil); err != nil {
262 return err
263 }
264 if len(data) == 0 {
265 return nil
266 }
267 if id < 0x80 || id > chunkTypePadding {
268 return fmt.Errorf("invalid skippable block id %x", id)
269 }
270 if len(data) > maxChunkSize {
271 return fmt.Errorf("skippable block excessed maximum size")
272 }
273 var header [4]byte
274 chunkLen := 4 + len(data)
275 header[0] = id
276 header[1] = uint8(chunkLen >> 0)
277 header[2] = uint8(chunkLen >> 8)
278 header[3] = uint8(chunkLen >> 16)
279 if w.concurrency == 1 {
280 write := func(b []byte) error {
281 n, err := w.writer.Write(b)
282 if err = w.err(err); err != nil {
283 return err
284 }
285 if n != len(data) {
286 return w.err(io.ErrShortWrite)
287 }
288 w.written += int64(n)
289 return w.err(nil)
290 }
291 if !w.wroteStreamHeader {
292 w.wroteStreamHeader = true
293 if w.snappy {
294 if err := write([]byte(magicChunkSnappy)); err != nil {
295 return err
296 }
297 } else {
298 if err := write([]byte(magicChunk)); err != nil {
299 return err
300 }
301 }
302 }
303 if err := write(header[:]); err != nil {
304 return err
305 }
306 if err := write(data); err != nil {
307 return err
308 }
309 }
310
311 // Create output...
312 if !w.wroteStreamHeader {
313 w.wroteStreamHeader = true
314 hWriter := make(chan result)
315 w.output <- hWriter
316 if w.snappy {
317 hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
318 } else {
319 hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
320 }
321 }
322
323 // Copy input.
324 inbuf := w.buffers.Get().([]byte)[:4]
325 copy(inbuf, header[:])
326 inbuf = append(inbuf, data...)
327
328 output := make(chan result, 1)
329 // Queue output.
330 w.output <- output
331 output <- result{startOffset: w.uncompWritten, b: inbuf}
332
333 return nil
334}
335
// EncodeBuffer will add a buffer to the stream.
// This is the fastest way to encode a stream,
// but the input buffer cannot be written to by the caller
// until Flush or Close has been called when concurrency != 1.
//
// If you cannot control that, use the regular Write function.
//
// Note that input is not buffered.
// This means that each write will result in discrete blocks being created.
// For buffered writes, use the regular Write function.
func (w *Writer) EncodeBuffer(buf []byte) (err error) {
	if err := w.err(nil); err != nil {
		return err
	}

	if w.flushOnWrite {
		_, err := w.write(buf)
		return err
	}
	// Flush queued data first.
	if len(w.ibuf) > 0 {
		err := w.Flush()
		if err != nil {
			return err
		}
	}
	if w.concurrency == 1 {
		_, err := w.writeSync(buf)
		return err
	}

	// Spawn goroutine and write block to output channel.
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		hWriter := make(chan result)
		w.output <- hWriter
		if w.snappy {
			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
		} else {
			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
		}
	}

	for len(buf) > 0 {
		// Cut input.
		uncompressed := buf
		if len(uncompressed) > w.blockSize {
			uncompressed = uncompressed[:w.blockSize]
		}
		buf = buf[len(uncompressed):]
		// Get an output buffer.
		// Unlike write(), the input is NOT copied: the chunk aliases the
		// caller's buffer, which is why callers must not modify it.
		obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
		output := make(chan result)
		// Queue output now, so we keep order.
		w.output <- output
		res := result{
			startOffset: w.uncompWritten,
		}
		w.uncompWritten += int64(len(uncompressed))
		go func() {
			checksum := crc(uncompressed)

			// Set to uncompressed.
			chunkType := uint8(chunkTypeUncompressedData)
			chunkLen := 4 + len(uncompressed)

			// Attempt compressing.
			n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
			n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

			// Check if we should use this, or store as uncompressed instead.
			if n2 > 0 {
				chunkType = uint8(chunkTypeCompressedData)
				chunkLen = 4 + n + n2
				obuf = obuf[:obufHeaderLen+n+n2]
			} else {
				// copy uncompressed
				copy(obuf[obufHeaderLen:], uncompressed)
			}

			// Fill in the per-chunk header that comes before the body.
			obuf[0] = chunkType
			obuf[1] = uint8(chunkLen >> 0)
			obuf[2] = uint8(chunkLen >> 8)
			obuf[3] = uint8(chunkLen >> 16)
			obuf[4] = uint8(checksum >> 0)
			obuf[5] = uint8(checksum >> 8)
			obuf[6] = uint8(checksum >> 16)
			obuf[7] = uint8(checksum >> 24)

			// Queue final output.
			res.b = obuf
			output <- res
		}()
	}
	return nil
}
433
// encodeBlock compresses uncompressed into obuf using the configured
// level (and Snappy compatibility mode). It returns the number of bytes
// written to obuf, or 0 when the block should be stored uncompressed.
func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
	if w.customEnc != nil {
		// A negative return from the custom encoder falls through to
		// the built-in encoders below.
		if ret := w.customEnc(obuf, uncompressed); ret >= 0 {
			return ret
		}
	}
	if w.snappy {
		switch w.level {
		case levelFast:
			return encodeBlockSnappy(obuf, uncompressed)
		case levelBetter:
			return encodeBlockBetterSnappy(obuf, uncompressed)
		case levelBest:
			return encodeBlockBestSnappy(obuf, uncompressed)
		}
		// levelUncompressed: report incompressible.
		return 0
	}
	switch w.level {
	case levelFast:
		return encodeBlock(obuf, uncompressed)
	case levelBetter:
		return encodeBlockBetter(obuf, uncompressed)
	case levelBest:
		return encodeBlockBest(obuf, uncompressed, nil)
	}
	return 0
}
461
// write splits p into blocks and queues them for concurrent compression,
// copying the input so the caller may reuse p immediately.
// With concurrency == 1 it falls back to the synchronous path.
func (w *Writer) write(p []byte) (nRet int, errRet error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	if w.concurrency == 1 {
		return w.writeSync(p)
	}

	// Spawn goroutine and write block to output channel.
	for len(p) > 0 {
		if !w.wroteStreamHeader {
			w.wroteStreamHeader = true
			hWriter := make(chan result)
			w.output <- hWriter
			if w.snappy {
				hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
			} else {
				hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
			}
		}

		var uncompressed []byte
		if len(p) > w.blockSize {
			uncompressed, p = p[:w.blockSize], p[w.blockSize:]
		} else {
			uncompressed, p = p, nil
		}

		// Copy input.
		// If the block is incompressible, this is used for the result.
		inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
		obuf := w.buffers.Get().([]byte)[:w.obufLen]
		copy(inbuf[obufHeaderLen:], uncompressed)
		uncompressed = inbuf[obufHeaderLen:]

		output := make(chan result)
		// Queue output now, so we keep order.
		w.output <- output
		res := result{
			startOffset: w.uncompWritten,
		}
		w.uncompWritten += int64(len(uncompressed))

		go func() {
			checksum := crc(uncompressed)

			// Set to uncompressed.
			chunkType := uint8(chunkTypeUncompressedData)
			chunkLen := 4 + len(uncompressed)

			// Attempt compressing.
			n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
			n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

			// Check if we should use this, or store as uncompressed instead.
			if n2 > 0 {
				chunkType = uint8(chunkTypeCompressedData)
				chunkLen = 4 + n + n2
				obuf = obuf[:obufHeaderLen+n+n2]
			} else {
				// Use input as output.
				// inbuf already contains the data; swap so the header
				// is written in front of it and obuf gets recycled.
				obuf, inbuf = inbuf, obuf
			}

			// Fill in the per-chunk header that comes before the body.
			obuf[0] = chunkType
			obuf[1] = uint8(chunkLen >> 0)
			obuf[2] = uint8(chunkLen >> 8)
			obuf[3] = uint8(chunkLen >> 16)
			obuf[4] = uint8(checksum >> 0)
			obuf[5] = uint8(checksum >> 8)
			obuf[6] = uint8(checksum >> 16)
			obuf[7] = uint8(checksum >> 24)

			// Queue final output.
			res.b = obuf
			output <- res

			// Put unused buffer back in pool.
			w.buffers.Put(inbuf)
		}()
		nRet += len(uncompressed)
	}
	return nRet, nil
}
547
// writeFull is a special version of write that will always write the full buffer.
// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer.
// The data will be written as a single block.
// The caller is not allowed to use inbuf after this function has been called.
func (w *Writer) writeFull(inbuf []byte) (errRet error) {
	if err := w.err(nil); err != nil {
		return err
	}

	if w.concurrency == 1 {
		_, err := w.writeSync(inbuf[obufHeaderLen:])
		return err
	}

	// Spawn goroutine and write block to output channel.
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		hWriter := make(chan result)
		w.output <- hWriter
		if w.snappy {
			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
		} else {
			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
		}
	}

	// Get an output buffer.
	obuf := w.buffers.Get().([]byte)[:w.obufLen]
	// No copy needed: inbuf already has the header gap at the front.
	uncompressed := inbuf[obufHeaderLen:]

	output := make(chan result)
	// Queue output now, so we keep order.
	w.output <- output
	res := result{
		startOffset: w.uncompWritten,
	}
	w.uncompWritten += int64(len(uncompressed))

	go func() {
		checksum := crc(uncompressed)

		// Set to uncompressed.
		chunkType := uint8(chunkTypeUncompressedData)
		chunkLen := 4 + len(uncompressed)

		// Attempt compressing.
		n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
		n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

		// Check if we should use this, or store as uncompressed instead.
		if n2 > 0 {
			chunkType = uint8(chunkTypeCompressedData)
			chunkLen = 4 + n + n2
			obuf = obuf[:obufHeaderLen+n+n2]
		} else {
			// Use input as output.
			obuf, inbuf = inbuf, obuf
		}

		// Fill in the per-chunk header that comes before the body.
		obuf[0] = chunkType
		obuf[1] = uint8(chunkLen >> 0)
		obuf[2] = uint8(chunkLen >> 8)
		obuf[3] = uint8(chunkLen >> 16)
		obuf[4] = uint8(checksum >> 0)
		obuf[5] = uint8(checksum >> 8)
		obuf[6] = uint8(checksum >> 16)
		obuf[7] = uint8(checksum >> 24)

		// Queue final output.
		res.b = obuf
		output <- res

		// Put unused buffer back in pool.
		w.buffers.Put(inbuf)
	}()
	return nil
}
626
// writeSync compresses and writes p block by block directly to the
// underlying writer, without goroutines. Used when concurrency == 1.
func (w *Writer) writeSync(p []byte) (nRet int, errRet error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		var n int
		var err error
		if w.snappy {
			n, err = w.writer.Write([]byte(magicChunkSnappy))
		} else {
			n, err = w.writer.Write([]byte(magicChunk))
		}
		if err != nil {
			return 0, w.err(err)
		}
		// magicChunk and magicChunkSnappy have the same length,
		// so this check is valid for either branch.
		if n != len(magicChunk) {
			return 0, w.err(io.ErrShortWrite)
		}
		w.written += int64(n)
	}

	for len(p) > 0 {
		var uncompressed []byte
		if len(p) > w.blockSize {
			uncompressed, p = p[:w.blockSize], p[w.blockSize:]
		} else {
			uncompressed, p = p, nil
		}

		obuf := w.buffers.Get().([]byte)[:w.obufLen]
		checksum := crc(uncompressed)

		// Set to uncompressed.
		chunkType := uint8(chunkTypeUncompressedData)
		chunkLen := 4 + len(uncompressed)

		// Attempt compressing.
		n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
		n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

		if n2 > 0 {
			chunkType = uint8(chunkTypeCompressedData)
			chunkLen = 4 + n + n2
			obuf = obuf[:obufHeaderLen+n+n2]
		} else {
			// Header only; the raw data is written separately below.
			obuf = obuf[:8]
		}

		// Fill in the per-chunk header that comes before the body.
		obuf[0] = chunkType
		obuf[1] = uint8(chunkLen >> 0)
		obuf[2] = uint8(chunkLen >> 8)
		obuf[3] = uint8(chunkLen >> 16)
		obuf[4] = uint8(checksum >> 0)
		obuf[5] = uint8(checksum >> 8)
		obuf[6] = uint8(checksum >> 16)
		obuf[7] = uint8(checksum >> 24)

		n, err := w.writer.Write(obuf)
		if err != nil {
			return 0, w.err(err)
		}
		if n != len(obuf) {
			return 0, w.err(io.ErrShortWrite)
		}
		w.err(w.index.add(w.written, w.uncompWritten))
		w.written += int64(n)
		w.uncompWritten += int64(len(uncompressed))

		if chunkType == chunkTypeUncompressedData {
			// Write uncompressed data.
			n, err := w.writer.Write(uncompressed)
			if err != nil {
				return 0, w.err(err)
			}
			if n != len(uncompressed) {
				return 0, w.err(io.ErrShortWrite)
			}
			w.written += int64(n)
		}
		w.buffers.Put(obuf)
		// Queue final output.
		nRet += len(uncompressed)
	}
	return nRet, nil
}
714
// Flush flushes the Writer to its underlying io.Writer.
// This does not apply padding.
func (w *Writer) Flush() error {
	if err := w.err(nil); err != nil {
		return err
	}

	// Queue any data still in input buffer.
	if len(w.ibuf) != 0 {
		if !w.wroteStreamHeader {
			// Nothing queued yet; write synchronously so the stream
			// header and first block are emitted immediately.
			_, err := w.writeSync(w.ibuf)
			w.ibuf = w.ibuf[:0]
			return w.err(err)
		} else {
			_, err := w.write(w.ibuf)
			w.ibuf = w.ibuf[:0]
			err = w.err(err)
			if err != nil {
				return err
			}
		}
	}
	if w.output == nil {
		return w.err(nil)
	}

	// Send empty buffer
	res := make(chan result)
	w.output <- res
	// Block until this has been picked up.
	res <- result{b: nil, startOffset: w.uncompWritten}
	// When it is closed, we have flushed.
	<-res
	return w.err(nil)
}
750
// Close calls Flush and then closes the Writer.
// Calling Close multiple times is ok,
// but calling CloseIndex after this will make it not return the index.
func (w *Writer) Close() error {
	// Respect the AddIndex option; the returned index is discarded.
	_, err := w.closeIndex(w.appendIndex)
	return err
}
758
// CloseIndex calls Close and returns an index on first call.
// This is not required if you are only adding index to a stream.
func (w *Writer) CloseIndex() ([]byte, error) {
	return w.closeIndex(true)
}
764
// closeIndex flushes, shuts down the output goroutine, optionally builds
// (and, with appendIndex, writes) the stream index, applies padding, and
// marks the writer closed via the errClosed sentinel.
func (w *Writer) closeIndex(idx bool) ([]byte, error) {
	err := w.Flush()
	if w.output != nil {
		close(w.output)
		w.writerWg.Wait()
		w.output = nil
	}

	var index []byte
	if w.err(err) == nil && w.writer != nil {
		// Create index.
		if idx {
			compSize := int64(-1)
			// Compressed size is only exact when no padding follows.
			if w.pad <= 1 {
				compSize = w.written
			}
			index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize)
			// Count as written for padding.
			if w.appendIndex {
				w.written += int64(len(index))
			}
		}

		if w.pad > 1 {
			tmp := w.ibuf[:0]
			if len(index) > 0 {
				// Allocate another buffer.
				// (index aliases w.ibuf, so it must not be reused here.)
				tmp = w.buffers.Get().([]byte)[:0]
				defer w.buffers.Put(tmp)
			}
			add := calcSkippableFrame(w.written, int64(w.pad))
			frame, err := skippableFrame(tmp, add, w.randSrc)
			if err = w.err(err); err != nil {
				return nil, err
			}
			n, err2 := w.writer.Write(frame)
			if err2 == nil && n != len(frame) {
				err2 = io.ErrShortWrite
			}
			_ = w.err(err2)
		}
		if len(index) > 0 && w.appendIndex {
			n, err2 := w.writer.Write(index)
			if err2 == nil && n != len(index) {
				err2 = io.ErrShortWrite
			}
			_ = w.err(err2)
		}
	}
	// Set the sticky error to errClosed; if errClosed comes back the
	// close (this one or an earlier one) succeeded cleanly.
	err = w.err(errClosed)
	if err == errClosed {
		return index, nil
	}
	return nil, err
}
820
821// calcSkippableFrame will return a total size to be added for written
822// to be divisible by multiple.
823// The value will always be > skippableFrameHeader.
824// The function will panic if written < 0 or wantMultiple <= 0.
825func calcSkippableFrame(written, wantMultiple int64) int {
826 if wantMultiple <= 0 {
827 panic("wantMultiple <= 0")
828 }
829 if written < 0 {
830 panic("written < 0")
831 }
832 leftOver := written % wantMultiple
833 if leftOver == 0 {
834 return 0
835 }
836 toAdd := wantMultiple - leftOver
837 for toAdd < skippableFrameHeader {
838 toAdd += wantMultiple
839 }
840 return int(toAdd)
841}
842
843// skippableFrame will add a skippable frame with a total size of bytes.
844// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader
845func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
846 if total == 0 {
847 return dst, nil
848 }
849 if total < skippableFrameHeader {
850 return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total)
851 }
852 if int64(total) >= maxBlockSize+skippableFrameHeader {
853 return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total)
854 }
855 // Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)"
856 dst = append(dst, chunkTypePadding)
857 f := uint32(total - skippableFrameHeader)
858 // Add chunk length.
859 dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16))
860 // Add data
861 start := len(dst)
862 dst = append(dst, make([]byte, f)...)
863 _, err := io.ReadFull(r, dst[start:])
864 return dst, err
865}
866
// errClosed is the sticky sentinel set by closeIndex; it marks the
// writer as cleanly closed.
var errClosed = errors.New("s2: Writer is closed")

// WriterOption is an option for creating a encoder.
type WriterOption func(*Writer) error
871
// WriterConcurrency will set the concurrency,
// meaning the maximum number of blocks to compress concurrently.
// (The original comment said "decoders"; this is a Writer option,
// so it controls encoding concurrency.)
// The value supplied must be at least 1.
// By default this will be set to GOMAXPROCS.
func WriterConcurrency(n int) WriterOption {
	return func(w *Writer) error {
		if n <= 0 {
			return errors.New("concurrency must be at least 1")
		}
		w.concurrency = n
		return nil
	}
}
885
// WriterAddIndex will append an index to the end of a stream
// when it is closed.
// The index allows random-access seeking in the compressed stream.
func WriterAddIndex() WriterOption {
	return func(w *Writer) error {
		w.appendIndex = true
		return nil
	}
}
894
// WriterBetterCompression will enable better compression.
// EncodeBetter compresses better than Encode but typically with a
// 10-40% speed decrease on both compression and decompression.
func WriterBetterCompression() WriterOption {
	return func(w *Writer) error {
		w.level = levelBetter
		return nil
	}
}
904
// WriterBestCompression will enable best compression.
// EncodeBest compresses better than EncodeBetter but typically with a
// big speed decrease on compression.
func WriterBestCompression() WriterOption {
	return func(w *Writer) error {
		w.level = levelBest
		return nil
	}
}
914
// WriterUncompressed will bypass compression.
// The stream will be written as uncompressed blocks only.
// If concurrency is > 1 CRC and output will still be done async.
func WriterUncompressed() WriterOption {
	return func(w *Writer) error {
		w.level = levelUncompressed
		return nil
	}
}
924
925// WriterBlockSize allows to override the default block size.
926// Blocks will be this size or smaller.
927// Minimum size is 4KB and and maximum size is 4MB.
928//
929// Bigger blocks may give bigger throughput on systems with many cores,
930// and will increase compression slightly, but it will limit the possible
931// concurrency for smaller payloads for both encoding and decoding.
932// Default block size is 1MB.
933//
934// When writing Snappy compatible output using WriterSnappyCompat,
935// the maximum block size is 64KB.
936func WriterBlockSize(n int) WriterOption {
937 return func(w *Writer) error {
938 if w.snappy && n > maxSnappyBlockSize || n < minBlockSize {
939 return errors.New("s2: block size too large. Must be <= 64K and >=4KB on for snappy compatible output")
940 }
941 if n > maxBlockSize || n < minBlockSize {
942 return errors.New("s2: block size too large. Must be <= 4MB and >=4KB")
943 }
944 w.blockSize = n
945 return nil
946 }
947}
948
949// WriterPadding will add padding to all output so the size will be a multiple of n.
950// This can be used to obfuscate the exact output size or make blocks of a certain size.
951// The contents will be a skippable frame, so it will be invisible by the decoder.
952// n must be > 0 and <= 4MB.
953// The padded area will be filled with data from crypto/rand.Reader.
954// The padding will be applied whenever Close is called on the writer.
955func WriterPadding(n int) WriterOption {
956 return func(w *Writer) error {
957 if n <= 0 {
958 return fmt.Errorf("s2: padding must be at least 1")
959 }
960 // No need to waste our time.
961 if n == 1 {
962 w.pad = 0
963 }
964 if n > maxBlockSize {
965 return fmt.Errorf("s2: padding must less than 4MB")
966 }
967 w.pad = n
968 return nil
969 }
970}
971
// WriterPaddingSrc will get random data for padding from the supplied source.
// By default crypto/rand is used.
// The reader must supply as many bytes as requested by io.ReadFull.
func WriterPaddingSrc(reader io.Reader) WriterOption {
	return func(w *Writer) error {
		w.randSrc = reader
		return nil
	}
}
980
// WriterSnappyCompat will write snappy compatible output.
// The output can be decompressed using either snappy or s2.
// If the configured block size is more than 64KB it is reduced to fit
// the snappy limit.
func WriterSnappyCompat() WriterOption {
	return func(w *Writer) error {
		w.snappy = true
		if w.blockSize > 64<<10 {
			// We choose 8 bytes less than 64K, since that will make literal emits slightly more effective.
			// And allows us to skip some size checks.
			w.blockSize = (64 << 10) - 8
		}
		return nil
	}
}
995
// WriterFlushOnWrite will compress blocks on each call to the Write function.
//
// This is quite inefficient as blocks size will depend on the write size.
//
// Use WriterConcurrency(1) to also make sure that output is flushed
// when Write calls return; otherwise output is written when compression is done.
func WriterFlushOnWrite() WriterOption {
	return func(w *Writer) error {
		w.flushOnWrite = true
		return nil
	}
}
1008
// WriterCustomEncoder allows to override the encoder for blocks on the stream.
// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
// Block size (initial varint) should not be added by the encoder.
// Returning value 0 indicates the block could not be compressed.
// Returning a negative value indicates that compression should be attempted
// by the built-in encoders instead (see encodeBlock).
// The function should expect to be called concurrently.
func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
	return func(w *Writer) error {
		w.customEnc = fn
		return nil
	}
}
diff --git a/vendor/github.com/klauspost/cpuid/v2/.gitignore b/vendor/github.com/klauspost/cpuid/v2/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/.gitignore
@@ -0,0 +1,24 @@
1# Compiled Object files, Static and Dynamic libs (Shared Objects)
2*.o
3*.a
4*.so
5
6# Folders
7_obj
8_test
9
10# Architecture specific extensions/prefixes
11*.[568vq]
12[568vq].out
13
14*.cgo1.go
15*.cgo2.c
16_cgo_defun.c
17_cgo_gotypes.go
18_cgo_export.*
19
20_testmain.go
21
22*.exe
23*.test
24*.prof
diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml
new file mode 100644
index 0000000..944cc00
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml
@@ -0,0 +1,74 @@
1# This is an example goreleaser.yaml file with some sane defaults.
2# Make sure to check the documentation at http://goreleaser.com
3
4builds:
5 -
6 id: "cpuid"
7 binary: cpuid
8 main: ./cmd/cpuid/main.go
9 env:
10 - CGO_ENABLED=0
11 flags:
12 - -ldflags=-s -w
13 goos:
14 - aix
15 - linux
16 - freebsd
17 - netbsd
18 - windows
19 - darwin
20 goarch:
21 - 386
22 - amd64
23 - arm64
24 goarm:
25 - 7
26
27archives:
28 -
29 id: cpuid
30 name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
31 replacements:
32 aix: AIX
33 darwin: OSX
34 linux: Linux
35 windows: Windows
36 386: i386
37 amd64: x86_64
38 freebsd: FreeBSD
39 netbsd: NetBSD
40 format_overrides:
41 - goos: windows
42 format: zip
43 files:
44 - LICENSE
45checksum:
46 name_template: 'checksums.txt'
47snapshot:
48 name_template: "{{ .Tag }}-next"
49changelog:
50 sort: asc
51 filters:
52 exclude:
53 - '^doc:'
54 - '^docs:'
55 - '^test:'
56 - '^tests:'
57 - '^Update\sREADME.md'
58
59nfpms:
60 -
61 file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
62 vendor: Klaus Post
63 homepage: https://github.com/klauspost/cpuid
64 maintainer: Klaus Post <[email protected]>
65 description: CPUID Tool
66 license: BSD 3-Clause
67 formats:
68 - deb
69 - rpm
70 replacements:
71 darwin: Darwin
72 linux: Linux
73 freebsd: FreeBSD
74 amd64: x86_64
diff --git a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt
new file mode 100644
index 0000000..452d28e
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt
@@ -0,0 +1,35 @@
1Developer Certificate of Origin
2Version 1.1
3
4Copyright (C) 2015- Klaus Post & Contributors.
5Email: [email protected]
6
7Everyone is permitted to copy and distribute verbatim copies of this
8license document, but changing it is not allowed.
9
10
11Developer's Certificate of Origin 1.1
12
13By making a contribution to this project, I certify that:
14
15(a) The contribution was created in whole or in part by me and I
16 have the right to submit it under the open source license
17 indicated in the file; or
18
19(b) The contribution is based upon previous work that, to the best
20 of my knowledge, is covered under an appropriate open source
21 license and I have the right under that license to submit that
22 work with modifications, whether created in whole or in part
23 by me, under the same open source license (unless I am
24 permitted to submit under a different license), as indicated
25 in the file; or
26
27(c) The contribution was provided directly to me by some other
28 person who certified (a), (b) or (c) and I have not modified
29 it.
30
31(d) I understand and agree that this project and the contribution
32 are public and that a record of the contribution (including all
33 personal information I submit with it, including my sign-off) is
34 maintained indefinitely and may be redistributed consistent with
35 this project or the open source license(s) involved.
diff --git a/vendor/github.com/klauspost/cpuid/v2/LICENSE b/vendor/github.com/klauspost/cpuid/v2/LICENSE
new file mode 100644
index 0000000..5cec7ee
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/LICENSE
@@ -0,0 +1,22 @@
1The MIT License (MIT)
2
3Copyright (c) 2015 Klaus Post
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in all
13copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21SOFTWARE.
22
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
new file mode 100644
index 0000000..30f8d29
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -0,0 +1,497 @@
1# cpuid
2Package cpuid provides information about the CPU running the current program.
3
4CPU features are detected on startup, and kept for fast access through the life of the application.
5Currently x86 / x64 (AMD64/i386) and ARM (ARM64) are supported, and no external C (cgo) code is used, which should make the library very easy to use.
6
7You can access the CPU information by accessing the shared CPU variable of the cpuid library.
8
9Package home: https://github.com/klauspost/cpuid
10
11[![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2)
12[![Go](https://github.com/klauspost/cpuid/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/cpuid/actions/workflows/go.yml)
13
14## installing
15
16`go get -u github.com/klauspost/cpuid/v2` using modules.
17Drop `v2` for others.
18
19Installing binary:
20
21`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest`
22
23Or download binaries from release page: https://github.com/klauspost/cpuid/releases
24
25### Homebrew
26
27For macOS/Linux users, you can install via [brew](https://brew.sh/)
28
29```sh
30$ brew install cpuid
31```
32
33## example
34
35```Go
36package main
37
38import (
39 "fmt"
40 "strings"
41
42 . "github.com/klauspost/cpuid/v2"
43)
44
45func main() {
46 // Print basic CPU information:
47 fmt.Println("Name:", CPU.BrandName)
48 fmt.Println("PhysicalCores:", CPU.PhysicalCores)
49 fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore)
50 fmt.Println("LogicalCores:", CPU.LogicalCores)
51 fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID)
52 fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ","))
53 fmt.Println("Cacheline bytes:", CPU.CacheLine)
54 fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes")
55 fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
56 fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes")
57 fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes")
58 fmt.Println("Frequency", CPU.Hz, "hz")
59
60 // Test if we have these specific features:
61 if CPU.Supports(SSE, SSE2) {
62 fmt.Println("We have Streaming SIMD 2 Extensions")
63 }
64}
65```
66
67Sample output:
68```
69>go run main.go
70Name: AMD Ryzen 9 3950X 16-Core Processor
71PhysicalCores: 16
72ThreadsPerCore: 2
73LogicalCores: 32
74Family 23 Model: 113 Vendor ID: AMD
75Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3
76Cacheline bytes: 64
77L1 Data Cache: 32768 bytes
78L1 Instruction Cache: 32768 bytes
79L2 Cache: 524288 bytes
80L3 Cache: 16777216 bytes
81Frequency 0 hz
82We have Streaming SIMD 2 Extensions
83```
84
85# usage
86
87The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features.
88A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler.
89
90To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc.
91This can be used with `cpuid.CPU.HasAll(f)` to quickly test if all features are supported.
92
93Note that for some cpu/os combinations some features will not be detected.
94`amd64` has rather good support and should work reliably on all platforms.
95
96Note that hypervisors may not pass through all CPU features through to the guest OS,
97so even if your host supports a feature it may not be visible on guests.
98
99## arm64 feature detection
100
101Not all operating systems provide ARM features directly
102and there is no safe way to do so for the rest.
103
104Currently `arm64/linux` and `arm64/freebsd` should be quite reliable.
105`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected.
106
107A `DetectARM()` can be used if you are able to control your deployment,
108it will detect CPU features, but may crash if the OS doesn't intercept the calls.
109A `-cpu.arm` flag for detecting unsafe ARM features can be added. See below.
110
111Note that currently only features are detected on ARM,
112no additional information is currently available.
113
114## flags
115
116It is possible to add flags that affect CPU detection.
117
118For this the `Flags()` command is provided.
119
120This must be called *before* `flag.Parse()` AND after the flags have been parsed `Detect()` must be called.
121
122This means that any detection used in `init()` functions will not contain these flags.
123
124Example:
125
126```Go
127package main
128
129import (
130 "flag"
131 "fmt"
132 "strings"
133
134 "github.com/klauspost/cpuid/v2"
135)
136
137func main() {
138 cpuid.Flags()
139 flag.Parse()
140 cpuid.Detect()
141
142 // Test if we have these specific features:
143 if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) {
144 fmt.Println("We have Streaming SIMD 2 Extensions")
145 }
146}
147```
148
149## commandline
150
151Download as binary from: https://github.com/klauspost/cpuid/releases
152
153Install from source:
154
155`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest`
156
157### Example
158
159```
160λ cpuid
161Name: AMD Ryzen 9 3950X 16-Core Processor
162Vendor String: AuthenticAMD
163Vendor ID: AMD
164PhysicalCores: 16
165Threads Per Core: 2
166Logical Cores: 32
167CPU Family 23 Model: 113
168Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CLZERO,CMOV,CMPXCHG8,CPBOOST,CX16,F16C,FMA3,FXSR,FXSROPT,HTT,HYPERVISOR,LAHF,LZCNT,MCAOVERFLOW,MMX,MMXEXT,MOVBE,NX,OSXSAVE,POPCNT,RDRAND,RDSEED,RDTSCP,SCE,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3,SUCCOR,X87,XSAVE
169Microarchitecture level: 3
170Cacheline bytes: 64
171L1 Instruction Cache: 32768 bytes
172L1 Data Cache: 32768 bytes
173L2 Cache: 524288 bytes
174L3 Cache: 16777216 bytes
175
176```
177### JSON Output:
178
179```
180λ cpuid --json
181{
182 "BrandName": "AMD Ryzen 9 3950X 16-Core Processor",
183 "VendorID": 2,
184 "VendorString": "AuthenticAMD",
185 "PhysicalCores": 16,
186 "ThreadsPerCore": 2,
187 "LogicalCores": 32,
188 "Family": 23,
189 "Model": 113,
190 "CacheLine": 64,
191 "Hz": 0,
192 "BoostFreq": 0,
193 "Cache": {
194 "L1I": 32768,
195 "L1D": 32768,
196 "L2": 524288,
197 "L3": 16777216
198 },
199 "SGX": {
200 "Available": false,
201 "LaunchControl": false,
202 "SGX1Supported": false,
203 "SGX2Supported": false,
204 "MaxEnclaveSizeNot64": 0,
205 "MaxEnclaveSize64": 0,
206 "EPCSections": null
207 },
208 "Features": [
209 "ADX",
210 "AESNI",
211 "AVX",
212 "AVX2",
213 "BMI1",
214 "BMI2",
215 "CLMUL",
216 "CLZERO",
217 "CMOV",
218 "CMPXCHG8",
219 "CPBOOST",
220 "CX16",
221 "F16C",
222 "FMA3",
223 "FXSR",
224 "FXSROPT",
225 "HTT",
226 "HYPERVISOR",
227 "LAHF",
228 "LZCNT",
229 "MCAOVERFLOW",
230 "MMX",
231 "MMXEXT",
232 "MOVBE",
233 "NX",
234 "OSXSAVE",
235 "POPCNT",
236 "RDRAND",
237 "RDSEED",
238 "RDTSCP",
239 "SCE",
240 "SHA",
241 "SSE",
242 "SSE2",
243 "SSE3",
244 "SSE4",
245 "SSE42",
246 "SSE4A",
247 "SSSE3",
248 "SUCCOR",
249 "X87",
250 "XSAVE"
251 ],
252 "X64Level": 3
253}
254```
255
256### Check CPU microarch level
257
258```
259λ cpuid --check-level=3
2602022/03/18 17:04:40 AMD Ryzen 9 3950X 16-Core Processor
2612022/03/18 17:04:40 Microarchitecture level 3 is supported. Max level is 3.
262Exit Code 0
263
264λ cpuid --check-level=4
2652022/03/18 17:06:18 AMD Ryzen 9 3950X 16-Core Processor
2662022/03/18 17:06:18 Microarchitecture level 4 not supported. Max level is 3.
267Exit Code 1
268```
269
270
271## Available flags
272
273### x86 & amd64
274
275| Feature Flag | Description |
276|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
277| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) |
278| AESNI | Advanced Encryption Standard New Instructions |
279| AMD3DNOW | AMD 3DNOW |
280| AMD3DNOWEXT | AMD 3DNowExt |
281| AMXBF16 | Tile computational operations on BFLOAT16 numbers |
282| AMXINT8 | Tile computational operations on 8-bit integers |
283| AMXFP16 | Tile computational operations on FP16 numbers |
284| AMXTILE | Tile architecture |
285| APX_F | Intel APX |
286| AVX | AVX functions |
287| AVX10 | If set the Intel AVX10 Converged Vector ISA is supported |
288| AVX10_128 | If set indicates that AVX10 128-bit vector support is present |
289| AVX10_256 | If set indicates that AVX10 256-bit vector support is present |
290| AVX10_512 | If set indicates that AVX10 512-bit vector support is present |
291| AVX2 | AVX2 functions |
292| AVX512BF16 | AVX-512 BFLOAT16 Instructions |
293| AVX512BITALG | AVX-512 Bit Algorithms |
294| AVX512BW | AVX-512 Byte and Word Instructions |
295| AVX512CD | AVX-512 Conflict Detection Instructions |
296| AVX512DQ | AVX-512 Doubleword and Quadword Instructions |
297| AVX512ER | AVX-512 Exponential and Reciprocal Instructions |
298| AVX512F | AVX-512 Foundation |
299| AVX512FP16 | AVX-512 FP16 Instructions |
300| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions |
301| AVX512PF | AVX-512 Prefetch Instructions |
302| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions |
303| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 |
304| AVX512VL | AVX-512 Vector Length Extensions |
305| AVX512VNNI | AVX-512 Vector Neural Network Instructions |
306| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q |
307| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword |
308| AVXIFMA | AVX-IFMA instructions |
309| AVXNECONVERT | AVX-NE-CONVERT instructions |
310| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one |
311| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions |
312| AVXVNNIINT8 | AVX-VNNI-INT8 instructions |
313| BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 |
314| BMI1 | Bit Manipulation Instruction Set 1 |
315| BMI2 | Bit Manipulation Instruction Set 2 |
316| CETIBT | Intel CET Indirect Branch Tracking |
317| CETSS | Intel CET Shadow Stack |
318| CLDEMOTE | Cache Line Demote |
319| CLMUL | Carry-less Multiplication |
320| CLZERO | CLZERO instruction supported |
321| CMOV | i686 CMOV |
322| CMPCCXADD | CMPCCXADD instructions |
323| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB |
324| CMPXCHG8 | CMPXCHG8 instruction |
325| CPBOOST | Core Performance Boost |
326| CPPC | AMD: Collaborative Processor Performance Control |
327| CX16 | CMPXCHG16B Instruction |
328| EFER_LMSLE_UNS | AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ |
329| ENQCMD | Enqueue Command |
330| ERMS | Enhanced REP MOVSB/STOSB |
331| F16C | Half-precision floating-point conversion |
332| FLUSH_L1D | Flush L1D cache |
333| FMA3 | Intel FMA 3. Does not imply AVX. |
334| FMA4 | Bulldozer FMA4 functions |
335| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide |
336| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide |
337| FSRM | Fast Short Rep Mov |
338| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 |
339| FXSROPT | FXSAVE/FXRSTOR optimizations |
340| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. |
341| HLE | Hardware Lock Elision |
342| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR |
343| HTT | Hyperthreading (enabled) |
344| HWA | Hardware assert supported. Indicates support for MSRC001_10 |
345| HYBRID_CPU | This part has CPUs of more than one type. |
346| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors |
347| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) |
348| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR |
349| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) |
350| IBRS | AMD: Indirect Branch Restricted Speculation |
351| IBRS_PREFERRED | AMD: IBRS is preferred over software solution |
352| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection |
353| IBS | Instruction Based Sampling (AMD) |
354| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) |
355| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) |
356| IBSFFV | Instruction Based Sampling Feature (AMD) |
357| IBSOPCNT | Instruction Based Sampling Feature (AMD) |
358| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) |
359| IBSOPSAM | Instruction Based Sampling Feature (AMD) |
360| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) |
361| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) |
362| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported |
363| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported |
364| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse |
365| IBS_PREVENTHOST | Disallowing IBS use by the host supported |
366| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 |
367| IDPRED_CTRL | IPRED_DIS |
368| INT_WBINVD | WBINVD/WBNOINVD are interruptible. |
369| INVLPGB | NVLPGB and TLBSYNC instruction supported |
370| KEYLOCKER | Key locker |
371| KEYLOCKERW | Key locker wide |
372| LAHF | LAHF/SAHF in long mode |
373| LAM | If set, CPU supports Linear Address Masking |
374| LBRVIRT | LBR virtualization |
375| LZCNT | LZCNT instruction |
376| MCAOVERFLOW | MCA overflow recovery support. |
377| MCDT_NO | Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. |
378| MCOMMIT | MCOMMIT instruction supported |
379| MD_CLEAR | VERW clears CPU buffers |
380| MMX | standard MMX |
381| MMXEXT | SSE integer functions or AMD MMX ext |
382| MOVBE | MOVBE instruction (big-endian) |
383| MOVDIR64B | Move 64 Bytes as Direct Store |
384| MOVDIRI | Move Doubleword as Direct Store |
385| MOVSB_ZL | Fast Zero-Length MOVSB |
386| MPX | Intel MPX (Memory Protection Extensions) |
387| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD |
388| MSRIRC | Instruction Retired Counter MSR available |
389| MSRLIST | Read/Write List of Model Specific Registers |
390| MSR_PAGEFLUSH | Page Flush MSR available |
391| NRIPS | Indicates support for NRIP save on VMEXIT |
392| NX | NX (No-Execute) bit |
393| OSXSAVE | XSAVE enabled by OS |
394| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption |
395| POPCNT | POPCNT instruction |
396| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled |
397| PREFETCHI | PREFETCHIT0/1 instructions |
398| PSFD | Predictive Store Forward Disable |
399| RDPRU | RDPRU instruction supported |
400| RDRAND | RDRAND instruction is available |
401| RDSEED | RDSEED instruction is available |
402| RDTSCP | RDTSCP Instruction |
403| RRSBA_CTRL | Restricted RSB Alternate |
404| RTM | Restricted Transactional Memory |
405| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. |
406| SERIALIZE | Serialize Instruction Execution |
407| SEV | AMD Secure Encrypted Virtualization supported |
408| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host |
409| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported |
410| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests |
411| SEV_ES | AMD SEV Encrypted State supported |
412| SEV_RESTRICTED | AMD SEV Restricted Injection supported |
413| SEV_SNP | AMD SEV Secure Nested Paging supported |
414| SGX | Software Guard Extensions |
415| SGXLC | Software Guard Extensions Launch Control |
416| SHA | Intel SHA Extensions |
417| SME | AMD Secure Memory Encryption supported |
418| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced |
419| SPEC_CTRL_SSBD | Speculative Store Bypass Disable |
420| SRBDS_CTRL | SRBDS mitigation MSR available |
421| SSE | SSE functions |
422| SSE2 | P4 SSE functions |
423| SSE3 | Prescott SSE3 functions |
424| SSE4 | Penryn SSE4.1 functions |
425| SSE42 | Nehalem SSE4.2 functions |
426| SSE4A | AMD Barcelona microarchitecture SSE4a instructions |
427| SSSE3 | Conroe SSSE3 functions |
428| STIBP | Single Thread Indirect Branch Predictors |
429| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On |
430| STOSB_SHORT | Fast short STOSB |
431| SUCCOR | Software uncorrectable error containment and recovery capability. |
432| SVM | AMD Secure Virtual Machine |
433| SVMDA | Indicates support for the SVM decode assists. |
434| SVMFBASID | SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control |
435| SVML | AMD SVM lock. Indicates support for SVM-Lock. |
436| SVMNP | AMD SVM nested paging |
437| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter |
438| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold |
439| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. |
440| SYSEE | SYSENTER and SYSEXIT instructions |
441| TBM | AMD Trailing Bit Manipulation |
442| TDX_GUEST | Intel Trust Domain Extensions Guest |
443| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations |
444| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. |
445| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. |
446| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 |
447| TSXLDTRK | Intel TSX Suspend Load Address Tracking |
448| VAES | Vector AES. AVX(512) versions requires additional checks. |
449| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. |
450| VMPL | AMD VM Permission Levels supported |
451| VMSA_REGPROT | AMD VMSA Register Protection supported |
452| VMX | Virtual Machine Extensions |
453| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. |
454| VTE | AMD Virtual Transparent Encryption supported |
455| WAITPKG | TPAUSE, UMONITOR, UMWAIT |
456| WBNOINVD | Write Back and Do Not Invalidate Cache |
457| WRMSRNS | Non-Serializing Write to Model Specific Register |
458| X87 | FPU |
459| XGETBV1 | Supports XGETBV with ECX = 1 |
460| XOP | Bulldozer XOP functions |
461| XSAVE | XSAVE, XRESTOR, XSETBV, XGETBV |
462| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. |
463| XSAVEOPT | XSAVEOPT available |
464| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS |
465
466# ARM features:
467
468| Feature Flag | Description |
469|--------------|------------------------------------------------------------------|
470| AESARM | AES instructions |
471| ARMCPUID | Some CPU ID registers readable at user-level |
472| ASIMD | Advanced SIMD |
473| ASIMDDP | SIMD Dot Product |
474| ASIMDHP | Advanced SIMD half-precision floating point |
475| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) |
476| ATOMICS | Large System Extensions (LSE) |
477| CRC32 | CRC32/CRC32C instructions |
478| DCPOP | Data cache clean to Point of Persistence (DC CVAP) |
479| EVTSTRM | Generic timer |
480| FCMA         | Floating point complex number addition and multiplication        |
481| FP | Single-precision and double-precision floating point |
482| FPHP | Half-precision floating point |
483| GPA | Generic Pointer Authentication |
484| JSCVT | Javascript-style double->int convert (FJCVTZS) |
485| LRCPC | Weaker release consistency (LDAPR, etc) |
486| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) |
487| SHA1 | SHA-1 instructions (SHA1C, etc) |
488| SHA2 | SHA-2 instructions (SHA256H, etc) |
489| SHA3         | SHA-3 instructions (EOR3, RAX1, XAR, BCAX)                       |
490| SHA512 | SHA512 instructions |
491| SM3 | SM3 instructions |
492| SM4 | SM4 instructions |
493| SVE | Scalable Vector Extension |
494
495# license
496
497This code is published under an MIT license. See LICENSE file for more information.
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
new file mode 100644
index 0000000..15b7603
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -0,0 +1,1473 @@
1// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
2
3// Package cpuid provides information about the CPU running the current program.
4//
5// CPU features are detected on startup, and kept for fast access through the life of the application.
6// Currently x86 / x64 (AMD64) as well as arm64 is supported.
7//
8// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
9//
10// Package home: https://github.com/klauspost/cpuid
11package cpuid
12
13import (
14 "flag"
15 "fmt"
16 "math"
17 "math/bits"
18 "os"
19 "runtime"
20 "strings"
21)
22
// AMD reference: https://www.amd.com/system/files/TechDocs/25481.pdf
24// and Processor Programming Reference (PPR)
25
// Vendor is a representation of a CPU vendor.
type Vendor int

const (
	// VendorUnknown is the zero value, used when the vendor string is not recognized.
	VendorUnknown Vendor = iota

	// x86 vendors and hypervisors.
	Intel
	AMD
	VIA
	Transmeta
	NSC
	KVM  // Kernel-based Virtual Machine
	MSVM // Microsoft Hyper-V or Windows Virtual PC
	VMware
	XenHVM
	Bhyve
	Hygon
	SiS
	RDC

	// ARM implementers.
	Ampere
	ARM
	Broadcom
	Cavium
	DEC
	Fujitsu
	Infineon
	Motorola
	NVIDIA
	AMCC
	Qualcomm
	Marvell

	// lastVendor marks the end of the list; keep it last.
	lastVendor
)
60
61//go:generate stringer -type=FeatureID,Vendor
62
// FeatureID is the ID of a specific cpu feature.
// Values are assigned sequentially via iota in the const block below,
// so their numeric values are an implementation detail and may change
// between releases; always refer to features by name.
type FeatureID int
65
const (
	// Keep index -1 as unknown
	UNKNOWN = -1

	// Add features
	ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
	AESNI // Advanced Encryption Standard New Instructions
	AMD3DNOW // AMD 3DNOW
	AMD3DNOWEXT // AMD 3DNowExt
	AMXBF16 // Tile computational operations on BFLOAT16 numbers
	AMXFP16 // Tile computational operations on FP16 numbers
	AMXINT8 // Tile computational operations on 8-bit integers
	AMXTILE // Tile architecture
	APX_F // Intel APX
	AVX // AVX functions
	AVX10 // If set the Intel AVX10 Converged Vector ISA is supported
	AVX10_128 // If set indicates that AVX10 128-bit vector support is present
	AVX10_256 // If set indicates that AVX10 256-bit vector support is present
	AVX10_512 // If set indicates that AVX10 512-bit vector support is present
	AVX2 // AVX2 functions
	AVX512BF16 // AVX-512 BFLOAT16 Instructions
	AVX512BITALG // AVX-512 Bit Algorithms
	AVX512BW // AVX-512 Byte and Word Instructions
	AVX512CD // AVX-512 Conflict Detection Instructions
	AVX512DQ // AVX-512 Doubleword and Quadword Instructions
	AVX512ER // AVX-512 Exponential and Reciprocal Instructions
	AVX512F // AVX-512 Foundation
	AVX512FP16 // AVX-512 FP16 Instructions
	AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
	AVX512PF // AVX-512 Prefetch Instructions
	AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
	AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2
	AVX512VL // AVX-512 Vector Length Extensions
	AVX512VNNI // AVX-512 Vector Neural Network Instructions
	AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
	AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
	AVXIFMA // AVX-IFMA instructions
	AVXNECONVERT // AVX-NE-CONVERT instructions
	AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one
	AVXVNNI // AVX (VEX encoded) VNNI neural network instructions
	AVXVNNIINT8 // AVX-VNNI-INT8 instructions
	BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598
	BMI1 // Bit Manipulation Instruction Set 1
	BMI2 // Bit Manipulation Instruction Set 2
	CETIBT // Intel CET Indirect Branch Tracking
	CETSS // Intel CET Shadow Stack
	CLDEMOTE // Cache Line Demote
	CLMUL // Carry-less Multiplication
	CLZERO // CLZERO instruction supported
	CMOV // i686 CMOV
	CMPCCXADD // CMPCCXADD instructions
	CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB
	CMPXCHG8 // CMPXCHG8 instruction
	CPBOOST // Core Performance Boost
	CPPC // AMD: Collaborative Processor Performance Control
	CX16 // CMPXCHG16B Instruction
	EFER_LMSLE_UNS // AMD: Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ
	ENQCMD // Enqueue Command
	ERMS // Enhanced REP MOVSB/STOSB
	F16C // Half-precision floating-point conversion
	FLUSH_L1D // Flush L1D cache
	FMA3 // Intel FMA 3. Does not imply AVX.
	FMA4 // Bulldozer FMA4 functions
	FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide
	FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide
	FSRM // Fast Short Rep Mov
	FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9
	FXSROPT // FXSAVE/FXRSTOR optimizations
	GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage.
	HLE // Hardware Lock Elision
	HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR
	HTT // Hyperthreading (enabled)
	HWA // Hardware assert supported. Indicates support for MSRC001_10
	HYBRID_CPU // This part has CPUs of more than one type.
	HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors
	IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel)
	IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR
	IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
	IBRS // AMD: Indirect Branch Restricted Speculation
	IBRS_PREFERRED // AMD: IBRS is preferred over software solution
	IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection
	IBS // Instruction Based Sampling (AMD)
	IBSBRNTRGT // Instruction Based Sampling Feature (AMD)
	IBSFETCHSAM // Instruction Based Sampling Feature (AMD)
	IBSFFV // Instruction Based Sampling Feature (AMD)
	IBSOPCNT // Instruction Based Sampling Feature (AMD)
	IBSOPCNTEXT // Instruction Based Sampling Feature (AMD)
	IBSOPSAM // Instruction Based Sampling Feature (AMD)
	IBSRDWROPCNT // Instruction Based Sampling Feature (AMD)
	IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
	IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported
	IBS_OPDATA4 // AMD: IBS op data 4 MSR supported
	IBS_OPFUSE // AMD: Indicates support for IbsOpFuse
	IBS_PREVENTHOST // Disallowing IBS use by the host supported
	IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4
	IDPRED_CTRL // IPRED_DIS
	INT_WBINVD // WBINVD/WBNOINVD are interruptible.
	INVLPGB // INVLPGB and TLBSYNC instruction supported
	KEYLOCKER // Key locker
	KEYLOCKERW // Key locker wide
	LAHF // LAHF/SAHF in long mode
	LAM // If set, CPU supports Linear Address Masking
	LBRVIRT // LBR virtualization
	LZCNT // LZCNT instruction
	MCAOVERFLOW // MCA overflow recovery support.
	MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it.
	MCOMMIT // MCOMMIT instruction supported
	MD_CLEAR // VERW clears CPU buffers
	MMX // standard MMX
	MMXEXT // SSE integer functions or AMD MMX ext
	MOVBE // MOVBE instruction (big-endian)
	MOVDIR64B // Move 64 Bytes as Direct Store
	MOVDIRI // Move Doubleword as Direct Store
	MOVSB_ZL // Fast Zero-Length MOVSB
	MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD
	MPX // Intel MPX (Memory Protection Extensions)
	MSRIRC // Instruction Retired Counter MSR available
	MSRLIST // Read/Write List of Model Specific Registers
	MSR_PAGEFLUSH // Page Flush MSR available
	NRIPS // Indicates support for NRIP save on VMEXIT
	NX // NX (No-Execute) bit
	OSXSAVE // XSAVE enabled by OS
	PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption
	POPCNT // POPCNT instruction
	PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled
	PREFETCHI // PREFETCHIT0/1 instructions
	PSFD // Predictive Store Forward Disable
	RDPRU // RDPRU instruction supported
	RDRAND // RDRAND instruction is available
	RDSEED // RDSEED instruction is available
	RDTSCP // RDTSCP Instruction
	RRSBA_CTRL // Restricted RSB Alternate
	RTM // Restricted Transactional Memory
	RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
	SERIALIZE // Serialize Instruction Execution
	SEV // AMD Secure Encrypted Virtualization supported
	SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host
	SEV_ALTERNATIVE // AMD SEV Alternate Injection supported
	SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests
	SEV_ES // AMD SEV Encrypted State supported
	SEV_RESTRICTED // AMD SEV Restricted Injection supported
	SEV_SNP // AMD SEV Secure Nested Paging supported
	SGX // Software Guard Extensions
	SGXLC // Software Guard Extensions Launch Control
	SHA // Intel SHA Extensions
	SME // AMD Secure Memory Encryption supported
	SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced
	SPEC_CTRL_SSBD // Speculative Store Bypass Disable
	SRBDS_CTRL // SRBDS mitigation MSR available
	SSE // SSE functions
	SSE2 // P4 SSE functions
	SSE3 // Prescott SSE3 functions
	SSE4 // Penryn SSE4.1 functions
	SSE42 // Nehalem SSE4.2 functions
	SSE4A // AMD Barcelona microarchitecture SSE4a instructions
	SSSE3 // Conroe SSSE3 functions
	STIBP // Single Thread Indirect Branch Predictors
	STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On
	STOSB_SHORT // Fast short STOSB
	SUCCOR // Software uncorrectable error containment and recovery capability.
	SVM // AMD Secure Virtual Machine
	SVMDA // Indicates support for the SVM decode assists.
	SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control
	SVML // AMD SVM lock. Indicates support for SVM-Lock.
	SVMNP // AMD SVM nested paging
	SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter
	SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold
	SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
	SYSEE // SYSENTER and SYSEXIT instructions
	TBM // AMD Trailing Bit Manipulation
	TDX_GUEST // Intel Trust Domain Extensions Guest
	TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations
	TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
	TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
	TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104
	TSXLDTRK // Intel TSX Suspend Load Address Tracking
	VAES // Vector AES. AVX(512) versions requires additional checks.
	VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits.
	VMPL // AMD VM Permission Levels supported
	VMSA_REGPROT // AMD VMSA Register Protection supported
	VMX // Virtual Machine Extensions
	VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions.
	VTE // AMD Virtual Transparent Encryption supported
	WAITPKG // TPAUSE, UMONITOR, UMWAIT
	WBNOINVD // Write Back and Do Not Invalidate Cache
	WRMSRNS // Non-Serializing Write to Model Specific Register
	X87 // FPU
	XGETBV1 // Supports XGETBV with ECX = 1
	XOP // Bulldozer XOP functions
	XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV
	XSAVEC // Supports XSAVEC and the compacted form of XRSTOR.
	XSAVEOPT // XSAVEOPT available
	XSAVES // Supports XSAVES/XRSTORS and IA32_XSS

	// ARM features:
	AESARM // AES instructions
	ARMCPUID // Some CPU ID registers readable at user-level
	ASIMD // Advanced SIMD
	ASIMDDP // SIMD Dot Product
	ASIMDHP // Advanced SIMD half-precision floating point
	ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH)
	ATOMICS // Large System Extensions (LSE)
	CRC32 // CRC32/CRC32C instructions
	DCPOP // Data cache clean to Point of Persistence (DC CVAP)
	EVTSTRM // Generic timer
	FCMA // Floating point complex number addition and multiplication
	FP // Single-precision and double-precision floating point
	FPHP // Half-precision floating point
	GPA // Generic Pointer Authentication
	JSCVT // Javascript-style double->int convert (FJCVTZS)
	LRCPC // Weaker release consistency (LDAPR, etc)
	PMULL // Polynomial Multiply instructions (PMULL/PMULL2)
	SHA1 // SHA-1 instructions (SHA1C, etc)
	SHA2 // SHA-2 instructions (SHA256H, etc)
	SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX)
	SHA512 // SHA512 instructions
	SM3 // SM3 instructions
	SM4 // SM4 instructions
	SVE // Scalable Vector Extension
	// Keep it last. It automatically defines the size of []flagSet
	lastID

	// firstID is the lowest valid FeatureID, used for iteration.
	firstID FeatureID = UNKNOWN + 1
)
290
// CPUInfo contains information about the detected system CPU.
type CPUInfo struct {
	BrandName      string  // Brand name reported by the CPU
	VendorID       Vendor  // Comparable CPU vendor ID
	VendorString   string  // Raw vendor string.
	featureSet     flagSet // Features of the CPU
	PhysicalCores  int     // Number of physical processor cores in your CPU. Will be 0 if undetectable.
	ThreadsPerCore int     // Number of threads per physical core. Will be 1 if undetectable.
	LogicalCores   int     // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
	Family         int     // CPU family number
	Model          int     // CPU model number
	Stepping       int     // CPU stepping info
	CacheLine      int     // Cache line size in bytes. Will be 0 if undetectable.
	Hz             int64   // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
	BoostFreq      int64   // Max clock speed, if known, 0 otherwise
	Cache          struct {
		L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
		L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
		L2  int // L2 Cache (per core or shared). Will be -1 if undetected
		L3  int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected
	}
	SGX        SGXSupport // SGX capability details.
	AVX10Level uint8      // Reported AVX10 level; presumably 0 when AVX10 is absent — confirm against detection code.
	maxFunc    uint32     // Highest standard CPUID function supported.
	maxExFunc  uint32     // Highest extended CPUID function supported.
}
317
// Architecture-specific entry points, assigned during initCPU.
var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)       // CPUID with a single leaf.
var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) // CPUID with leaf and sub-leaf.
var xgetbv func(index uint32) (eax, edx uint32)              // XGETBV for XCR registers.
var rdtscpAsm func() (eax, ebx, ecx, edx uint32)             // RDTSCP wrapper.
// darwinHasAVX512 is overridden on darwin/amd64; defaults to "no".
var darwinHasAVX512 = func() bool { return false }
323
// CPU contains information about the CPU as detected on startup,
// or when Detect last was called.
//
// Use this as the primary entry point to your data.
var CPU CPUInfo
329
// init wires up the per-architecture function pointers and runs an
// initial detection, so CPU is populated before any user code runs.
func init() {
	initCPU()
	Detect()
}
334
335// Detect will re-detect current CPU info.
336// This will replace the content of the exported CPU variable.
337//
338// Unless you expect the CPU to change while you are running your program
339// you should not need to call this function.
340// If you call this, you must ensure that no other goroutine is accessing the
341// exported CPU variable.
342func Detect() {
343 // Set defaults
344 CPU.ThreadsPerCore = 1
345 CPU.Cache.L1I = -1
346 CPU.Cache.L1D = -1
347 CPU.Cache.L2 = -1
348 CPU.Cache.L3 = -1
349 safe := true
350 if detectArmFlag != nil {
351 safe = !*detectArmFlag
352 }
353 addInfo(&CPU, safe)
354 if displayFeats != nil && *displayFeats {
355 fmt.Println("cpu features:", strings.Join(CPU.FeatureSet(), ","))
356 // Exit with non-zero so tests will print value.
357 os.Exit(1)
358 }
359 if disableFlag != nil {
360 s := strings.Split(*disableFlag, ",")
361 for _, feat := range s {
362 feat := ParseFeature(strings.TrimSpace(feat))
363 if feat != UNKNOWN {
364 CPU.featureSet.unset(feat)
365 }
366 }
367 }
368}
369
// DetectARM will detect ARM64 features.
// This is NOT done automatically since it can potentially crash
// if the OS does not handle the command.
// If in the future this can be done safely this function may not
// do anything.
func DetectARM() {
	// safe=false requests direct feature-register probing, which may fault.
	addInfo(&CPU, false)
}
378
// Command-line flag values registered by Flags; all nil until Flags is called.
var detectArmFlag *bool   // -cpu.arm: allow (potentially unsafe) ARM probing.
var displayFeats *bool    // -cpu.features: print detected features and exit.
var disableFlag *string   // -cpu.disable: comma-separated features to unset.
382
// Flags will enable flags.
// It registers the cpu.* command-line flags whose values Detect consults.
// This must be called *before* flag.Parse AND
// Detect must be called after the flags have been parsed.
// Note that this means that any detection used in init() functions
// will not contain these flags.
func Flags() {
	disableFlag = flag.String("cpu.disable", "", "disable cpu features; comma separated list")
	displayFeats = flag.Bool("cpu.features", false, "lists cpu features and exits")
	detectArmFlag = flag.Bool("cpu.arm", false, "allow ARM features to be detected; can potentially crash")
}
393
394// Supports returns whether the CPU supports all of the requested features.
395func (c CPUInfo) Supports(ids ...FeatureID) bool {
396 for _, id := range ids {
397 if !c.featureSet.inSet(id) {
398 return false
399 }
400 }
401 return true
402}
403
// Has allows for checking a single feature.
// Should be inlined by the compiler.
// Equivalent to Supports with a single argument, without the variadic overhead.
func (c *CPUInfo) Has(id FeatureID) bool {
	return c.featureSet.inSet(id)
}
409
410// AnyOf returns whether the CPU supports one or more of the requested features.
411func (c CPUInfo) AnyOf(ids ...FeatureID) bool {
412 for _, id := range ids {
413 if c.featureSet.inSet(id) {
414 return true
415 }
416 }
417 return false
418}
419
420// Features contains several features combined for a fast check using
421// CpuInfo.HasAll
422type Features *flagSet
423
424// CombineFeatures allows to combine several features for a close to constant time lookup.
425func CombineFeatures(ids ...FeatureID) Features {
426 var v flagSet
427 for _, id := range ids {
428 v.set(id)
429 }
430 return &v
431}
432
// HasAll returns whether the CPU supports every feature in the combined set f.
func (c *CPUInfo) HasAll(f Features) bool {
	return c.featureSet.hasSetP(f)
}
436
// Feature sets backing X64Level, one per x86-64 microarchitecture level.
// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
// oneOfLevel requires at least one 64-bit system-call mechanism.
var oneOfLevel = CombineFeatures(SYSEE, SYSCALL)
var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2)
var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
443
444// X64Level returns the microarchitecture level detected on the CPU.
445// If features are lacking or non x64 mode, 0 is returned.
446// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
447func (c CPUInfo) X64Level() int {
448 if !c.featureSet.hasOneOf(oneOfLevel) {
449 return 0
450 }
451 if c.featureSet.hasSetP(level4Features) {
452 return 4
453 }
454 if c.featureSet.hasSetP(level3Features) {
455 return 3
456 }
457 if c.featureSet.hasSetP(level2Features) {
458 return 2
459 }
460 if c.featureSet.hasSetP(level1Features) {
461 return 1
462 }
463 return 0
464}
465
466// Disable will disable one or several features.
467func (c *CPUInfo) Disable(ids ...FeatureID) bool {
468 for _, id := range ids {
469 c.featureSet.unset(id)
470 }
471 return true
472}
473
474// Enable will disable one or several features even if they were undetected.
475// This is of course not recommended for obvious reasons.
476func (c *CPUInfo) Enable(ids ...FeatureID) bool {
477 for _, id := range ids {
478 c.featureSet.set(id)
479 }
480 return true
481}
482
// IsVendor returns true if the detected CPU vendor matches v.
func (c CPUInfo) IsVendor(v Vendor) bool {
	return c.VendorID == v
}
487
488// FeatureSet returns all available features as strings.
489func (c CPUInfo) FeatureSet() []string {
490 s := make([]string, 0, c.featureSet.nEnabled())
491 s = append(s, c.featureSet.Strings()...)
492 return s
493}
494
495// RTCounter returns the 64-bit time-stamp counter
496// Uses the RDTSCP instruction. The value 0 is returned
497// if the CPU does not support the instruction.
498func (c CPUInfo) RTCounter() uint64 {
499 if !c.Supports(RDTSCP) {
500 return 0
501 }
502 a, _, _, d := rdtscpAsm()
503 return uint64(a) | (uint64(d) << 32)
504}
505
506// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
507// This variable is OS dependent, but on Linux contains information
508// about the current cpu/core the code is running on.
509// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
510func (c CPUInfo) Ia32TscAux() uint32 {
511 if !c.Supports(RDTSCP) {
512 return 0
513 }
514 _, _, ecx, _ := rdtscpAsm()
515 return ecx
516}
517
518// LogicalCPU will return the Logical CPU the code is currently executing on.
519// This is likely to change when the OS re-schedules the running thread
520// to another CPU.
521// If the current core cannot be detected, -1 will be returned.
522func (c CPUInfo) LogicalCPU() int {
523 if c.maxFunc < 1 {
524 return -1
525 }
526 _, ebx, _, _ := cpuid(1)
527 return int(ebx >> 24)
528}
529
// frequencies tries to compute the clock speed of the CPU. If leaf 15 is
// supported, use it, otherwise parse the brand string. Yes, really.
func (c *CPUInfo) frequencies() {
	c.Hz, c.BoostFreq = 0, 0
	mfi := maxFunctionID()
	if mfi >= 0x15 {
		// Leaf 0x15: ECX = crystal clock in Hz, EBX/EAX = TSC/crystal ratio.
		eax, ebx, ecx, _ := cpuid(0x15)
		if eax != 0 && ebx != 0 && ecx != 0 {
			c.Hz = (int64(ecx) * int64(ebx)) / int64(eax)
		}
	}
	if mfi >= 0x16 {
		// Leaf 0x16: EAX[15:0] = base MHz, EBX[15:0] = max MHz.
		a, b, _, _ := cpuid(0x16)
		// Base...
		if a&0xffff > 0 {
			c.Hz = int64(a&0xffff) * 1_000_000
		}
		// Boost...
		if b&0xffff > 0 {
			c.BoostFreq = int64(b&0xffff) * 1_000_000
		}
	}
	if c.Hz > 0 {
		return
	}

	// computeHz determines the official rated speed of a CPU from its brand
	// string. This insanity is *actually the official documented way to do
	// this according to Intel*, prior to leaf 0x15 existing. The official
	// documentation only shows this working for exactly `x.xx` or `xxxx`
	// cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other
	// sizes.
	model := c.BrandName
	hz := strings.LastIndex(model, "Hz")
	if hz < 3 {
		// No "Hz" marker (or not enough room for a digit and a unit).
		return
	}
	// The character just before "Hz" selects the unit multiplier.
	var multiplier int64
	switch model[hz-1] {
	case 'M':
		multiplier = 1000 * 1000
	case 'G':
		multiplier = 1000 * 1000 * 1000
	case 'T':
		multiplier = 1000 * 1000 * 1000 * 1000
	}
	if multiplier == 0 {
		return
	}
	// Scan digits right-to-left from just before the unit letter until a
	// space, accumulating the value; a single '.' records the divisor.
	freq := int64(0)
	divisor := int64(0)
	decimalShift := int64(1)
	var i int
	for i = hz - 2; i >= 0 && model[i] != ' '; i-- {
		if model[i] >= '0' && model[i] <= '9' {
			freq += int64(model[i]-'0') * decimalShift
			decimalShift *= 10
		} else if model[i] == '.' {
			if divisor != 0 {
				// Second decimal point: malformed, give up.
				return
			}
			divisor = decimalShift
		} else {
			// Unexpected character: malformed, give up.
			return
		}
	}
	// we didn't find a space
	if i < 0 {
		return
	}
	if divisor != 0 {
		c.Hz = (freq * multiplier) / divisor
		return
	}
	c.Hz = freq * multiplier
}
606
607// VM Will return true if the cpu id indicates we are in
608// a virtual machine.
609func (c CPUInfo) VM() bool {
610 return CPU.featureSet.inSet(HYPERVISOR)
611}
612
// flags contains detected cpu features and characteristics
type flags uint64

// log2(bits_in_uint64)
const flagBitsLog2 = 6
const flagBits = 1 << flagBitsLog2 // 64 bits per word.
const flagMask = flagBits - 1      // Mask selecting the bit index within a word.

// flagSet contains detected cpu features and characteristics in an array of flags
// sized to hold one bit per FeatureID (lastID rounded up to whole words).
type flagSet [(lastID + flagMask) / flagBits]flags
623
624func (s *flagSet) inSet(feat FeatureID) bool {
625 return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
626}
627
628func (s *flagSet) set(feat FeatureID) {
629 s[feat>>flagBitsLog2] |= 1 << (feat & flagMask)
630}
631
632// setIf will set a feature if boolean is true.
633func (s *flagSet) setIf(cond bool, features ...FeatureID) {
634 if cond {
635 for _, offset := range features {
636 s[offset>>flagBitsLog2] |= 1 << (offset & flagMask)
637 }
638 }
639}
640
641func (s *flagSet) unset(offset FeatureID) {
642 bit := flags(1 << (offset & flagMask))
643 s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit
644}
645
646// or with another flagset.
647func (s *flagSet) or(other flagSet) {
648 for i, v := range other[:] {
649 s[i] |= v
650 }
651}
652
653// hasSet returns whether all features are present.
654func (s *flagSet) hasSet(other flagSet) bool {
655 for i, v := range other[:] {
656 if s[i]&v != v {
657 return false
658 }
659 }
660 return true
661}
662
663// hasSet returns whether all features are present.
664func (s *flagSet) hasSetP(other *flagSet) bool {
665 for i, v := range other[:] {
666 if s[i]&v != v {
667 return false
668 }
669 }
670 return true
671}
672
673// hasOneOf returns whether one or more features are present.
674func (s *flagSet) hasOneOf(other *flagSet) bool {
675 for i, v := range other[:] {
676 if s[i]&v != 0 {
677 return true
678 }
679 }
680 return false
681}
682
683// nEnabled will return the number of enabled flags.
684func (s *flagSet) nEnabled() (n int) {
685 for _, v := range s[:] {
686 n += bits.OnesCount64(uint64(v))
687 }
688 return n
689}
690
691func flagSetWith(feat ...FeatureID) flagSet {
692 var res flagSet
693 for _, f := range feat {
694 res.set(f)
695 }
696 return res
697}
698
699// ParseFeature will parse the string and return the ID of the matching feature.
700// Will return UNKNOWN if not found.
701func ParseFeature(s string) FeatureID {
702 s = strings.ToUpper(s)
703 for i := firstID; i < lastID; i++ {
704 if i.String() == s {
705 return i
706 }
707 }
708 return UNKNOWN
709}
710
711// Strings returns an array of the detected features for FlagsSet.
712func (s flagSet) Strings() []string {
713 if len(s) == 0 {
714 return []string{""}
715 }
716 r := make([]string, 0)
717 for i := firstID; i < lastID; i++ {
718 if s.inSet(i) {
719 r = append(r, i.String())
720 }
721 }
722 return r
723}
724
// maxExtendedFunction returns the highest supported extended CPUID leaf,
// i.e. EAX from CPUID with EAX=0x80000000.
func maxExtendedFunction() uint32 {
	eax, _, _, _ := cpuid(0x80000000)
	return eax
}
729
// maxFunctionID returns the highest supported standard CPUID leaf,
// i.e. EAX from CPUID with EAX=0.
func maxFunctionID() uint32 {
	a, _, _, _ := cpuid(0)
	return a
}
734
735func brandName() string {
736 if maxExtendedFunction() >= 0x80000004 {
737 v := make([]uint32, 0, 48)
738 for i := uint32(0); i < 3; i++ {
739 a, b, c, d := cpuid(0x80000002 + i)
740 v = append(v, a, b, c, d)
741 }
742 return strings.Trim(string(valAsString(v...)), " ")
743 }
744 return "unknown"
745}
746
// threadsPerCore returns the number of hardware threads per physical core,
// falling back to 1 whenever the value cannot be determined.
func threadsPerCore() int {
	mfi := maxFunctionID()
	vend, _ := vendorID()

	// Only attempted for Intel/AMD with leaf 4 available.
	if mfi < 0x4 || (vend != Intel && vend != AMD) {
		return 1
	}

	if mfi < 0xb {
		// Pre-leaf-0xB path; only meaningful on older Intel parts.
		if vend != Intel {
			return 1
		}
		_, b, _, d := cpuid(1)
		// EDX bit 28 = HTT supported.
		if (d & (1 << 28)) != 0 {
			// v will contain logical core count
			v := (b >> 16) & 255
			if v > 1 {
				a4, _, _, _ := cpuid(4)
				// physical cores
				v2 := (a4 >> 26) + 1
				if v2 > 0 {
					return int(v) / int(v2)
				}
			}
		}
		return 1
	}
	// Leaf 0xB sub-leaf 0: EBX[15:0] = logical processors at SMT level.
	_, b, _, _ := cpuidex(0xb, 0)
	if b&0xffff == 0 {
		if vend == AMD {
			// Workaround for AMD returning 0, assume 2 if >= Zen 2
			// It will be more correct than not.
			fam, _, _ := familyModel()
			_, _, _, d := cpuid(1)
			if (d&(1<<28)) != 0 && fam >= 23 {
				return 2
			}
		}
		return 1
	}
	return int(b & 0xffff)
}
789
790func logicalCores() int {
791 mfi := maxFunctionID()
792 v, _ := vendorID()
793 switch v {
794 case Intel:
795 // Use this on old Intel processors
796 if mfi < 0xb {
797 if mfi < 1 {
798 return 0
799 }
800 // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
801 // that can be assigned to logical processors in a physical package.
802 // The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
803 _, ebx, _, _ := cpuid(1)
804 logical := (ebx >> 16) & 0xff
805 return int(logical)
806 }
807 _, b, _, _ := cpuidex(0xb, 1)
808 return int(b & 0xffff)
809 case AMD, Hygon:
810 _, b, _, _ := cpuid(1)
811 return int((b >> 16) & 0xff)
812 default:
813 return 0
814 }
815}
816
// familyModel decodes family, model and stepping from CPUID leaf 1 EAX,
// applying the extended family/model fields where the encoding requires them.
func familyModel() (family, model, stepping int) {
	if maxFunctionID() < 0x1 {
		return 0, 0, 0
	}
	eax, _, _, _ := cpuid(1)
	// If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0].
	family = int((eax >> 8) & 0xf)
	extFam := family == 0x6 // Intel is 0x6, needs extended model.
	if family == 0xf {
		// Add ExtFamily
		family += int((eax >> 20) & 0xff)
		extFam = true
	}
	// If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0].
	model = int((eax >> 4) & 0xf)
	if extFam {
		// Add ExtModel
		model += int((eax >> 12) & 0xf0)
	}
	stepping = int(eax & 0xf)
	return family, model, stepping
}
839
840func physicalCores() int {
841 v, _ := vendorID()
842 switch v {
843 case Intel:
844 return logicalCores() / threadsPerCore()
845 case AMD, Hygon:
846 lc := logicalCores()
847 tpc := threadsPerCore()
848 if lc > 0 && tpc > 0 {
849 return lc / tpc
850 }
851
852 // The following is inaccurate on AMD EPYC 7742 64-Core Processor
853 if maxExtendedFunction() >= 0x80000008 {
854 _, _, c, _ := cpuid(0x80000008)
855 if c&0xff > 0 {
856 return int(c&0xff) + 1
857 }
858 }
859 }
860 return 0
861}
862
// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
// Maps the raw 12-byte CPUID leaf-0 vendor string to a Vendor constant.
var vendorMapping = map[string]Vendor{
	"AMDisbetter!": AMD,
	"AuthenticAMD": AMD,
	"CentaurHauls": VIA,
	"GenuineIntel": Intel,
	"TransmetaCPU": Transmeta,
	"GenuineTMx86": Transmeta,
	"Geode by NSC": NSC,
	"VIA VIA VIA ": VIA,
	"KVMKVMKVMKVM": KVM,
	"Microsoft Hv": MSVM,
	"VMwareVMware": VMware,
	"XenVMMXenVMM": XenHVM,
	"bhyve bhyve ": Bhyve,
	"HygonGenuine": Hygon,
	"Vortex86 SoC": SiS,
	"SiS SiS SiS ": SiS,
	"RiseRiseRise": SiS,
	// NOTE(review): this key is 11 characters while CPUID vendor strings are
	// 12 bytes — verify against real RDC hardware output before relying on it.
	"Genuine RDC": RDC,
}
884
885func vendorID() (Vendor, string) {
886 _, b, c, d := cpuid(0)
887 v := string(valAsString(b, d, c))
888 vend, ok := vendorMapping[v]
889 if !ok {
890 return VendorUnknown, v
891 }
892 return vend, v
893}
894
// cacheLine returns the CPU cache line size in bytes, or 0 if undetectable.
func cacheLine() int {
	if maxFunctionID() < 0x1 {
		return 0
	}

	_, ebx, _, _ := cpuid(1)
	// EBX[15:8] holds the CLFLUSH line size in 8-byte units; masking with
	// 0xff00 then shifting right by 5 extracts the field and multiplies by 8.
	cache := (ebx & 0xff00) >> 5 // cflush size
	if cache == 0 && maxExtendedFunction() >= 0x80000006 {
		// Fallback: extended leaf 0x80000006 reports line size directly in ECX[7:0].
		_, _, ecx, _ := cpuid(0x80000006)
		cache = ecx & 0xff // cacheline size
	}
	// TODO: Read from Cache and TLB Information
	return int(cache)
}
909
910func (c *CPUInfo) cacheSize() {
911 c.Cache.L1D = -1
912 c.Cache.L1I = -1
913 c.Cache.L2 = -1
914 c.Cache.L3 = -1
915 vendor, _ := vendorID()
916 switch vendor {
917 case Intel:
918 if maxFunctionID() < 4 {
919 return
920 }
921 c.Cache.L1I, c.Cache.L1D, c.Cache.L2, c.Cache.L3 = 0, 0, 0, 0
922 for i := uint32(0); ; i++ {
923 eax, ebx, ecx, _ := cpuidex(4, i)
924 cacheType := eax & 15
925 if cacheType == 0 {
926 break
927 }
928 cacheLevel := (eax >> 5) & 7
929 coherency := int(ebx&0xfff) + 1
930 partitions := int((ebx>>12)&0x3ff) + 1
931 associativity := int((ebx>>22)&0x3ff) + 1
932 sets := int(ecx) + 1
933 size := associativity * partitions * coherency * sets
934 switch cacheLevel {
935 case 1:
936 if cacheType == 1 {
937 // 1 = Data Cache
938 c.Cache.L1D = size
939 } else if cacheType == 2 {
940 // 2 = Instruction Cache
941 c.Cache.L1I = size
942 } else {
943 if c.Cache.L1D < 0 {
944 c.Cache.L1I = size
945 }
946 if c.Cache.L1I < 0 {
947 c.Cache.L1I = size
948 }
949 }
950 case 2:
951 c.Cache.L2 = size
952 case 3:
953 c.Cache.L3 = size
954 }
955 }
956 case AMD, Hygon:
957 // Untested.
958 if maxExtendedFunction() < 0x80000005 {
959 return
960 }
961 _, _, ecx, edx := cpuid(0x80000005)
962 c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024)
963 c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024)
964
965 if maxExtendedFunction() < 0x80000006 {
966 return
967 }
968 _, _, ecx, _ = cpuid(0x80000006)
969 c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
970
971 // CPUID Fn8000_001D_EAX_x[N:0] Cache Properties
972 if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) {
973 return
974 }
975
976 // Xen Hypervisor is buggy and returns the same entry no matter ECX value.
977 // Hack: When we encounter the same entry 100 times we break.
978 nSame := 0
979 var last uint32
980 for i := uint32(0); i < math.MaxUint32; i++ {
981 eax, ebx, ecx, _ := cpuidex(0x8000001D, i)
982
983 level := (eax >> 5) & 7
984 cacheNumSets := ecx + 1
985 cacheLineSize := 1 + (ebx & 2047)
986 cachePhysPartitions := 1 + ((ebx >> 12) & 511)
987 cacheNumWays := 1 + ((ebx >> 22) & 511)
988
989 typ := eax & 15
990 size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays)
991 if typ == 0 {
992 return
993 }
994
995 // Check for the same value repeated.
996 comb := eax ^ ebx ^ ecx
997 if comb == last {
998 nSame++
999 if nSame == 100 {
1000 return
1001 }
1002 }
1003 last = comb
1004
1005 switch level {
1006 case 1:
1007 switch typ {
1008 case 1:
1009 // Data cache
1010 c.Cache.L1D = size
1011 case 2:
1012 // Inst cache
1013 c.Cache.L1I = size
1014 default:
1015 if c.Cache.L1D < 0 {
1016 c.Cache.L1I = size
1017 }
1018 if c.Cache.L1I < 0 {
1019 c.Cache.L1I = size
1020 }
1021 }
1022 case 2:
1023 c.Cache.L2 = size
1024 case 3:
1025 c.Cache.L3 = size
1026 }
1027 }
1028 }
1029}
1030
// SGXEPCSection describes a single SGX Enclave Page Cache (EPC) memory
// region as reported by CPUID leaf 0x12.
type SGXEPCSection struct {
	BaseAddress uint64 // Physical base address of the section.
	EPCSize     uint64 // Size of the section in bytes.
}

// SGXSupport describes the Intel Software Guard Extensions capabilities
// detected via CPUID leaf 0x12 (see hasSGX).
type SGXSupport struct {
	Available           bool            // SGX is supported by the CPU.
	LaunchControl       bool            // Flexible launch control is supported.
	SGX1Supported       bool            // SGX1 instruction set supported.
	SGX2Supported       bool            // SGX2 instruction set supported.
	MaxEnclaveSizeNot64 int64           // Max enclave size outside 64-bit mode (bytes, power of two).
	MaxEnclaveSize64    int64           // Max enclave size in 64-bit mode (bytes, power of two).
	EPCSections         []SGXEPCSection // Enumerated EPC sections.
}
1045
1046func hasSGX(available, lc bool) (rval SGXSupport) {
1047 rval.Available = available
1048
1049 if !available {
1050 return
1051 }
1052
1053 rval.LaunchControl = lc
1054
1055 a, _, _, d := cpuidex(0x12, 0)
1056 rval.SGX1Supported = a&0x01 != 0
1057 rval.SGX2Supported = a&0x02 != 0
1058 rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2
1059 rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2
1060 rval.EPCSections = make([]SGXEPCSection, 0)
1061
1062 for subleaf := uint32(2); subleaf < 2+8; subleaf++ {
1063 eax, ebx, ecx, edx := cpuidex(0x12, subleaf)
1064 leafType := eax & 0xf
1065
1066 if leafType == 0 {
1067 // Invalid subleaf, stop iterating
1068 break
1069 } else if leafType == 1 {
1070 // EPC Section subleaf
1071 baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32)
1072 size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32)
1073
1074 section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size}
1075 rval.EPCSections = append(rval.EPCSections, section)
1076 }
1077 }
1078
1079 return
1080}
1081
// support probes CPUID (and XGETBV where applicable) and returns the
// full set of detected CPU feature flags. It assumes initCPU has
// installed working cpuid/cpuidex/xgetbv implementations. The probe
// order matters: AVX/OSXSAVE state gates later AVX2/AVX-512/XOP/FMA4
// detection.
func support() flagSet {
	var fs flagSet
	mfi := maxFunctionID()
	vend, _ := vendorID()
	if mfi < 0x1 {
		// Leaf 1 unavailable; no features can be detected.
		return fs
	}
	family, model, _ := familyModel()

	// Leaf 1: baseline feature bits in ECX/EDX.
	_, _, c, d := cpuid(1)
	fs.setIf((d&(1<<0)) != 0, X87)
	fs.setIf((d&(1<<8)) != 0, CMPXCHG8)
	fs.setIf((d&(1<<11)) != 0, SYSEE)
	fs.setIf((d&(1<<15)) != 0, CMOV)
	fs.setIf((d&(1<<23)) != 0, MMX)
	fs.setIf((d&(1<<24)) != 0, FXSR)
	fs.setIf((d&(1<<25)) != 0, FXSROPT)
	fs.setIf((d&(1<<25)) != 0, SSE)
	fs.setIf((d&(1<<26)) != 0, SSE2)
	fs.setIf((c&1) != 0, SSE3)
	fs.setIf((c&(1<<5)) != 0, VMX)
	fs.setIf((c&(1<<9)) != 0, SSSE3)
	fs.setIf((c&(1<<19)) != 0, SSE4)
	fs.setIf((c&(1<<20)) != 0, SSE42)
	fs.setIf((c&(1<<25)) != 0, AESNI)
	fs.setIf((c&(1<<1)) != 0, CLMUL)
	fs.setIf(c&(1<<22) != 0, MOVBE)
	fs.setIf(c&(1<<23) != 0, POPCNT)
	fs.setIf(c&(1<<30) != 0, RDRAND)

	// This bit has been reserved by Intel & AMD for use by hypervisors,
	// and indicates the presence of a hypervisor.
	fs.setIf(c&(1<<31) != 0, HYPERVISOR)
	fs.setIf(c&(1<<29) != 0, F16C)
	fs.setIf(c&(1<<13) != 0, CX16)

	// HTT: bit 28 only means "HTT capable"; confirm via topology.
	if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 {
		fs.setIf(threadsPerCore() > 1, HTT)
	}
	if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 {
		fs.setIf(threadsPerCore() > 1, HTT)
	}
	fs.setIf(c&1<<26 != 0, XSAVE)
	fs.setIf(c&1<<27 != 0, OSXSAVE)
	// Check XGETBV/XSAVE (26), OXSAVE (27) and AVX (28) bits
	const avxCheck = 1<<26 | 1<<27 | 1<<28
	if c&avxCheck == avxCheck {
		// Check for OS support
		eax, _ := xgetbv(0)
		if (eax & 0x6) == 0x6 {
			fs.set(AVX)
			switch vend {
			case Intel:
				// Older than Haswell.
				fs.setIf(family == 6 && model < 60, AVXSLOW)
			case AMD:
				// Older than Zen 2
				fs.setIf(family < 23 || (family == 23 && model < 49), AVXSLOW)
			}
		}
	}
	// FMA3 can be used with SSE registers, so no OS support is strictly needed.
	// fma3 and OSXSAVE needed.
	const fma3Check = 1<<12 | 1<<27
	fs.setIf(c&fma3Check == fma3Check, FMA3)

	// Check AVX2, AVX2 requires OS support, but BMI1/2 don't.
	if mfi >= 7 {
		_, ebx, ecx, edx := cpuidex(7, 0)
		if fs.inSet(AVX) && (ebx&0x00000020) != 0 {
			fs.set(AVX2)
		}
		// CPUID.(EAX=7, ECX=0).EBX
		if (ebx & 0x00000008) != 0 {
			fs.set(BMI1)
			fs.setIf((ebx&0x00000100) != 0, BMI2)
		}
		fs.setIf(ebx&(1<<2) != 0, SGX)
		fs.setIf(ebx&(1<<4) != 0, HLE)
		fs.setIf(ebx&(1<<9) != 0, ERMS)
		fs.setIf(ebx&(1<<11) != 0, RTM)
		fs.setIf(ebx&(1<<14) != 0, MPX)
		fs.setIf(ebx&(1<<18) != 0, RDSEED)
		fs.setIf(ebx&(1<<19) != 0, ADX)
		fs.setIf(ebx&(1<<29) != 0, SHA)

		// CPUID.(EAX=7, ECX=0).ECX
		fs.setIf(ecx&(1<<5) != 0, WAITPKG)
		fs.setIf(ecx&(1<<7) != 0, CETSS)
		fs.setIf(ecx&(1<<8) != 0, GFNI)
		fs.setIf(ecx&(1<<9) != 0, VAES)
		fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ)
		fs.setIf(ecx&(1<<13) != 0, TME)
		fs.setIf(ecx&(1<<25) != 0, CLDEMOTE)
		fs.setIf(ecx&(1<<23) != 0, KEYLOCKER)
		fs.setIf(ecx&(1<<27) != 0, MOVDIRI)
		fs.setIf(ecx&(1<<28) != 0, MOVDIR64B)
		fs.setIf(ecx&(1<<29) != 0, ENQCMD)
		fs.setIf(ecx&(1<<30) != 0, SGXLC)

		// CPUID.(EAX=7, ECX=0).EDX
		fs.setIf(edx&(1<<4) != 0, FSRM)
		fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL)
		fs.setIf(edx&(1<<10) != 0, MD_CLEAR)
		fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
		fs.setIf(edx&(1<<14) != 0, SERIALIZE)
		fs.setIf(edx&(1<<15) != 0, HYBRID_CPU)
		fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
		fs.setIf(edx&(1<<18) != 0, PCONFIG)
		fs.setIf(edx&(1<<20) != 0, CETIBT)
		fs.setIf(edx&(1<<26) != 0, IBPB)
		fs.setIf(edx&(1<<27) != 0, STIBP)
		fs.setIf(edx&(1<<28) != 0, FLUSH_L1D)
		fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP)
		fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP)
		fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD)

		// CPUID.(EAX=7, ECX=1).EAX
		eax1, _, _, edx1 := cpuidex(7, 1)
		fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
		fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
		fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
		fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT)
		fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT)
		fs.setIf(eax1&(1<<22) != 0, HRESET)
		fs.setIf(eax1&(1<<23) != 0, AVXIFMA)
		fs.setIf(eax1&(1<<26) != 0, LAM)

		// CPUID.(EAX=7, ECX=1).EDX
		fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
		fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
		fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
		fs.setIf(edx1&(1<<19) != 0, AVX10)
		fs.setIf(edx1&(1<<21) != 0, APX_F)

		// Only detect AVX-512 features if XGETBV is supported
		if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
			// Check for OS support
			eax, _ := xgetbv(0)

			// Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
			// ZMM16-ZMM31 state are enabled by OS)
			/// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS).
			hasAVX512 := (eax>>5)&7 == 7 && (eax>>1)&3 == 3
			if runtime.GOOS == "darwin" {
				// macOS enables AVX-512 state lazily; query the commpage instead.
				hasAVX512 = fs.inSet(AVX) && darwinHasAVX512()
			}
			if hasAVX512 {
				fs.setIf(ebx&(1<<16) != 0, AVX512F)
				fs.setIf(ebx&(1<<17) != 0, AVX512DQ)
				fs.setIf(ebx&(1<<21) != 0, AVX512IFMA)
				fs.setIf(ebx&(1<<26) != 0, AVX512PF)
				fs.setIf(ebx&(1<<27) != 0, AVX512ER)
				fs.setIf(ebx&(1<<28) != 0, AVX512CD)
				fs.setIf(ebx&(1<<30) != 0, AVX512BW)
				fs.setIf(ebx&(1<<31) != 0, AVX512VL)
				// ecx
				fs.setIf(ecx&(1<<1) != 0, AVX512VBMI)
				fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2)
				fs.setIf(ecx&(1<<11) != 0, AVX512VNNI)
				fs.setIf(ecx&(1<<12) != 0, AVX512BITALG)
				fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ)
				// edx
				fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT)
				fs.setIf(edx&(1<<22) != 0, AMXBF16)
				fs.setIf(edx&(1<<23) != 0, AVX512FP16)
				fs.setIf(edx&(1<<24) != 0, AMXTILE)
				fs.setIf(edx&(1<<25) != 0, AMXINT8)
				// eax1 = CPUID.(EAX=7, ECX=1).EAX
				fs.setIf(eax1&(1<<5) != 0, AVX512BF16)
				fs.setIf(eax1&(1<<19) != 0, WRMSRNS)
				fs.setIf(eax1&(1<<21) != 0, AMXFP16)
				fs.setIf(eax1&(1<<27) != 0, MSRLIST)
			}
		}

		// CPUID.(EAX=7, ECX=2)
		_, _, _, edx = cpuidex(7, 2)
		fs.setIf(edx&(1<<0) != 0, PSFD)
		fs.setIf(edx&(1<<1) != 0, IDPRED_CTRL)
		fs.setIf(edx&(1<<2) != 0, RRSBA_CTRL)
		fs.setIf(edx&(1<<4) != 0, BHI_CTRL)
		fs.setIf(edx&(1<<5) != 0, MCDT_NO)

		// Add keylocker features.
		if fs.inSet(KEYLOCKER) && mfi >= 0x19 {
			_, ebx, _, _ := cpuidex(0x19, 0)
			fs.setIf(ebx&5 == 5, KEYLOCKERW) // Bit 0 and 2 (1+4)
		}

		// Add AVX10 features.
		if fs.inSet(AVX10) && mfi >= 0x24 {
			_, ebx, _, _ := cpuidex(0x24, 0)
			fs.setIf(ebx&(1<<16) != 0, AVX10_128)
			fs.setIf(ebx&(1<<17) != 0, AVX10_256)
			fs.setIf(ebx&(1<<18) != 0, AVX10_512)
		}
	}

	// Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1)
	// EAX
	// Bit 00: XSAVEOPT is available.
	// Bit 01: Supports XSAVEC and the compacted form of XRSTOR if set.
	// Bit 02: Supports XGETBV with ECX = 1 if set.
	// Bit 03: Supports XSAVES/XRSTORS and IA32_XSS if set.
	// Bits 31 - 04: Reserved.
	// EBX
	// Bits 31 - 00: The size in bytes of the XSAVE area containing all states enabled by XCRO | IA32_XSS.
	// ECX
	// Bits 31 - 00: Reports the supported bits of the lower 32 bits of the IA32_XSS MSR. IA32_XSS[n] can be set to 1 only if ECX[n] is 1.
	// EDX?
	// Bits 07 - 00: Used for XCR0. Bit 08: PT state. Bit 09: Used for XCR0. Bits 12 - 10: Reserved. Bit 13: HWP state. Bits 31 - 14: Reserved.
	if mfi >= 0xd {
		if fs.inSet(XSAVE) {
			eax, _, _, _ := cpuidex(0xd, 1)
			fs.setIf(eax&(1<<0) != 0, XSAVEOPT)
			fs.setIf(eax&(1<<1) != 0, XSAVEC)
			fs.setIf(eax&(1<<2) != 0, XGETBV1)
			fs.setIf(eax&(1<<3) != 0, XSAVES)
		}
	}
	// Extended leaf 0x80000001: AMD/Intel extended feature bits.
	if maxExtendedFunction() >= 0x80000001 {
		_, _, c, d := cpuid(0x80000001)
		if (c & (1 << 5)) != 0 {
			fs.set(LZCNT)
			fs.set(POPCNT)
		}
		// ECX
		fs.setIf((c&(1<<0)) != 0, LAHF)
		fs.setIf((c&(1<<2)) != 0, SVM)
		fs.setIf((c&(1<<6)) != 0, SSE4A)
		fs.setIf((c&(1<<10)) != 0, IBS)
		fs.setIf((c&(1<<22)) != 0, TOPEXT)

		// EDX
		fs.setIf(d&(1<<11) != 0, SYSCALL)
		fs.setIf(d&(1<<20) != 0, NX)
		fs.setIf(d&(1<<22) != 0, MMXEXT)
		fs.setIf(d&(1<<23) != 0, MMX)
		fs.setIf(d&(1<<24) != 0, FXSR)
		fs.setIf(d&(1<<25) != 0, FXSROPT)
		fs.setIf(d&(1<<27) != 0, RDTSCP)
		fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT)
		fs.setIf(d&(1<<31) != 0, AMD3DNOW)

		/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
		 * used unless the OS has AVX support. */
		if fs.inSet(AVX) {
			fs.setIf((c&(1<<11)) != 0, XOP)
			fs.setIf((c&(1<<16)) != 0, FMA4)
		}

	}
	// Extended leaf 0x80000007: RAS and power-management bits.
	if maxExtendedFunction() >= 0x80000007 {
		_, b, _, d := cpuid(0x80000007)
		fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW)
		fs.setIf((b&(1<<1)) != 0, SUCCOR)
		fs.setIf((b&(1<<2)) != 0, HWA)
		fs.setIf((d&(1<<9)) != 0, CPBOOST)
	}

	// Extended leaf 0x80000008: mostly speculation-control bits.
	if maxExtendedFunction() >= 0x80000008 {
		_, b, _, _ := cpuid(0x80000008)
		fs.setIf(b&(1<<28) != 0, PSFD)
		fs.setIf(b&(1<<27) != 0, CPPC)
		fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD)
		fs.setIf(b&(1<<23) != 0, PPIN)
		fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED)
		fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS)
		fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP)
		fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED)
		fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON)
		fs.setIf(b&(1<<15) != 0, STIBP)
		fs.setIf(b&(1<<14) != 0, IBRS)
		fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
		fs.setIf(b&(1<<12) != 0, IBPB)
		fs.setIf((b&(1<<9)) != 0, WBNOINVD)
		fs.setIf((b&(1<<8)) != 0, MCOMMIT)
		fs.setIf((b&(1<<4)) != 0, RDPRU)
		fs.setIf((b&(1<<3)) != 0, INVLPGB)
		fs.setIf((b&(1<<1)) != 0, MSRIRC)
		fs.setIf((b&(1<<0)) != 0, CLZERO)
	}

	// Extended leaf 0x8000000A: SVM sub-features (only when SVM is present).
	if fs.inSet(SVM) && maxExtendedFunction() >= 0x8000000A {
		_, _, _, edx := cpuid(0x8000000A)
		fs.setIf((edx>>0)&1 == 1, SVMNP)
		fs.setIf((edx>>1)&1 == 1, LBRVIRT)
		fs.setIf((edx>>2)&1 == 1, SVML)
		fs.setIf((edx>>3)&1 == 1, NRIPS)
		fs.setIf((edx>>4)&1 == 1, TSCRATEMSR)
		fs.setIf((edx>>5)&1 == 1, VMCBCLEAN)
		fs.setIf((edx>>6)&1 == 1, SVMFBASID)
		fs.setIf((edx>>7)&1 == 1, SVMDA)
		fs.setIf((edx>>10)&1 == 1, SVMPF)
		fs.setIf((edx>>12)&1 == 1, SVMPFT)
	}

	if maxExtendedFunction() >= 0x8000001a {
		eax, _, _, _ := cpuid(0x8000001a)
		fs.setIf((eax>>0)&1 == 1, FP128)
		fs.setIf((eax>>1)&1 == 1, MOVU)
		fs.setIf((eax>>2)&1 == 1, FP256)
	}

	// Extended leaf 0x8000001B: IBS sub-features.
	if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) {
		eax, _, _, _ := cpuid(0x8000001b)
		fs.setIf((eax>>0)&1 == 1, IBSFFV)
		fs.setIf((eax>>1)&1 == 1, IBSFETCHSAM)
		fs.setIf((eax>>2)&1 == 1, IBSOPSAM)
		fs.setIf((eax>>3)&1 == 1, IBSRDWROPCNT)
		fs.setIf((eax>>4)&1 == 1, IBSOPCNT)
		fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT)
		fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT)
		fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK)
		fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE)
		fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX)
		fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1.
		fs.setIf((eax>>11)&1 == 1, IBS_ZEN4)
	}

	// Extended leaf 0x8000001F: AMD memory-encryption (SME/SEV) bits.
	if maxExtendedFunction() >= 0x8000001f && vend == AMD {
		a, _, _, _ := cpuid(0x8000001f)
		fs.setIf((a>>0)&1 == 1, SME)
		fs.setIf((a>>1)&1 == 1, SEV)
		fs.setIf((a>>2)&1 == 1, MSR_PAGEFLUSH)
		fs.setIf((a>>3)&1 == 1, SEV_ES)
		fs.setIf((a>>4)&1 == 1, SEV_SNP)
		fs.setIf((a>>5)&1 == 1, VMPL)
		fs.setIf((a>>10)&1 == 1, SME_COHERENT)
		fs.setIf((a>>11)&1 == 1, SEV_64BIT)
		fs.setIf((a>>12)&1 == 1, SEV_RESTRICTED)
		fs.setIf((a>>13)&1 == 1, SEV_ALTERNATIVE)
		fs.setIf((a>>14)&1 == 1, SEV_DEBUGSWAP)
		fs.setIf((a>>15)&1 == 1, IBS_PREVENTHOST)
		fs.setIf((a>>16)&1 == 1, VTE)
		fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
	}

	if mfi >= 0x20 {
		// Microsoft has decided to purposefully hide the information
		// of the guest TEE when VMs are being created using Hyper-V.
		//
		// This leads us to check for the Hyper-V cpuid features
		// (0x4000000C), and then for the `ebx` value set.
		//
		// For Intel TDX, `ebx` is set as `0xbe3`, being 3 the part
		// we're mostly interested about,according to:
		// https://github.com/torvalds/linux/blob/d2f51b3516dade79269ff45eae2a7668ae711b25/arch/x86/include/asm/hyperv-tlfs.h#L169-L174
		_, ebx, _, _ := cpuid(0x4000000C)
		fs.setIf(ebx == 0xbe3, TDX_GUEST)
	}

	if mfi >= 0x21 {
		// Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21).
		// NOTE(review): the leaf-0x21 identity string is 12 bytes
		// ("IntelTDX" padded with four spaces); confirm the literal
		// below has not had trailing spaces collapsed — as written an
		// exact match looks impossible.
		_, ebx, ecx, edx := cpuid(0x21)
		identity := string(valAsString(ebx, edx, ecx))
		fs.setIf(identity == "IntelTDX ", TDX_GUEST)
	}

	return fs
}
1444
1445func (c *CPUInfo) supportAVX10() uint8 {
1446 if c.maxFunc >= 0x24 && c.featureSet.inSet(AVX10) {
1447 _, ebx, _, _ := cpuidex(0x24, 0)
1448 return uint8(ebx)
1449 }
1450 return 0
1451}
1452
// valAsString decodes the given register values as a little-endian
// byte sequence and returns it truncated at the first zero byte.
func valAsString(values ...uint32) []byte {
	out := make([]byte, 0, 4*len(values))
	for _, v := range values {
		for shift := uint(0); shift < 32; shift += 8 {
			ch := byte(v >> shift)
			if ch == 0 {
				// NUL terminates the string.
				return out
			}
			out = append(out, ch)
		}
	}
	return out
}
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s
new file mode 100644
index 0000000..8587c3a
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s
@@ -0,0 +1,47 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

//+build 386,!gccgo,!noasm,!appengine

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORL CX, CX           // some leaves read ECX; zero it for deterministic results
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+4(FP)
	MOVL BX, ebx+8(FP)
	MOVL CX, ecx+12(FP)
	MOVL DX, edx+16(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX    // sub-leaf goes in ECX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmXgetbv(index uint32) (eax, edx uint32)
// (comment previously named this "xgetbv"; the symbol is asmXgetbv)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+4(FP)
	MOVL DX, edx+8(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET

// func asmDarwinHasAVX512() bool
// Always false: darwin does not run on 386.
TEXT ·asmDarwinHasAVX512(SB), 7, $0
	MOVL $0, eax+0(FP)
	RET
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s
new file mode 100644
index 0000000..bc11f89
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s
@@ -0,0 +1,72 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

//+build amd64,!gccgo,!noasm,!appengine

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORQ CX, CX           // some leaves read ECX; zero it for deterministic results
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX    // sub-leaf goes in ECX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+8(FP)
	MOVL DX, edx+12(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET

// From https://go-review.googlesource.com/c/sys/+/285572/
// func asmDarwinHasAVX512() bool
// Reads the darwin commpage CPU capability bits; false on any other OS/arch.
TEXT ·asmDarwinHasAVX512(SB), 7, $0-1
	MOVB $0, ret+0(FP) // default to false

#ifdef GOOS_darwin   // return if not darwin
#ifdef GOARCH_amd64  // return if not amd64
// These values from:
// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h
#define commpage64_base_address         0x00007fffffe00000
#define commpage64_cpu_capabilities64   (commpage64_base_address+0x010)
#define commpage64_version              (commpage64_base_address+0x01E)
#define hasAVX512F                      0x0000004000000000
	MOVQ $commpage64_version, BX
	MOVW (BX), AX
	CMPW AX, $13  // versions < 13 do not support AVX512
	JL   no_avx512
	MOVQ $commpage64_cpu_capabilities64, BX
	MOVQ (BX), AX
	MOVQ $hasAVX512F, CX
	ANDQ CX, AX
	JZ   no_avx512
	MOVB $1, ret+0(FP)

no_avx512:
#endif
#endif
	RET
72
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s
new file mode 100644
index 0000000..b31d6ae
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s
@@ -0,0 +1,26 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

//+build arm64,!gccgo,!noasm,!appengine

// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt
// The MRS instructions below are emitted as raw WORDs; reading these
// registers from EL0 relies on the kernel trapping and emulating them.

// func getMidr
TEXT ·getMidr(SB), 7, $0
	WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */
	MOVD R0, midr+0(FP)
	RET

// func getProcFeatures
TEXT ·getProcFeatures(SB), 7, $0
	WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */
	MOVD R0, procFeatures+0(FP)
	RET

// func getInstAttributes
TEXT ·getInstAttributes(SB), 7, $0
	WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */
	WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */
	MOVD R0, instAttrReg0+0(FP)
	MOVD R1, instAttrReg1+8(FP)
	RET
26
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
new file mode 100644
index 0000000..9a53504
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
@@ -0,0 +1,247 @@
1// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
2
3//go:build arm64 && !gccgo && !noasm && !appengine
4// +build arm64,!gccgo,!noasm,!appengine
5
6package cpuid
7
8import "runtime"
9
10func getMidr() (midr uint64)
11func getProcFeatures() (procFeatures uint64)
12func getInstAttributes() (instAttrReg0, instAttrReg1 uint64)
13
// initCPU installs stub accessors: x86 CPUID/XGETBV/RDTSCP semantics do
// not exist on arm64, so every accessor returns zeroes.
func initCPU() {
	cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
	cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
	xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
	rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
}
20
21func addInfo(c *CPUInfo, safe bool) {
22 // Seems to be safe to assume on ARM64
23 c.CacheLine = 64
24 detectOS(c)
25
26 // ARM64 disabled since it may crash if interrupt is not intercepted by OS.
27 if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" {
28 return
29 }
30 midr := getMidr()
31
32 // MIDR_EL1 - Main ID Register
33 // https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1
34 // x--------------------------------------------------x
35 // | Name | bits | visible |
36 // |--------------------------------------------------|
37 // | Implementer | [31-24] | y |
38 // |--------------------------------------------------|
39 // | Variant | [23-20] | y |
40 // |--------------------------------------------------|
41 // | Architecture | [19-16] | y |
42 // |--------------------------------------------------|
43 // | PartNum | [15-4] | y |
44 // |--------------------------------------------------|
45 // | Revision | [3-0] | y |
46 // x--------------------------------------------------x
47
48 switch (midr >> 24) & 0xff {
49 case 0xC0:
50 c.VendorString = "Ampere Computing"
51 c.VendorID = Ampere
52 case 0x41:
53 c.VendorString = "Arm Limited"
54 c.VendorID = ARM
55 case 0x42:
56 c.VendorString = "Broadcom Corporation"
57 c.VendorID = Broadcom
58 case 0x43:
59 c.VendorString = "Cavium Inc"
60 c.VendorID = Cavium
61 case 0x44:
62 c.VendorString = "Digital Equipment Corporation"
63 c.VendorID = DEC
64 case 0x46:
65 c.VendorString = "Fujitsu Ltd"
66 c.VendorID = Fujitsu
67 case 0x49:
68 c.VendorString = "Infineon Technologies AG"
69 c.VendorID = Infineon
70 case 0x4D:
71 c.VendorString = "Motorola or Freescale Semiconductor Inc"
72 c.VendorID = Motorola
73 case 0x4E:
74 c.VendorString = "NVIDIA Corporation"
75 c.VendorID = NVIDIA
76 case 0x50:
77 c.VendorString = "Applied Micro Circuits Corporation"
78 c.VendorID = AMCC
79 case 0x51:
80 c.VendorString = "Qualcomm Inc"
81 c.VendorID = Qualcomm
82 case 0x56:
83 c.VendorString = "Marvell International Ltd"
84 c.VendorID = Marvell
85 case 0x69:
86 c.VendorString = "Intel Corporation"
87 c.VendorID = Intel
88 }
89
90 // Lower 4 bits: Architecture
91 // Architecture Meaning
92 // 0b0001 Armv4.
93 // 0b0010 Armv4T.
94 // 0b0011 Armv5 (obsolete).
95 // 0b0100 Armv5T.
96 // 0b0101 Armv5TE.
97 // 0b0110 Armv5TEJ.
98 // 0b0111 Armv6.
99 // 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'.
100 // Upper 4 bit: Variant
101 // An IMPLEMENTATION DEFINED variant number.
102 // Typically, this field is used to distinguish between different product variants, or major revisions of a product.
103 c.Family = int(midr>>16) & 0xff
104
105 // PartNum, bits [15:4]
106 // An IMPLEMENTATION DEFINED primary part number for the device.
107 // On processors implemented by Arm, if the top four bits of the primary
108 // part number are 0x0 or 0x7, the variant and architecture are encoded differently.
109 // Revision, bits [3:0]
110 // An IMPLEMENTATION DEFINED revision number for the device.
111 c.Model = int(midr) & 0xffff
112
113 procFeatures := getProcFeatures()
114
115 // ID_AA64PFR0_EL1 - Processor Feature Register 0
116 // x--------------------------------------------------x
117 // | Name | bits | visible |
118 // |--------------------------------------------------|
119 // | DIT | [51-48] | y |
120 // |--------------------------------------------------|
121 // | SVE | [35-32] | y |
122 // |--------------------------------------------------|
123 // | GIC | [27-24] | n |
124 // |--------------------------------------------------|
125 // | AdvSIMD | [23-20] | y |
126 // |--------------------------------------------------|
127 // | FP | [19-16] | y |
128 // |--------------------------------------------------|
129 // | EL3 | [15-12] | n |
130 // |--------------------------------------------------|
131 // | EL2 | [11-8] | n |
132 // |--------------------------------------------------|
133 // | EL1 | [7-4] | n |
134 // |--------------------------------------------------|
135 // | EL0 | [3-0] | n |
136 // x--------------------------------------------------x
137
138 var f flagSet
139 // if procFeatures&(0xf<<48) != 0 {
140 // fmt.Println("DIT")
141 // }
142 f.setIf(procFeatures&(0xf<<32) != 0, SVE)
143 if procFeatures&(0xf<<20) != 15<<20 {
144 f.set(ASIMD)
145 // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1
146 // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic.
147 f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP)
148 }
149 f.setIf(procFeatures&(0xf<<16) != 0, FP)
150
151 instAttrReg0, instAttrReg1 := getInstAttributes()
152
153 // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
154 //
155 // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
156 // x--------------------------------------------------x
157 // | Name | bits | visible |
158 // |--------------------------------------------------|
159 // | TS | [55-52] | y |
160 // |--------------------------------------------------|
161 // | FHM | [51-48] | y |
162 // |--------------------------------------------------|
163 // | DP | [47-44] | y |
164 // |--------------------------------------------------|
165 // | SM4 | [43-40] | y |
166 // |--------------------------------------------------|
167 // | SM3 | [39-36] | y |
168 // |--------------------------------------------------|
169 // | SHA3 | [35-32] | y |
170 // |--------------------------------------------------|
171 // | RDM | [31-28] | y |
172 // |--------------------------------------------------|
173 // | ATOMICS | [23-20] | y |
174 // |--------------------------------------------------|
175 // | CRC32 | [19-16] | y |
176 // |--------------------------------------------------|
177 // | SHA2 | [15-12] | y |
178 // |--------------------------------------------------|
179 // | SHA1 | [11-8] | y |
180 // |--------------------------------------------------|
181 // | AES | [7-4] | y |
182 // x--------------------------------------------------x
183
184 // if instAttrReg0&(0xf<<52) != 0 {
185 // fmt.Println("TS")
186 // }
187 // if instAttrReg0&(0xf<<48) != 0 {
188 // fmt.Println("FHM")
189 // }
190 f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP)
191 f.setIf(instAttrReg0&(0xf<<40) != 0, SM4)
192 f.setIf(instAttrReg0&(0xf<<36) != 0, SM3)
193 f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3)
194 f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM)
195 f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS)
196 f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32)
197 f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2)
198 // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
199 // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented.
200 f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512)
201 f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1)
202 f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM)
203 // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
204 // 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities.
205 f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL)
206
207 // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1
208 //
209 // ID_AA64ISAR1_EL1 - Instruction set attribute register 1
210 // x--------------------------------------------------x
211 // | Name | bits | visible |
212 // |--------------------------------------------------|
213 // | GPI | [31-28] | y |
214 // |--------------------------------------------------|
215 // | GPA | [27-24] | y |
216 // |--------------------------------------------------|
217 // | LRCPC | [23-20] | y |
218 // |--------------------------------------------------|
219 // | FCMA | [19-16] | y |
220 // |--------------------------------------------------|
221 // | JSCVT | [15-12] | y |
222 // |--------------------------------------------------|
223 // | API | [11-8] | y |
224 // |--------------------------------------------------|
225 // | APA | [7-4] | y |
226 // |--------------------------------------------------|
227 // | DPB | [3-0] | y |
228 // x--------------------------------------------------x
229
230 // if instAttrReg1&(0xf<<28) != 0 {
231 // fmt.Println("GPI")
232 // }
233 f.setIf(instAttrReg1&(0xf<<28) != 24, GPA)
234 f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC)
235 f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA)
236 f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT)
237 // if instAttrReg1&(0xf<<8) != 0 {
238 // fmt.Println("API")
239 // }
240 // if instAttrReg1&(0xf<<4) != 0 {
241 // fmt.Println("APA")
242 // }
243 f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP)
244
245 // Store
246 c.featureSet.or(f)
247}
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
new file mode 100644
index 0000000..9636c2b
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
@@ -0,0 +1,15 @@
1// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
2
3//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine
4// +build !amd64,!386,!arm64 gccgo noasm appengine
5
6package cpuid
7
8func initCPU() {
9 cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
10 cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
11 xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
12 rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
13}
14
15func addInfo(info *CPUInfo, safe bool) {}
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
new file mode 100644
index 0000000..c7dfa12
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
@@ -0,0 +1,37 @@
1// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
2
3//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine)
4// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine
5
6package cpuid
7
8func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
9func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
10func asmXgetbv(index uint32) (eax, edx uint32)
11func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
12func asmDarwinHasAVX512() bool
13
14func initCPU() {
15 cpuid = asmCpuid
16 cpuidex = asmCpuidex
17 xgetbv = asmXgetbv
18 rdtscpAsm = asmRdtscpAsm
19 darwinHasAVX512 = asmDarwinHasAVX512
20}
21
22func addInfo(c *CPUInfo, safe bool) {
23 c.maxFunc = maxFunctionID()
24 c.maxExFunc = maxExtendedFunction()
25 c.BrandName = brandName()
26 c.CacheLine = cacheLine()
27 c.Family, c.Model, c.Stepping = familyModel()
28 c.featureSet = support()
29 c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC))
30 c.ThreadsPerCore = threadsPerCore()
31 c.LogicalCores = logicalCores()
32 c.PhysicalCores = physicalCores()
33 c.VendorID, c.VendorString = vendorID()
34 c.AVX10Level = c.supportAVX10()
35 c.cacheSize()
36 c.frequencies()
37}
diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
new file mode 100644
index 0000000..43bd05f
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
@@ -0,0 +1,279 @@
1// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT.
2
3package cpuid
4
5import "strconv"
6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[ADX-1]
12 _ = x[AESNI-2]
13 _ = x[AMD3DNOW-3]
14 _ = x[AMD3DNOWEXT-4]
15 _ = x[AMXBF16-5]
16 _ = x[AMXFP16-6]
17 _ = x[AMXINT8-7]
18 _ = x[AMXTILE-8]
19 _ = x[APX_F-9]
20 _ = x[AVX-10]
21 _ = x[AVX10-11]
22 _ = x[AVX10_128-12]
23 _ = x[AVX10_256-13]
24 _ = x[AVX10_512-14]
25 _ = x[AVX2-15]
26 _ = x[AVX512BF16-16]
27 _ = x[AVX512BITALG-17]
28 _ = x[AVX512BW-18]
29 _ = x[AVX512CD-19]
30 _ = x[AVX512DQ-20]
31 _ = x[AVX512ER-21]
32 _ = x[AVX512F-22]
33 _ = x[AVX512FP16-23]
34 _ = x[AVX512IFMA-24]
35 _ = x[AVX512PF-25]
36 _ = x[AVX512VBMI-26]
37 _ = x[AVX512VBMI2-27]
38 _ = x[AVX512VL-28]
39 _ = x[AVX512VNNI-29]
40 _ = x[AVX512VP2INTERSECT-30]
41 _ = x[AVX512VPOPCNTDQ-31]
42 _ = x[AVXIFMA-32]
43 _ = x[AVXNECONVERT-33]
44 _ = x[AVXSLOW-34]
45 _ = x[AVXVNNI-35]
46 _ = x[AVXVNNIINT8-36]
47 _ = x[BHI_CTRL-37]
48 _ = x[BMI1-38]
49 _ = x[BMI2-39]
50 _ = x[CETIBT-40]
51 _ = x[CETSS-41]
52 _ = x[CLDEMOTE-42]
53 _ = x[CLMUL-43]
54 _ = x[CLZERO-44]
55 _ = x[CMOV-45]
56 _ = x[CMPCCXADD-46]
57 _ = x[CMPSB_SCADBS_SHORT-47]
58 _ = x[CMPXCHG8-48]
59 _ = x[CPBOOST-49]
60 _ = x[CPPC-50]
61 _ = x[CX16-51]
62 _ = x[EFER_LMSLE_UNS-52]
63 _ = x[ENQCMD-53]
64 _ = x[ERMS-54]
65 _ = x[F16C-55]
66 _ = x[FLUSH_L1D-56]
67 _ = x[FMA3-57]
68 _ = x[FMA4-58]
69 _ = x[FP128-59]
70 _ = x[FP256-60]
71 _ = x[FSRM-61]
72 _ = x[FXSR-62]
73 _ = x[FXSROPT-63]
74 _ = x[GFNI-64]
75 _ = x[HLE-65]
76 _ = x[HRESET-66]
77 _ = x[HTT-67]
78 _ = x[HWA-68]
79 _ = x[HYBRID_CPU-69]
80 _ = x[HYPERVISOR-70]
81 _ = x[IA32_ARCH_CAP-71]
82 _ = x[IA32_CORE_CAP-72]
83 _ = x[IBPB-73]
84 _ = x[IBRS-74]
85 _ = x[IBRS_PREFERRED-75]
86 _ = x[IBRS_PROVIDES_SMP-76]
87 _ = x[IBS-77]
88 _ = x[IBSBRNTRGT-78]
89 _ = x[IBSFETCHSAM-79]
90 _ = x[IBSFFV-80]
91 _ = x[IBSOPCNT-81]
92 _ = x[IBSOPCNTEXT-82]
93 _ = x[IBSOPSAM-83]
94 _ = x[IBSRDWROPCNT-84]
95 _ = x[IBSRIPINVALIDCHK-85]
96 _ = x[IBS_FETCH_CTLX-86]
97 _ = x[IBS_OPDATA4-87]
98 _ = x[IBS_OPFUSE-88]
99 _ = x[IBS_PREVENTHOST-89]
100 _ = x[IBS_ZEN4-90]
101 _ = x[IDPRED_CTRL-91]
102 _ = x[INT_WBINVD-92]
103 _ = x[INVLPGB-93]
104 _ = x[KEYLOCKER-94]
105 _ = x[KEYLOCKERW-95]
106 _ = x[LAHF-96]
107 _ = x[LAM-97]
108 _ = x[LBRVIRT-98]
109 _ = x[LZCNT-99]
110 _ = x[MCAOVERFLOW-100]
111 _ = x[MCDT_NO-101]
112 _ = x[MCOMMIT-102]
113 _ = x[MD_CLEAR-103]
114 _ = x[MMX-104]
115 _ = x[MMXEXT-105]
116 _ = x[MOVBE-106]
117 _ = x[MOVDIR64B-107]
118 _ = x[MOVDIRI-108]
119 _ = x[MOVSB_ZL-109]
120 _ = x[MOVU-110]
121 _ = x[MPX-111]
122 _ = x[MSRIRC-112]
123 _ = x[MSRLIST-113]
124 _ = x[MSR_PAGEFLUSH-114]
125 _ = x[NRIPS-115]
126 _ = x[NX-116]
127 _ = x[OSXSAVE-117]
128 _ = x[PCONFIG-118]
129 _ = x[POPCNT-119]
130 _ = x[PPIN-120]
131 _ = x[PREFETCHI-121]
132 _ = x[PSFD-122]
133 _ = x[RDPRU-123]
134 _ = x[RDRAND-124]
135 _ = x[RDSEED-125]
136 _ = x[RDTSCP-126]
137 _ = x[RRSBA_CTRL-127]
138 _ = x[RTM-128]
139 _ = x[RTM_ALWAYS_ABORT-129]
140 _ = x[SERIALIZE-130]
141 _ = x[SEV-131]
142 _ = x[SEV_64BIT-132]
143 _ = x[SEV_ALTERNATIVE-133]
144 _ = x[SEV_DEBUGSWAP-134]
145 _ = x[SEV_ES-135]
146 _ = x[SEV_RESTRICTED-136]
147 _ = x[SEV_SNP-137]
148 _ = x[SGX-138]
149 _ = x[SGXLC-139]
150 _ = x[SHA-140]
151 _ = x[SME-141]
152 _ = x[SME_COHERENT-142]
153 _ = x[SPEC_CTRL_SSBD-143]
154 _ = x[SRBDS_CTRL-144]
155 _ = x[SSE-145]
156 _ = x[SSE2-146]
157 _ = x[SSE3-147]
158 _ = x[SSE4-148]
159 _ = x[SSE42-149]
160 _ = x[SSE4A-150]
161 _ = x[SSSE3-151]
162 _ = x[STIBP-152]
163 _ = x[STIBP_ALWAYSON-153]
164 _ = x[STOSB_SHORT-154]
165 _ = x[SUCCOR-155]
166 _ = x[SVM-156]
167 _ = x[SVMDA-157]
168 _ = x[SVMFBASID-158]
169 _ = x[SVML-159]
170 _ = x[SVMNP-160]
171 _ = x[SVMPF-161]
172 _ = x[SVMPFT-162]
173 _ = x[SYSCALL-163]
174 _ = x[SYSEE-164]
175 _ = x[TBM-165]
176 _ = x[TDX_GUEST-166]
177 _ = x[TLB_FLUSH_NESTED-167]
178 _ = x[TME-168]
179 _ = x[TOPEXT-169]
180 _ = x[TSCRATEMSR-170]
181 _ = x[TSXLDTRK-171]
182 _ = x[VAES-172]
183 _ = x[VMCBCLEAN-173]
184 _ = x[VMPL-174]
185 _ = x[VMSA_REGPROT-175]
186 _ = x[VMX-176]
187 _ = x[VPCLMULQDQ-177]
188 _ = x[VTE-178]
189 _ = x[WAITPKG-179]
190 _ = x[WBNOINVD-180]
191 _ = x[WRMSRNS-181]
192 _ = x[X87-182]
193 _ = x[XGETBV1-183]
194 _ = x[XOP-184]
195 _ = x[XSAVE-185]
196 _ = x[XSAVEC-186]
197 _ = x[XSAVEOPT-187]
198 _ = x[XSAVES-188]
199 _ = x[AESARM-189]
200 _ = x[ARMCPUID-190]
201 _ = x[ASIMD-191]
202 _ = x[ASIMDDP-192]
203 _ = x[ASIMDHP-193]
204 _ = x[ASIMDRDM-194]
205 _ = x[ATOMICS-195]
206 _ = x[CRC32-196]
207 _ = x[DCPOP-197]
208 _ = x[EVTSTRM-198]
209 _ = x[FCMA-199]
210 _ = x[FP-200]
211 _ = x[FPHP-201]
212 _ = x[GPA-202]
213 _ = x[JSCVT-203]
214 _ = x[LRCPC-204]
215 _ = x[PMULL-205]
216 _ = x[SHA1-206]
217 _ = x[SHA2-207]
218 _ = x[SHA3-208]
219 _ = x[SHA512-209]
220 _ = x[SM3-210]
221 _ = x[SM4-211]
222 _ = x[SVE-212]
223 _ = x[lastID-213]
224 _ = x[firstID-0]
225}
226
227const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
228
229var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 319, 323, 327, 333, 338, 346, 351, 357, 361, 370, 388, 396, 403, 407, 411, 425, 431, 435, 439, 448, 452, 456, 461, 466, 470, 474, 481, 485, 488, 494, 497, 500, 510, 520, 533, 546, 550, 554, 568, 585, 588, 598, 609, 615, 623, 634, 642, 654, 670, 684, 695, 705, 720, 728, 739, 749, 756, 765, 775, 779, 782, 789, 794, 805, 812, 819, 827, 830, 836, 841, 850, 857, 865, 869, 872, 878, 885, 898, 903, 905, 912, 919, 925, 929, 938, 942, 947, 953, 959, 965, 975, 978, 994, 1003, 1006, 1015, 1030, 1043, 1049, 1063, 1070, 1073, 1078, 1081, 1084, 1096, 1110, 1120, 1123, 1127, 1131, 1135, 1140, 1145, 1150, 1155, 1169, 1180, 1186, 1189, 1194, 1203, 1207, 1212, 1217, 1223, 1230, 1235, 1238, 1247, 1263, 1266, 1272, 1282, 1290, 1294, 1303, 1307, 1319, 1322, 1332, 1335, 1342, 1350, 1357, 1360, 1367, 1370, 1375, 1381, 1389, 1395, 1401, 1409, 1414, 1421, 1428, 1436, 1443, 1448, 1453, 1460, 1464, 1466, 1470, 1473, 1478, 1483, 1488, 1492, 1496, 1500, 1506, 1509, 1512, 1515, 1521}
230
231func (i FeatureID) String() string {
232 if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
233 return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")"
234 }
235 return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]]
236}
237func _() {
238 // An "invalid array index" compiler error signifies that the constant values have changed.
239 // Re-run the stringer command to generate them again.
240 var x [1]struct{}
241 _ = x[VendorUnknown-0]
242 _ = x[Intel-1]
243 _ = x[AMD-2]
244 _ = x[VIA-3]
245 _ = x[Transmeta-4]
246 _ = x[NSC-5]
247 _ = x[KVM-6]
248 _ = x[MSVM-7]
249 _ = x[VMware-8]
250 _ = x[XenHVM-9]
251 _ = x[Bhyve-10]
252 _ = x[Hygon-11]
253 _ = x[SiS-12]
254 _ = x[RDC-13]
255 _ = x[Ampere-14]
256 _ = x[ARM-15]
257 _ = x[Broadcom-16]
258 _ = x[Cavium-17]
259 _ = x[DEC-18]
260 _ = x[Fujitsu-19]
261 _ = x[Infineon-20]
262 _ = x[Motorola-21]
263 _ = x[NVIDIA-22]
264 _ = x[AMCC-23]
265 _ = x[Qualcomm-24]
266 _ = x[Marvell-25]
267 _ = x[lastVendor-26]
268}
269
270const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor"
271
272var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155}
273
274func (i Vendor) String() string {
275 if i < 0 || i >= Vendor(len(_Vendor_index)-1) {
276 return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")"
277 }
278 return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]]
279}
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
new file mode 100644
index 0000000..84b1acd
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
@@ -0,0 +1,121 @@
1// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
2
3package cpuid
4
5import (
6 "runtime"
7 "strings"
8
9 "golang.org/x/sys/unix"
10)
11
12func detectOS(c *CPUInfo) bool {
13 if runtime.GOOS != "ios" {
14 tryToFillCPUInfoFomSysctl(c)
15 }
16 // There are no hw.optional sysctl values for the below features on Mac OS 11.0
17 // to detect their supported state dynamically. Assume the CPU features that
18 // Apple Silicon M1 supports to be available as a minimal set of features
19 // to all Go programs running on darwin/arm64.
20 // TODO: Add more if we know them.
21 c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2)
22
23 return true
24}
25
26func sysctlGetBool(name string) bool {
27 value, err := unix.SysctlUint32(name)
28 if err != nil {
29 return false
30 }
31 return value != 0
32}
33
34func sysctlGetString(name string) string {
35 value, err := unix.Sysctl(name)
36 if err != nil {
37 return ""
38 }
39 return value
40}
41
42func sysctlGetInt(unknown int, names ...string) int {
43 for _, name := range names {
44 value, err := unix.SysctlUint32(name)
45 if err != nil {
46 continue
47 }
48 if value != 0 {
49 return int(value)
50 }
51 }
52 return unknown
53}
54
55func sysctlGetInt64(unknown int, names ...string) int {
56 for _, name := range names {
57 value64, err := unix.SysctlUint64(name)
58 if err != nil {
59 continue
60 }
61 if int(value64) != unknown {
62 return int(value64)
63 }
64 }
65 return unknown
66}
67
68func setFeature(c *CPUInfo, name string, feature FeatureID) {
69 c.featureSet.setIf(sysctlGetBool(name), feature)
70}
71func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
72 c.BrandName = sysctlGetString("machdep.cpu.brand_string")
73
74 if len(c.BrandName) != 0 {
75 c.VendorString = strings.Fields(c.BrandName)[0]
76 }
77
78 c.PhysicalCores = sysctlGetInt(runtime.NumCPU(), "hw.physicalcpu")
79 c.ThreadsPerCore = sysctlGetInt(1, "machdep.cpu.thread_count", "kern.num_threads") /
80 sysctlGetInt(1, "hw.physicalcpu")
81 c.LogicalCores = sysctlGetInt(runtime.NumCPU(), "machdep.cpu.core_count")
82 c.Family = sysctlGetInt(0, "machdep.cpu.family", "hw.cpufamily")
83 c.Model = sysctlGetInt(0, "machdep.cpu.model")
84 c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize")
85 c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize")
86 c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize")
87 c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize")
88 c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize")
89
90 // from https://developer.arm.com/downloads/-/exploration-tools/feature-names-for-a-profile
91 setFeature(c, "hw.optional.arm.FEAT_AES", AESARM)
92 setFeature(c, "hw.optional.AdvSIMD", ASIMD)
93 setFeature(c, "hw.optional.arm.FEAT_DotProd", ASIMDDP)
94 setFeature(c, "hw.optional.arm.FEAT_RDM", ASIMDRDM)
95 setFeature(c, "hw.optional.FEAT_CRC32", CRC32)
96 setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP)
97 // setFeature(c, "", EVTSTRM)
98 setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA)
99 setFeature(c, "hw.optional.arm.FEAT_FP", FP)
100 setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP)
101 setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA)
102 setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT)
103 setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC)
104 setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL)
105 setFeature(c, "hw.optional.arm.FEAT_SHA1", SHA1)
106 setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2)
107 setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3)
108 setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512)
109 // setFeature(c, "", SM3)
110 // setFeature(c, "", SM4)
111 setFeature(c, "hw.optional.arm.FEAT_SVE", SVE)
112
113 // from empirical observation
114 setFeature(c, "hw.optional.AdvSIMD_HPFPCvt", ASIMDHP)
115 setFeature(c, "hw.optional.armv8_1_atomics", ATOMICS)
116 setFeature(c, "hw.optional.floatingpoint", FP)
117 setFeature(c, "hw.optional.armv8_2_sha3", SHA3)
118 setFeature(c, "hw.optional.armv8_2_sha512", SHA512)
119 setFeature(c, "hw.optional.armv8_3_compnum", FCMA)
120 setFeature(c, "hw.optional.armv8_crc32", CRC32)
121}
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go
new file mode 100644
index 0000000..ee278b9
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go
@@ -0,0 +1,130 @@
1// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
2
3// Copyright 2018 The Go Authors. All rights reserved.
4// Use of this source code is governed by a BSD-style
5// license that can be found in the LICENSE file located
6// here https://github.com/golang/sys/blob/master/LICENSE
7
8package cpuid
9
10import (
11 "encoding/binary"
12 "io/ioutil"
13 "runtime"
14)
15
16// HWCAP bits.
17const (
18 hwcap_FP = 1 << 0
19 hwcap_ASIMD = 1 << 1
20 hwcap_EVTSTRM = 1 << 2
21 hwcap_AES = 1 << 3
22 hwcap_PMULL = 1 << 4
23 hwcap_SHA1 = 1 << 5
24 hwcap_SHA2 = 1 << 6
25 hwcap_CRC32 = 1 << 7
26 hwcap_ATOMICS = 1 << 8
27 hwcap_FPHP = 1 << 9
28 hwcap_ASIMDHP = 1 << 10
29 hwcap_CPUID = 1 << 11
30 hwcap_ASIMDRDM = 1 << 12
31 hwcap_JSCVT = 1 << 13
32 hwcap_FCMA = 1 << 14
33 hwcap_LRCPC = 1 << 15
34 hwcap_DCPOP = 1 << 16
35 hwcap_SHA3 = 1 << 17
36 hwcap_SM3 = 1 << 18
37 hwcap_SM4 = 1 << 19
38 hwcap_ASIMDDP = 1 << 20
39 hwcap_SHA512 = 1 << 21
40 hwcap_SVE = 1 << 22
41 hwcap_ASIMDFHM = 1 << 23
42)
43
44func detectOS(c *CPUInfo) bool {
45 // For now assuming no hyperthreading is reasonable.
46 c.LogicalCores = runtime.NumCPU()
47 c.PhysicalCores = c.LogicalCores
48 c.ThreadsPerCore = 1
49 if hwcap == 0 {
50 // We did not get values from the runtime.
51 // Try reading /proc/self/auxv
52
53 // From https://github.com/golang/sys
54 const (
55 _AT_HWCAP = 16
56 _AT_HWCAP2 = 26
57
58 uintSize = int(32 << (^uint(0) >> 63))
59 )
60
61 buf, err := ioutil.ReadFile("/proc/self/auxv")
62 if err != nil {
63 // e.g. on android /proc/self/auxv is not accessible, so silently
64 // ignore the error and leave Initialized = false. On some
65 // architectures (e.g. arm64) doinit() implements a fallback
66 // readout and will set Initialized = true again.
67 return false
68 }
69 bo := binary.LittleEndian
70 for len(buf) >= 2*(uintSize/8) {
71 var tag, val uint
72 switch uintSize {
73 case 32:
74 tag = uint(bo.Uint32(buf[0:]))
75 val = uint(bo.Uint32(buf[4:]))
76 buf = buf[8:]
77 case 64:
78 tag = uint(bo.Uint64(buf[0:]))
79 val = uint(bo.Uint64(buf[8:]))
80 buf = buf[16:]
81 }
82 switch tag {
83 case _AT_HWCAP:
84 hwcap = val
85 case _AT_HWCAP2:
86 // Not used
87 }
88 }
89 if hwcap == 0 {
90 return false
91 }
92 }
93
94 // HWCap was populated by the runtime from the auxiliary vector.
95 // Use HWCap information since reading aarch64 system registers
96 // is not supported in user space on older linux kernels.
97 c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM)
98 c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD)
99 c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP)
100 c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP)
101 c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM)
102 c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID)
103 c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32)
104 c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP)
105 c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM)
106 c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA)
107 c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP)
108 c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP)
109 c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT)
110 c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC)
111 c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL)
112 c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1)
113 c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2)
114 c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3)
115 c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512)
116 c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3)
117 c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4)
118 c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE)
119
120 // The Samsung S9+ kernel reports support for atomics, but not all cores
121 // actually support them, resulting in SIGILL. See issue #28431.
122 // TODO(elias.naur): Only disable the optimization on bad chipsets on android.
123 c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS)
124
125 return true
126}
127
128func isSet(hwc uint, value uint) bool {
129 return hwc&value != 0
130}
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
new file mode 100644
index 0000000..8733ba3
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
@@ -0,0 +1,16 @@
1// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
2
3//go:build arm64 && !linux && !darwin
4// +build arm64,!linux,!darwin
5
6package cpuid
7
8import "runtime"
9
10func detectOS(c *CPUInfo) bool {
11 c.PhysicalCores = runtime.NumCPU()
12 // For now assuming 1 thread per core...
13 c.ThreadsPerCore = 1
14 c.LogicalCores = c.PhysicalCores
15 return false
16}
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
new file mode 100644
index 0000000..f8f201b
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
@@ -0,0 +1,8 @@
1// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
2
3//go:build nounsafe
4// +build nounsafe
5
6package cpuid
7
8var hwcap uint
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
new file mode 100644
index 0000000..92af622
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
@@ -0,0 +1,11 @@
1// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
2
3//go:build !nounsafe
4// +build !nounsafe
5
6package cpuid
7
8import _ "unsafe" // needed for go:linkname
9
10//go:linkname hwcap internal/cpu.HWCap
11var hwcap uint
diff --git a/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh
new file mode 100644
index 0000000..471d986
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh
@@ -0,0 +1,15 @@
1#!/bin/sh
2
3set -e
4
5go tool dist list | while IFS=/ read os arch; do
6 echo "Checking $os/$arch..."
7 echo " normal"
8 GOARCH=$arch GOOS=$os go build -o /dev/null .
9 echo " noasm"
10 GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null .
11 echo " appengine"
12 GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null .
13 echo " noasm,appengine"
14 GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null .
15done
diff --git a/vendor/github.com/minio/md5-simd/LICENSE b/vendor/github.com/minio/md5-simd/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/LICENSE
@@ -0,0 +1,202 @@
1
2 Apache License
3 Version 2.0, January 2004
4 http://www.apache.org/licenses/
5
6 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
8 1. Definitions.
9
10 "License" shall mean the terms and conditions for use, reproduction,
11 and distribution as defined by Sections 1 through 9 of this document.
12
13 "Licensor" shall mean the copyright owner or entity authorized by
14 the copyright owner that is granting the License.
15
16 "Legal Entity" shall mean the union of the acting entity and all
17 other entities that control, are controlled by, or are under common
18 control with that entity. For the purposes of this definition,
19 "control" means (i) the power, direct or indirect, to cause the
20 direction or management of such entity, whether by contract or
21 otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 outstanding shares, or (iii) beneficial ownership of such entity.
23
24 "You" (or "Your") shall mean an individual or Legal Entity
25 exercising permissions granted by this License.
26
27 "Source" form shall mean the preferred form for making modifications,
28 including but not limited to software source code, documentation
29 source, and configuration files.
30
31 "Object" form shall mean any form resulting from mechanical
32 transformation or translation of a Source form, including but
33 not limited to compiled object code, generated documentation,
34 and conversions to other media types.
35
36 "Work" shall mean the work of authorship, whether in Source or
37 Object form, made available under the License, as indicated by a
38 copyright notice that is included in or attached to the work
39 (an example is provided in the Appendix below).
40
41 "Derivative Works" shall mean any work, whether in Source or Object
42 form, that is based on (or derived from) the Work and for which the
43 editorial revisions, annotations, elaborations, or other modifications
44 represent, as a whole, an original work of authorship. For the purposes
45 of this License, Derivative Works shall not include works that remain
46 separable from, or merely link (or bind by name) to the interfaces of,
47 the Work and Derivative Works thereof.
48
49 "Contribution" shall mean any work of authorship, including
50 the original version of the Work and any modifications or additions
51 to that Work or Derivative Works thereof, that is intentionally
52 submitted to Licensor for inclusion in the Work by the copyright owner
53 or by an individual or Legal Entity authorized to submit on behalf of
54 the copyright owner. For the purposes of this definition, "submitted"
55 means any form of electronic, verbal, or written communication sent
56 to the Licensor or its representatives, including but not limited to
57 communication on electronic mailing lists, source code control systems,
58 and issue tracking systems that are managed by, or on behalf of, the
59 Licensor for the purpose of discussing and improving the Work, but
60 excluding communication that is conspicuously marked or otherwise
61 designated in writing by the copyright owner as "Not a Contribution."
62
63 "Contributor" shall mean Licensor and any individual or Legal Entity
64 on behalf of whom a Contribution has been received by Licensor and
65 subsequently incorporated within the Work.
66
67 2. Grant of Copyright License. Subject to the terms and conditions of
68 this License, each Contributor hereby grants to You a perpetual,
69 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 copyright license to reproduce, prepare Derivative Works of,
71 publicly display, publicly perform, sublicense, and distribute the
72 Work and such Derivative Works in Source or Object form.
73
74 3. Grant of Patent License. Subject to the terms and conditions of
75 this License, each Contributor hereby grants to You a perpetual,
76 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 (except as stated in this section) patent license to make, have made,
78 use, offer to sell, sell, import, and otherwise transfer the Work,
79 where such license applies only to those patent claims licensable
80 by such Contributor that are necessarily infringed by their
81 Contribution(s) alone or by combination of their Contribution(s)
82 with the Work to which such Contribution(s) was submitted. If You
83 institute patent litigation against any entity (including a
84 cross-claim or counterclaim in a lawsuit) alleging that the Work
85 or a Contribution incorporated within the Work constitutes direct
86 or contributory patent infringement, then any patent licenses
87 granted to You under this License for that Work shall terminate
88 as of the date such litigation is filed.
89
90 4. Redistribution. You may reproduce and distribute copies of the
91 Work or Derivative Works thereof in any medium, with or without
92 modifications, and in Source or Object form, provided that You
93 meet the following conditions:
94
95 (a) You must give any other recipients of the Work or
96 Derivative Works a copy of this License; and
97
98 (b) You must cause any modified files to carry prominent notices
99 stating that You changed the files; and
100
101 (c) You must retain, in the Source form of any Derivative Works
102 that You distribute, all copyright, patent, trademark, and
103 attribution notices from the Source form of the Work,
104 excluding those notices that do not pertain to any part of
105 the Derivative Works; and
106
107 (d) If the Work includes a "NOTICE" text file as part of its
108 distribution, then any Derivative Works that You distribute must
109 include a readable copy of the attribution notices contained
110 within such NOTICE file, excluding those notices that do not
111 pertain to any part of the Derivative Works, in at least one
112 of the following places: within a NOTICE text file distributed
113 as part of the Derivative Works; within the Source form or
114 documentation, if provided along with the Derivative Works; or,
115 within a display generated by the Derivative Works, if and
116 wherever such third-party notices normally appear. The contents
117 of the NOTICE file are for informational purposes only and
118 do not modify the License. You may add Your own attribution
119 notices within Derivative Works that You distribute, alongside
120 or as an addendum to the NOTICE text from the Work, provided
121 that such additional attribution notices cannot be construed
122 as modifying the License.
123
124 You may add Your own copyright statement to Your modifications and
125 may provide additional or different license terms and conditions
126 for use, reproduction, or distribution of Your modifications, or
127 for any such Derivative Works as a whole, provided Your use,
128 reproduction, and distribution of the Work otherwise complies with
129 the conditions stated in this License.
130
131 5. Submission of Contributions. Unless You explicitly state otherwise,
132 any Contribution intentionally submitted for inclusion in the Work
133 by You to the Licensor shall be under the terms and conditions of
134 this License, without any additional terms or conditions.
135 Notwithstanding the above, nothing herein shall supersede or modify
136 the terms of any separate license agreement you may have executed
137 with Licensor regarding such Contributions.
138
139 6. Trademarks. This License does not grant permission to use the trade
140 names, trademarks, service marks, or product names of the Licensor,
141 except as required for reasonable and customary use in describing the
142 origin of the Work and reproducing the content of the NOTICE file.
143
144 7. Disclaimer of Warranty. Unless required by applicable law or
145 agreed to in writing, Licensor provides the Work (and each
146 Contributor provides its Contributions) on an "AS IS" BASIS,
147 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 implied, including, without limitation, any warranties or conditions
149 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 PARTICULAR PURPOSE. You are solely responsible for determining the
151 appropriateness of using or redistributing the Work and assume any
152 risks associated with Your exercise of permissions under this License.
153
154 8. Limitation of Liability. In no event and under no legal theory,
155 whether in tort (including negligence), contract, or otherwise,
156 unless required by applicable law (such as deliberate and grossly
157 negligent acts) or agreed to in writing, shall any Contributor be
158 liable to You for damages, including any direct, indirect, special,
159 incidental, or consequential damages of any character arising as a
160 result of this License or out of the use or inability to use the
161 Work (including but not limited to damages for loss of goodwill,
162 work stoppage, computer failure or malfunction, or any and all
163 other commercial damages or losses), even if such Contributor
164 has been advised of the possibility of such damages.
165
166 9. Accepting Warranty or Additional Liability. While redistributing
167 the Work or Derivative Works thereof, You may choose to offer,
168 and charge a fee for, acceptance of support, warranty, indemnity,
169 or other liability obligations and/or rights consistent with this
170 License. However, in accepting such obligations, You may act only
171 on Your own behalf and on Your sole responsibility, not on behalf
172 of any other Contributor, and only if You agree to indemnify,
173 defend, and hold each Contributor harmless for any liability
174 incurred by, or claims asserted against, such Contributor by reason
175 of your accepting any such warranty or additional liability.
176
177 END OF TERMS AND CONDITIONS
178
179 APPENDIX: How to apply the Apache License to your work.
180
181 To apply the Apache License to your work, attach the following
182 boilerplate notice, with the fields enclosed by brackets "[]"
183 replaced with your own identifying information. (Don't include
184 the brackets!) The text should be enclosed in the appropriate
185 comment syntax for the file format. We also recommend that a
186 file or class name and description of purpose be included on the
187 same "printed page" as the copyright notice for easier
188 identification within third-party archives.
189
190 Copyright [yyyy] [name of copyright owner]
191
192 Licensed under the Apache License, Version 2.0 (the "License");
193 you may not use this file except in compliance with the License.
194 You may obtain a copy of the License at
195
196 http://www.apache.org/licenses/LICENSE-2.0
197
198 Unless required by applicable law or agreed to in writing, software
199 distributed under the License is distributed on an "AS IS" BASIS,
200 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 See the License for the specific language governing permissions and
202 limitations under the License.
diff --git a/vendor/github.com/minio/md5-simd/LICENSE.Golang b/vendor/github.com/minio/md5-simd/LICENSE.Golang
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/LICENSE.Golang
@@ -0,0 +1,27 @@
1Copyright (c) 2009 The Go Authors. All rights reserved.
2
3Redistribution and use in source and binary forms, with or without
4modification, are permitted provided that the following conditions are
5met:
6
7 * Redistributions of source code must retain the above copyright
8notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above
10copyright notice, this list of conditions and the following disclaimer
11in the documentation and/or other materials provided with the
12distribution.
13 * Neither the name of Google Inc. nor the names of its
14contributors may be used to endorse or promote products derived from
15this software without specific prior written permission.
16
17THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/minio/md5-simd/README.md b/vendor/github.com/minio/md5-simd/README.md
new file mode 100644
index 0000000..fa6fce1
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/README.md
@@ -0,0 +1,198 @@
1
2# md5-simd
3
4This is a SIMD accelerated MD5 package, allowing up to either 8 (AVX2) or 16 (AVX512) independent MD5 sums to be calculated on a single CPU core.
5
6It was originally based on the [md5vec](https://github.com/igneous-systems/md5vec) repository by Igneous Systems, but has been made more flexible by amongst others supporting different message sizes per lane and adding AVX512.
7
8`md5-simd` integrates a similar mechanism as described in [minio/sha256-simd](https://github.com/minio/sha256-simd#support-for-avx512) for making it easy for clients to take advantage of the parallel nature of the MD5 calculation. This will result in reduced overall CPU load.
9
10It is important to understand that `md5-simd` **does not speed up** a single threaded MD5 hash sum.
11Rather it allows multiple __independent__ MD5 sums to be computed in parallel on the same CPU core,
12thereby making more efficient usage of the computing resources.
13
14## Usage
15
16[![Documentation](https://godoc.org/github.com/minio/md5-simd?status.svg)](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc)
17
18
19In order to use `md5-simd`, you must first create a `Server` which can be
20used to instantiate one or more objects for MD5 hashing.
21
22These objects conform to the regular [`hash.Hash`](https://pkg.go.dev/hash?tab=doc#Hash) interface
23and as such the normal Write/Reset/Sum functionality works as expected.
24
25As an example:
26```
27 // Create server
28 server := md5simd.NewServer()
29 defer server.Close()
30
31 // Create hashing object (conforming to hash.Hash)
32 md5Hash := server.NewHash()
33 defer md5Hash.Close()
34
35 // Write one (or more) blocks
36 md5Hash.Write(block)
37
38 // Return digest
39 digest := md5Hash.Sum([]byte{})
40```
41
42To keep performance both a [Server](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Server)
43and individual [Hasher](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Hasher) should
44be closed using the `Close()` function when no longer needed.
45
46A Hasher can efficiently be re-used by using [`Reset()`](https://pkg.go.dev/hash?tab=doc#Hash) functionality.
47
48In case your system does not support the instructions required it will fall back to using `crypto/md5` for hashing.
49
50## Limitations
51
52As explained above `md5-simd` does not speed up an individual MD5 hash sum computation,
53unless some hierarchical tree construct is used but this will result in different outcomes.
54Running a single hash on a server results in approximately half the throughput.
55
56Instead, it allows running multiple MD5 calculations in parallel on a single CPU core.
57This can be beneficial in e.g. multi-threaded server applications where many go-routines
58are dealing with many requests and multiple MD5 calculations can be packed/scheduled for parallel execution on a single core.
59
60This will result in a lower overall CPU usage as compared to using the standard `crypto/md5`
61functionality where each MD5 hash computation will consume a single thread (core).
62
63It is best to test and measure the overall CPU usage in a representative usage scenario in your application
64to get an overall understanding of the benefits of `md5-simd` as compared to `crypto/md5`, ideally under heavy CPU load.
65
66Also note that `md5-simd` is best meant to work with large objects,
67so if your application only hashes small objects of a few kilobytes
68you may be better off by using `crypto/md5`.
69
70## Performance
71
72For the best performance writes should be a multiple of 64 bytes, ideally a multiple of 32KB.
73To help with that a [`buffered := bufio.NewWriterSize(hasher, 32<<10)`](https://golang.org/pkg/bufio/#NewWriterSize)
74can be inserted if you are unsure of the sizes of the writes.
75Remember to [flush](https://golang.org/pkg/bufio/#Writer.Flush) `buffered` before reading the hash.
76
77A single 'server' can process 16 streams concurrently with 1 core (AVX-512) or 2 cores (AVX2).
78In situations where it is likely that more than 16 streams are fully loaded it may be beneficial
79to use multiple servers.
80
81The following chart compares the multi-core performance between `crypto/md5` vs the AVX2 vs the AVX512 code:
82
83![md5-performance-overview](chart/Multi-core-MD5-Aggregated-Hashing-Performance.png)
84
85Compared to `crypto/md5`, the AVX2 version is up to 4x faster:
86
87```
88$ benchcmp crypto-md5.txt avx2.txt
89benchmark old MB/s new MB/s speedup
90BenchmarkParallel/32KB-4 2229.22 7370.50 3.31x
91BenchmarkParallel/64KB-4 2233.61 8248.46 3.69x
92BenchmarkParallel/128KB-4 2235.43 8660.74 3.87x
93BenchmarkParallel/256KB-4 2236.39 8863.87 3.96x
94BenchmarkParallel/512KB-4 2238.05 8985.39 4.01x
95BenchmarkParallel/1MB-4 2233.56 9042.62 4.05x
96BenchmarkParallel/2MB-4 2224.11 9014.46 4.05x
97BenchmarkParallel/4MB-4 2199.78 8993.61 4.09x
98BenchmarkParallel/8MB-4 2182.48 8748.22 4.01x
99```
100
101Compared to `crypto/md5`, the AVX512 is up to 8x faster (for larger block sizes):
102
103```
104$ benchcmp crypto-md5.txt avx512.txt
105benchmark old MB/s new MB/s speedup
106BenchmarkParallel/32KB-4 2229.22 11605.78 5.21x
107BenchmarkParallel/64KB-4 2233.61 14329.65 6.42x
108BenchmarkParallel/128KB-4 2235.43 16166.39 7.23x
109BenchmarkParallel/256KB-4 2236.39 15570.09 6.96x
110BenchmarkParallel/512KB-4 2238.05 16705.83 7.46x
111BenchmarkParallel/1MB-4 2233.56 16941.95 7.59x
112BenchmarkParallel/2MB-4 2224.11 17136.01 7.70x
113BenchmarkParallel/4MB-4 2199.78 17218.61 7.83x
114BenchmarkParallel/8MB-4 2182.48 17252.88 7.91x
115```
116
117These measurements were performed on AWS EC2 instance of type `c5.xlarge` equipped with a Xeon Platinum 8124M CPU at 3.0 GHz.
118
119If only one or two inputs are available the scalar calculation method will be used for the
120optimal speed in these cases.
121
122## Operation
123
124To make operation as easy as possible there is a “Server” coordinating everything. The server keeps track of individual hash states and updates them as new data comes in. This can be visualized as follows:
125
126![server-architecture](chart/server-architecture.png)
127
128The data is sent to the server from each hash input in blocks of up to 32KB per round. In our testing we found this to be the block size that yielded the best results.
129
130Whenever there is data available the server will collect data for up to 16 hashes and process all 16 lanes in parallel. This means that if 16 hashes have data available all the lanes will be filled. However since that may not be the case, the server will fill less lanes and do a round anyway. Lanes can also be partially filled if less than 32KB of data is written.
131
132![server-lanes-example](chart/server-lanes-example.png)
133
134In this example 4 lanes are fully filled and 2 lanes are partially filled. In this case the black areas will simply be masked out from the results and ignored. This is also why calculating a single hash on a server will not result in any speedup and hash writes should be a multiple of 32KB for the best performance.
135
136For AVX512 all 16 calculations will be done on a single core, on AVX2 on 2 cores if there is data for more than 8 lanes.
137So for optimal usage there should be data available for all 16 hashes. It may be perfectly reasonable to use more than 16 concurrent hashes.
138
139
140## Design & Tech
141
142md5-simd has both an AVX2 (8-lane parallel), and an AVX512 (16-lane parallel version) algorithm to accelerate the computation with the following function definitions:
143```
144//go:noescape
145func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
146
147//go:noescape
148func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int)
149```
150
151The AVX2 version is based on the [md5vec](https://github.com/igneous-systems/md5vec) repository and is essentially unchanged except for minor (cosmetic) changes.
152
153The AVX512 version is derived from the AVX2 version but adds some further optimizations and simplifications.
154
155### Caching in upper ZMM registers
156
157The AVX2 version passes in a `cache8` block of memory (about 0.5 KB) for temporary storage of intermediate results during `ROUND1` which are subsequently used during `ROUND2` through to `ROUND4`.
158
159Since AVX512 has double the amount of registers (32 ZMM registers as compared to 16 YMM registers), it is possible to use the upper 16 ZMM registers for keeping the intermediate states on the CPU. As such, there is no need to pass in a corresponding `cache16` into the AVX512 block function.
160
161### Direct loading using 64-bit pointers
162
163The AVX2 version uses the `VPGATHERDD` instruction (for YMM) to do a parallel load of 8 lanes using (8 independent) 32-bit offsets. Since there is no control over how the 8 slices that are passed into the (Golang) `blockMd5` function are laid out into memory, it is not possible to derive a "base" address and corresponding offsets (all within 32-bits) for all 8 slices.
164
165As such the AVX2 version uses an interim buffer to collect the byte slices to be hashed from all 8 input slices and passes this buffer along with (fixed) 32-bit offsets into the assembly code.
166
167For the AVX512 version this interim buffer is not needed since the AVX512 code uses a pair of `VPGATHERQD` instructions to directly dereference 64-bit pointers (from a base register address that is initialized to zero).
168
169Note that two load (gather) instructions are needed because the AVX512 version processes 16-lanes in parallel, requiring 16 times 64-bit = 1024 bits in total to be loaded. A simple `VALIGND` and `VPORD` are subsequently used to merge the lower and upper halves together into a single ZMM register (that contains 16 lanes of 32-bit DWORDS).
170
171### Masking support
172
173Due to the fact that pointers are passed directly from the Golang slices, we need to protect against NULL pointers.
174For this a 16-bit mask is passed in the AVX512 assembly code which is used during the `VPGATHERQD` instructions to mask out lanes that could otherwise result in segment violations.
175
176### Minor optimizations
177
178The `roll` macro (three instructions on AVX2) is no longer needed for AVX512 and is replaced by a single `VPROLD` instruction.
179
180Also several logical operations from the various ROUNDS of the AVX2 version could be combined into a single instruction using ternary logic (with the `VPTERNLOGD` instruction), resulting in a further simplification and speed-up.
181
182## Low level block function performance
183
184The benchmark below shows the (single thread) maximum performance of the `block()` function for AVX2 (having 8 lanes) and AVX512 (having 16 lanes). Also the baseline single-core performance from the standard `crypto/md5` package is shown for comparison.
185
186```
187BenchmarkCryptoMd5-4 687.66 MB/s 0 B/op 0 allocs/op
188BenchmarkBlock8-4 4144.80 MB/s 0 B/op 0 allocs/op
189BenchmarkBlock16-4 8228.88 MB/s 0 B/op 0 allocs/op
190```
191
192## License
193
194`md5-simd` is released under the Apache License v2.0. You can find the complete text in the file LICENSE.
195
196## Contributing
197
198Contributions are welcome, please send PRs for any enhancements. \ No newline at end of file
diff --git a/vendor/github.com/minio/md5-simd/block16_amd64.s b/vendor/github.com/minio/md5-simd/block16_amd64.s
new file mode 100644
index 0000000..be0a43a
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block16_amd64.s
@@ -0,0 +1,228 @@
1// Copyright (c) 2020 MinIO Inc. All rights reserved.
2// Use of this source code is governed by a license that can be
3// found in the LICENSE file.
4
5//+build !noasm,!appengine,gc
6
7// This is the AVX512 implementation of the MD5 block function (16-way parallel)
8
9#define prep(index) \
10 KMOVQ kmask, ktmp \
11 VPGATHERDD index*4(base)(ptrs*1), ktmp, mem
12
13#define ROUND1(a, b, c, d, index, const, shift) \
14 VPXORQ c, tmp, tmp \
15 VPADDD 64*const(consts), a, a \
16 VPADDD mem, a, a \
17 VPTERNLOGD $0x6C, b, d, tmp \
18 prep(index) \
19 VPADDD tmp, a, a \
20 VPROLD $shift, a, a \
21 VMOVAPD c, tmp \
22 VPADDD b, a, a
23
24#define ROUND1noload(a, b, c, d, const, shift) \
25 VPXORQ c, tmp, tmp \
26 VPADDD 64*const(consts), a, a \
27 VPADDD mem, a, a \
28 VPTERNLOGD $0x6C, b, d, tmp \
29 VPADDD tmp, a, a \
30 VPROLD $shift, a, a \
31 VMOVAPD c, tmp \
32 VPADDD b, a, a
33
34#define ROUND2(a, b, c, d, zreg, const, shift) \
35 VPADDD 64*const(consts), a, a \
36 VPADDD zreg, a, a \
37 VANDNPD c, tmp, tmp \
38 VPTERNLOGD $0xEC, b, tmp, tmp2 \
39 VMOVAPD c, tmp \
40 VPADDD tmp2, a, a \
41 VMOVAPD c, tmp2 \
42 VPROLD $shift, a, a \
43 VPADDD b, a, a
44
45#define ROUND3(a, b, c, d, zreg, const, shift) \
46 VPADDD 64*const(consts), a, a \
47 VPADDD zreg, a, a \
48 VPTERNLOGD $0x96, b, d, tmp \
49 VPADDD tmp, a, a \
50 VPROLD $shift, a, a \
51 VMOVAPD b, tmp \
52 VPADDD b, a, a
53
54#define ROUND4(a, b, c, d, zreg, const, shift) \
55 VPADDD 64*const(consts), a, a \
56 VPADDD zreg, a, a \
57 VPTERNLOGD $0x36, b, c, tmp \
58 VPADDD tmp, a, a \
59 VPROLD $shift, a, a \
60 VPXORQ c, ones, tmp \
61 VPADDD b, a, a
62
63TEXT ·block16(SB), 4, $0-40
64
65 MOVQ state+0(FP), BX
66 MOVQ base+8(FP), SI
67 MOVQ ptrs+16(FP), AX
68 KMOVQ mask+24(FP), K1
69 MOVQ n+32(FP), DX
70 MOVQ ·avx512md5consts+0(SB), DI
71
72#define a Z0
73#define b Z1
74#define c Z2
75#define d Z3
76
77#define sa Z4
78#define sb Z5
79#define sc Z6
80#define sd Z7
81
82#define tmp Z8
83#define tmp2 Z9
84#define ptrs Z10
85#define ones Z12
86#define mem Z15
87
88#define kmask K1
89#define ktmp K3
90
91// ----------------------------------------------------------
92// Registers Z16 through to Z31 are used for caching purposes
93// ----------------------------------------------------------
94
95#define dig BX
96#define count DX
97#define base SI
98#define consts DI
99
100 // load digest into state registers
101 VMOVUPD (dig), a
102 VMOVUPD 0x40(dig), b
103 VMOVUPD 0x80(dig), c
104 VMOVUPD 0xc0(dig), d
105
106 // load source pointers
107 VMOVUPD 0x00(AX), ptrs
108
109 MOVQ $-1, AX
110 VPBROADCASTQ AX, ones
111
112loop:
113 VMOVAPD a, sa
114 VMOVAPD b, sb
115 VMOVAPD c, sc
116 VMOVAPD d, sd
117
118 prep(0)
119 VMOVAPD d, tmp
120 VMOVAPD mem, Z16
121
122 ROUND1(a,b,c,d, 1,0x00, 7)
123 VMOVAPD mem, Z17
124 ROUND1(d,a,b,c, 2,0x01,12)
125 VMOVAPD mem, Z18
126 ROUND1(c,d,a,b, 3,0x02,17)
127 VMOVAPD mem, Z19
128 ROUND1(b,c,d,a, 4,0x03,22)
129 VMOVAPD mem, Z20
130 ROUND1(a,b,c,d, 5,0x04, 7)
131 VMOVAPD mem, Z21
132 ROUND1(d,a,b,c, 6,0x05,12)
133 VMOVAPD mem, Z22
134 ROUND1(c,d,a,b, 7,0x06,17)
135 VMOVAPD mem, Z23
136 ROUND1(b,c,d,a, 8,0x07,22)
137 VMOVAPD mem, Z24
138 ROUND1(a,b,c,d, 9,0x08, 7)
139 VMOVAPD mem, Z25
140 ROUND1(d,a,b,c,10,0x09,12)
141 VMOVAPD mem, Z26
142 ROUND1(c,d,a,b,11,0x0a,17)
143 VMOVAPD mem, Z27
144 ROUND1(b,c,d,a,12,0x0b,22)
145 VMOVAPD mem, Z28
146 ROUND1(a,b,c,d,13,0x0c, 7)
147 VMOVAPD mem, Z29
148 ROUND1(d,a,b,c,14,0x0d,12)
149 VMOVAPD mem, Z30
150 ROUND1(c,d,a,b,15,0x0e,17)
151 VMOVAPD mem, Z31
152
153 ROUND1noload(b,c,d,a, 0x0f,22)
154
155 VMOVAPD d, tmp
156 VMOVAPD d, tmp2
157
158 ROUND2(a,b,c,d, Z17,0x10, 5)
159 ROUND2(d,a,b,c, Z22,0x11, 9)
160 ROUND2(c,d,a,b, Z27,0x12,14)
161 ROUND2(b,c,d,a, Z16,0x13,20)
162 ROUND2(a,b,c,d, Z21,0x14, 5)
163 ROUND2(d,a,b,c, Z26,0x15, 9)
164 ROUND2(c,d,a,b, Z31,0x16,14)
165 ROUND2(b,c,d,a, Z20,0x17,20)
166 ROUND2(a,b,c,d, Z25,0x18, 5)
167 ROUND2(d,a,b,c, Z30,0x19, 9)
168 ROUND2(c,d,a,b, Z19,0x1a,14)
169 ROUND2(b,c,d,a, Z24,0x1b,20)
170 ROUND2(a,b,c,d, Z29,0x1c, 5)
171 ROUND2(d,a,b,c, Z18,0x1d, 9)
172 ROUND2(c,d,a,b, Z23,0x1e,14)
173 ROUND2(b,c,d,a, Z28,0x1f,20)
174
175 VMOVAPD c, tmp
176
177 ROUND3(a,b,c,d, Z21,0x20, 4)
178 ROUND3(d,a,b,c, Z24,0x21,11)
179 ROUND3(c,d,a,b, Z27,0x22,16)
180 ROUND3(b,c,d,a, Z30,0x23,23)
181 ROUND3(a,b,c,d, Z17,0x24, 4)
182 ROUND3(d,a,b,c, Z20,0x25,11)
183 ROUND3(c,d,a,b, Z23,0x26,16)
184 ROUND3(b,c,d,a, Z26,0x27,23)
185 ROUND3(a,b,c,d, Z29,0x28, 4)
186 ROUND3(d,a,b,c, Z16,0x29,11)
187 ROUND3(c,d,a,b, Z19,0x2a,16)
188 ROUND3(b,c,d,a, Z22,0x2b,23)
189 ROUND3(a,b,c,d, Z25,0x2c, 4)
190 ROUND3(d,a,b,c, Z28,0x2d,11)
191 ROUND3(c,d,a,b, Z31,0x2e,16)
192 ROUND3(b,c,d,a, Z18,0x2f,23)
193
194 VPXORQ d, ones, tmp
195
196 ROUND4(a,b,c,d, Z16,0x30, 6)
197 ROUND4(d,a,b,c, Z23,0x31,10)
198 ROUND4(c,d,a,b, Z30,0x32,15)
199 ROUND4(b,c,d,a, Z21,0x33,21)
200 ROUND4(a,b,c,d, Z28,0x34, 6)
201 ROUND4(d,a,b,c, Z19,0x35,10)
202 ROUND4(c,d,a,b, Z26,0x36,15)
203 ROUND4(b,c,d,a, Z17,0x37,21)
204 ROUND4(a,b,c,d, Z24,0x38, 6)
205 ROUND4(d,a,b,c, Z31,0x39,10)
206 ROUND4(c,d,a,b, Z22,0x3a,15)
207 ROUND4(b,c,d,a, Z29,0x3b,21)
208 ROUND4(a,b,c,d, Z20,0x3c, 6)
209 ROUND4(d,a,b,c, Z27,0x3d,10)
210 ROUND4(c,d,a,b, Z18,0x3e,15)
211 ROUND4(b,c,d,a, Z25,0x3f,21)
212
213 VPADDD sa, a, a
214 VPADDD sb, b, b
215 VPADDD sc, c, c
216 VPADDD sd, d, d
217
218 LEAQ 64(base), base
219 SUBQ $64, count
220 JNE loop
221
222 VMOVUPD a, (dig)
223 VMOVUPD b, 0x40(dig)
224 VMOVUPD c, 0x80(dig)
225 VMOVUPD d, 0xc0(dig)
226
227 VZEROUPPER
228 RET
diff --git a/vendor/github.com/minio/md5-simd/block8_amd64.s b/vendor/github.com/minio/md5-simd/block8_amd64.s
new file mode 100644
index 0000000..f57db17
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block8_amd64.s
@@ -0,0 +1,281 @@
1//+build !noasm,!appengine,gc
2
3// Copyright (c) 2018 Igneous Systems
4// MIT License
5//
6// Permission is hereby granted, free of charge, to any person obtaining a copy
7// of this software and associated documentation files (the "Software"), to deal
8// in the Software without restriction, including without limitation the rights
9// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10// copies of the Software, and to permit persons to whom the Software is
11// furnished to do so, subject to the following conditions:
12//
13// The above copyright notice and this permission notice shall be included in all
14// copies or substantial portions of the Software.
15//
16// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22// SOFTWARE.
23
24// Copyright (c) 2020 MinIO Inc. All rights reserved.
25// Use of this source code is governed by a license that can be
26// found in the LICENSE file.
27
28// This is the AVX2 implementation of the MD5 block function (8-way parallel)
29
30// block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
31TEXT ·block8(SB), 4, $0-40
32 MOVQ state+0(FP), BX
33 MOVQ base+8(FP), SI
34 MOVQ bufs+16(FP), AX
35 MOVQ cache+24(FP), CX
36 MOVQ n+32(FP), DX
37 MOVQ ·avx256md5consts+0(SB), DI
38
39 // Align cache (which is stack allocated by the compiler)
40 // to a 256 bit boundary (ymm register alignment)
41 // The cache8 type is deliberately oversized to permit this.
42 ADDQ $31, CX
43 ANDB $-32, CL
44
45#define a Y0
46#define b Y1
47#define c Y2
48#define d Y3
49
50#define sa Y4
51#define sb Y5
52#define sc Y6
53#define sd Y7
54
55#define tmp Y8
56#define tmp2 Y9
57
58#define mask Y10
59#define off Y11
60
61#define ones Y12
62
63#define rtmp1 Y13
64#define rtmp2 Y14
65
66#define mem Y15
67
68#define dig BX
69#define cache CX
70#define count DX
71#define base SI
72#define consts DI
73
74#define prepmask \
75 VPXOR mask, mask, mask \
76 VPCMPGTD mask, off, mask
77
78#define prep(index) \
79 VMOVAPD mask, rtmp2 \
80 VPGATHERDD rtmp2, index*4(base)(off*1), mem
81
82#define load(index) \
83 VMOVAPD index*32(cache), mem
84
85#define store(index) \
86 VMOVAPD mem, index*32(cache)
87
88#define roll(shift, a) \
89 VPSLLD $shift, a, rtmp1 \
90 VPSRLD $32-shift, a, a \
91 VPOR rtmp1, a, a
92
93#define ROUND1(a, b, c, d, index, const, shift) \
94 VPXOR c, tmp, tmp \
95 VPADDD 32*const(consts), a, a \
96 VPADDD mem, a, a \
97 VPAND b, tmp, tmp \
98 VPXOR d, tmp, tmp \
99 prep(index) \
100 VPADDD tmp, a, a \
101 roll(shift,a) \
102 VMOVAPD c, tmp \
103 VPADDD b, a, a
104
105#define ROUND1load(a, b, c, d, index, const, shift) \
106 VXORPD c, tmp, tmp \
107 VPADDD 32*const(consts), a, a \
108 VPADDD mem, a, a \
109 VPAND b, tmp, tmp \
110 VPXOR d, tmp, tmp \
111 load(index) \
112 VPADDD tmp, a, a \
113 roll(shift,a) \
114 VMOVAPD c, tmp \
115 VPADDD b, a, a
116
117#define ROUND2(a, b, c, d, index, const, shift) \
118 VPADDD 32*const(consts), a, a \
119 VPADDD mem, a, a \
120 VPAND b, tmp2, tmp2 \
121 VANDNPD c, tmp, tmp \
122 load(index) \
123 VPOR tmp, tmp2, tmp2 \
124 VMOVAPD c, tmp \
125 VPADDD tmp2, a, a \
126 VMOVAPD c, tmp2 \
127 roll(shift,a) \
128 VPADDD b, a, a
129
130#define ROUND3(a, b, c, d, index, const, shift) \
131 VPADDD 32*const(consts), a, a \
132 VPADDD mem, a, a \
133 load(index) \
134 VPXOR d, tmp, tmp \
135 VPXOR b, tmp, tmp \
136 VPADDD tmp, a, a \
137 roll(shift,a) \
138 VMOVAPD b, tmp \
139 VPADDD b, a, a
140
141#define ROUND4(a, b, c, d, index, const, shift) \
142 VPADDD 32*const(consts), a, a \
143 VPADDD mem, a, a \
144 VPOR b, tmp, tmp \
145 VPXOR c, tmp, tmp \
146 VPADDD tmp, a, a \
147 load(index) \
148 roll(shift,a) \
149 VPXOR c, ones, tmp \
150 VPADDD b, a, a
151
152 // load digest into state registers
153 VMOVUPD (dig), a
154 VMOVUPD 32(dig), b
155 VMOVUPD 64(dig), c
156 VMOVUPD 96(dig), d
157
158 // load source buffer offsets
159 VMOVUPD (AX), off
160
161 prepmask
162 VPCMPEQD ones, ones, ones
163
164loop:
165 VMOVAPD a, sa
166 VMOVAPD b, sb
167 VMOVAPD c, sc
168 VMOVAPD d, sd
169
170 prep(0)
171 VMOVAPD d, tmp
172 store(0)
173
174 ROUND1(a,b,c,d, 1,0x00, 7)
175 store(1)
176 ROUND1(d,a,b,c, 2,0x01,12)
177 store(2)
178 ROUND1(c,d,a,b, 3,0x02,17)
179 store(3)
180 ROUND1(b,c,d,a, 4,0x03,22)
181 store(4)
182 ROUND1(a,b,c,d, 5,0x04, 7)
183 store(5)
184 ROUND1(d,a,b,c, 6,0x05,12)
185 store(6)
186 ROUND1(c,d,a,b, 7,0x06,17)
187 store(7)
188 ROUND1(b,c,d,a, 8,0x07,22)
189 store(8)
190 ROUND1(a,b,c,d, 9,0x08, 7)
191 store(9)
192 ROUND1(d,a,b,c,10,0x09,12)
193 store(10)
194 ROUND1(c,d,a,b,11,0x0a,17)
195 store(11)
196 ROUND1(b,c,d,a,12,0x0b,22)
197 store(12)
198 ROUND1(a,b,c,d,13,0x0c, 7)
199 store(13)
200 ROUND1(d,a,b,c,14,0x0d,12)
201 store(14)
202 ROUND1(c,d,a,b,15,0x0e,17)
203 store(15)
204 ROUND1load(b,c,d,a, 1,0x0f,22)
205
206 VMOVAPD d, tmp
207 VMOVAPD d, tmp2
208
209 ROUND2(a,b,c,d, 6,0x10, 5)
210 ROUND2(d,a,b,c,11,0x11, 9)
211 ROUND2(c,d,a,b, 0,0x12,14)
212 ROUND2(b,c,d,a, 5,0x13,20)
213 ROUND2(a,b,c,d,10,0x14, 5)
214 ROUND2(d,a,b,c,15,0x15, 9)
215 ROUND2(c,d,a,b, 4,0x16,14)
216 ROUND2(b,c,d,a, 9,0x17,20)
217 ROUND2(a,b,c,d,14,0x18, 5)
218 ROUND2(d,a,b,c, 3,0x19, 9)
219 ROUND2(c,d,a,b, 8,0x1a,14)
220 ROUND2(b,c,d,a,13,0x1b,20)
221 ROUND2(a,b,c,d, 2,0x1c, 5)
222 ROUND2(d,a,b,c, 7,0x1d, 9)
223 ROUND2(c,d,a,b,12,0x1e,14)
224 ROUND2(b,c,d,a, 0,0x1f,20)
225
226 load(5)
227 VMOVAPD c, tmp
228
229 ROUND3(a,b,c,d, 8,0x20, 4)
230 ROUND3(d,a,b,c,11,0x21,11)
231 ROUND3(c,d,a,b,14,0x22,16)
232 ROUND3(b,c,d,a, 1,0x23,23)
233 ROUND3(a,b,c,d, 4,0x24, 4)
234 ROUND3(d,a,b,c, 7,0x25,11)
235 ROUND3(c,d,a,b,10,0x26,16)
236 ROUND3(b,c,d,a,13,0x27,23)
237 ROUND3(a,b,c,d, 0,0x28, 4)
238 ROUND3(d,a,b,c, 3,0x29,11)
239 ROUND3(c,d,a,b, 6,0x2a,16)
240 ROUND3(b,c,d,a, 9,0x2b,23)
241 ROUND3(a,b,c,d,12,0x2c, 4)
242 ROUND3(d,a,b,c,15,0x2d,11)
243 ROUND3(c,d,a,b, 2,0x2e,16)
244 ROUND3(b,c,d,a, 0,0x2f,23)
245
246 load(0)
247 VPXOR d, ones, tmp
248
249 ROUND4(a,b,c,d, 7,0x30, 6)
250 ROUND4(d,a,b,c,14,0x31,10)
251 ROUND4(c,d,a,b, 5,0x32,15)
252 ROUND4(b,c,d,a,12,0x33,21)
253 ROUND4(a,b,c,d, 3,0x34, 6)
254 ROUND4(d,a,b,c,10,0x35,10)
255 ROUND4(c,d,a,b, 1,0x36,15)
256 ROUND4(b,c,d,a, 8,0x37,21)
257 ROUND4(a,b,c,d,15,0x38, 6)
258 ROUND4(d,a,b,c, 6,0x39,10)
259 ROUND4(c,d,a,b,13,0x3a,15)
260 ROUND4(b,c,d,a, 4,0x3b,21)
261 ROUND4(a,b,c,d,11,0x3c, 6)
262 ROUND4(d,a,b,c, 2,0x3d,10)
263 ROUND4(c,d,a,b, 9,0x3e,15)
264 ROUND4(b,c,d,a, 0,0x3f,21)
265
266 VPADDD sa, a, a
267 VPADDD sb, b, b
268 VPADDD sc, c, c
269 VPADDD sd, d, d
270
271 LEAQ 64(base), base
272 SUBQ $64, count
273 JNE loop
274
275 VMOVUPD a, (dig)
276 VMOVUPD b, 32(dig)
277 VMOVUPD c, 64(dig)
278 VMOVUPD d, 96(dig)
279
280 VZEROUPPER
281 RET
diff --git a/vendor/github.com/minio/md5-simd/block_amd64.go b/vendor/github.com/minio/md5-simd/block_amd64.go
new file mode 100644
index 0000000..16edda2
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block_amd64.go
@@ -0,0 +1,210 @@
1//+build !noasm,!appengine,gc
2
3// Copyright (c) 2020 MinIO Inc. All rights reserved.
4// Use of this source code is governed by a license that can be
5// found in the LICENSE file.
6
7package md5simd
8
9import (
10 "fmt"
11 "math"
12 "unsafe"
13
14 "github.com/klauspost/cpuid/v2"
15)
16
17var hasAVX512 bool
18
19func init() {
20 // VANDNPD requires AVX512DQ. Technically it could be VPTERNLOGQ which is AVX512F.
21 hasAVX512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ)
22}
23
24//go:noescape
25func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
26
27//go:noescape
28func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int)
29
30// 8-way 4x uint32 digests in 4 ymm registers
31// (ymm0, ymm1, ymm2, ymm3)
32type digest8 struct {
33 v0, v1, v2, v3 [8]uint32
34}
35
36// Stack cache for 8x64 byte md5.BlockSize bytes.
37// Must be 32-byte aligned, so allocate 512+32 and
38// align upwards at runtime.
39type cache8 [512 + 32]byte
40
41// MD5 magic numbers for one lane of hashing; inflated
42// 8x below at init time.
43var md5consts = [64]uint32{
44 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
45 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
46 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
47 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
48 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
49 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
50 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
51 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
52 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
53 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
54 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
55 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
56 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
57 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
58 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
59 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
60}
61
62// inflate the consts 8-way for 8x md5 (256 bit ymm registers)
63var avx256md5consts = func(c []uint32) []uint32 {
64 inf := make([]uint32, 8*len(c))
65 for i := range c {
66 for j := 0; j < 8; j++ {
67 inf[(i*8)+j] = c[i]
68 }
69 }
70 return inf
71}(md5consts[:])
72
73// 16-way 4x uint32 digests in 4 zmm registers
74type digest16 struct {
75 v0, v1, v2, v3 [16]uint32
76}
77
78// inflate the consts 16-way for 16x md5 (512 bit zmm registers)
79var avx512md5consts = func(c []uint32) []uint32 {
80 inf := make([]uint32, 16*len(c))
81 for i := range c {
82 for j := 0; j < 16; j++ {
83 inf[(i*16)+j] = c[i]
84 }
85 }
86 return inf
87}(md5consts[:])
88
89// Interface function to assembly code
90func (s *md5Server) blockMd5_x16(d *digest16, input [16][]byte, half bool) {
91 if hasAVX512 {
92 blockMd5_avx512(d, input, s.allBufs, &s.maskRounds16)
93 return
94 }
95
96 // Preparing data using copy is slower since copies aren't inlined.
97
98 // Calculate on this goroutine
99 if half {
100 for i := range s.i8[0][:] {
101 s.i8[0][i] = input[i]
102 }
103 for i := range s.d8a.v0[:] {
104 s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i]
105 }
106 blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a)
107 for i := range s.d8a.v0[:] {
108 d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i]
109 }
110 return
111 }
112
113 for i := range s.i8[0][:] {
114 s.i8[0][i], s.i8[1][i] = input[i], input[8+i]
115 }
116
117 for i := range s.d8a.v0[:] {
118 j := (i + 8) & 15
119 s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i]
120 s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i] = d.v0[j], d.v1[j], d.v2[j], d.v3[j]
121 }
122
123 // Benchmarks appears to be slightly faster when spinning up 2 goroutines instead
124 // of using the current for one of the blocks.
125 s.wg.Add(2)
126 go func() { blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a); s.wg.Done() }()
127 go func() { blockMd5_avx2(&s.d8b, s.i8[1], s.allBufs, &s.maskRounds8b); s.wg.Done() }()
128 s.wg.Wait()
129 for i := range s.d8a.v0[:] {
130 d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i]
131 }
132 for i := range s.d8b.v0[:] {
133 j := (i + 8) & 15
134 d.v0[j], d.v1[j], d.v2[j], d.v3[j] = s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i]
135 }
136}
137
138// Interface function to AVX512 assembly code
139func blockMd5_avx512(s *digest16, input [16][]byte, base []byte, maskRounds *[16]maskRounds) {
140 baseMin := uint64(uintptr(unsafe.Pointer(&(base[0]))))
141 ptrs := [16]int32{}
142
143 for i := range ptrs {
144 if len(input[i]) > 0 {
145 if len(input[i]) > internalBlockSize {
146 panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i))
147 }
148
149 off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin
150 if off > math.MaxUint32 {
151 panic(fmt.Sprintf("invalid buffer sent with offset %x", off))
152 }
153 ptrs[i] = int32(off)
154 }
155 }
156
157 sdup := *s // create copy of initial states to receive intermediate updates
158
159 rounds := generateMaskAndRounds16(input, maskRounds)
160
161 for r := 0; r < rounds; r++ {
162 m := maskRounds[r]
163
164 block16(&sdup.v0[0], uintptr(baseMin), &ptrs[0], m.mask, int(64*m.rounds))
165
166 for j := 0; j < len(ptrs); j++ {
167 ptrs[j] += int32(64 * m.rounds) // update pointers for next round
168 if m.mask&(1<<j) != 0 { // update digest if still masked as active
169 (*s).v0[j], (*s).v1[j], (*s).v2[j], (*s).v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j]
170 }
171 }
172 }
173}
174
175// Interface function to AVX2 assembly code
176func blockMd5_avx2(s *digest8, input [8][]byte, base []byte, maskRounds *[8]maskRounds) {
177 baseMin := uint64(uintptr(unsafe.Pointer(&(base[0])))) - 4
178 ptrs := [8]int32{}
179
180 for i := range ptrs {
181 if len(input[i]) > 0 {
182 if len(input[i]) > internalBlockSize {
183 panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i))
184 }
185
186 off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin
187 if off > math.MaxUint32 {
188 panic(fmt.Sprintf("invalid buffer sent with offset %x", off))
189 }
190 ptrs[i] = int32(off)
191 }
192 }
193
194 sdup := *s // create copy of initial states to receive intermediate updates
195
196 rounds := generateMaskAndRounds8(input, maskRounds)
197
198 for r := 0; r < rounds; r++ {
199 m := maskRounds[r]
200 var cache cache8 // stack storage for block8 tmp state
201 block8(&sdup.v0[0], uintptr(baseMin), &ptrs[0], &cache[0], int(64*m.rounds))
202
203 for j := 0; j < len(ptrs); j++ {
204 ptrs[j] += int32(64 * m.rounds) // update pointers for next round
205 if m.mask&(1<<j) != 0 { // update digest if still masked as active
206 (*s).v0[j], (*s).v1[j], (*s).v2[j], (*s).v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j]
207 }
208 }
209 }
210}
diff --git a/vendor/github.com/minio/md5-simd/md5-digest_amd64.go b/vendor/github.com/minio/md5-simd/md5-digest_amd64.go
new file mode 100644
index 0000000..5ea23a4
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-digest_amd64.go
@@ -0,0 +1,188 @@
1//+build !noasm,!appengine,gc
2
3// Copyright (c) 2020 MinIO Inc. All rights reserved.
4// Use of this source code is governed by a license that can be
5// found in the LICENSE file.
6
7package md5simd
8
9import (
10 "encoding/binary"
11 "errors"
12 "fmt"
13 "sync"
14 "sync/atomic"
15)
16
// md5Digest - Type for computing MD5 using either AVX2 or AVX512
type md5Digest struct {
	uid         uint64          // unique id identifying this hasher to the server
	blocksCh    chan blockInput // channel on which blocks are sent to the server
	cycleServer chan uint64     // notifies the server that this uid has queued input
	x           [BlockSize]byte // overflow buffer holding a partial block
	nx          int             // number of valid bytes in x
	len         uint64          // total number of bytes written so far
	buffers     <-chan []byte   // pool of preallocated buffers shared with the server
}
27
28// NewHash - initialize instance for Md5 implementation.
29func (s *md5Server) NewHash() Hasher {
30 uid := atomic.AddUint64(&s.uidCounter, 1)
31 blockCh := make(chan blockInput, buffersPerLane)
32 s.newInput <- newClient{
33 uid: uid,
34 input: blockCh,
35 }
36 return &md5Digest{
37 uid: uid,
38 buffers: s.buffers,
39 blocksCh: blockCh,
40 cycleServer: s.cycle,
41 }
42}
43
44// Size - Return size of checksum
45func (d *md5Digest) Size() int { return Size }
46
47// BlockSize - Return blocksize of checksum
48func (d md5Digest) BlockSize() int { return BlockSize }
49
50func (d *md5Digest) Reset() {
51 if d.blocksCh == nil {
52 panic("reset after close")
53 }
54 d.nx = 0
55 d.len = 0
56 d.sendBlock(blockInput{uid: d.uid, reset: true}, false)
57}
58
59// write to digest
60func (d *md5Digest) Write(p []byte) (nn int, err error) {
61 if d.blocksCh == nil {
62 return 0, errors.New("md5Digest closed")
63 }
64
65 // break input into chunks of maximum internalBlockSize size
66 for {
67 l := len(p)
68 if l > internalBlockSize {
69 l = internalBlockSize
70 }
71 nnn, err := d.write(p[:l])
72 if err != nil {
73 return nn, err
74 }
75 nn += nnn
76 p = p[l:]
77
78 if len(p) == 0 {
79 break
80 }
81
82 }
83 return
84}
85
// write processes one chunk of at most internalBlockSize bytes.
// Complete 64-byte blocks are copied into buffers taken from the server's
// pool and sent for processing; any trailing partial block is kept in d.x
// until more data arrives (or Sum finalizes it).
func (d *md5Digest) write(p []byte) (nn int, err error) {

	nn = len(p)
	d.len += uint64(nn)
	// First top up any partial block left over from the previous write.
	if d.nx > 0 {
		n := copy(d.x[d.nx:], p)
		d.nx += n
		if d.nx == BlockSize {
			// Create a copy of the overflow buffer in order to send it async over the channel
			// (since we will modify the overflow buffer down below with any access beyond multiples of 64)
			tmp := <-d.buffers
			tmp = tmp[:BlockSize]
			copy(tmp, d.x[:])
			// Second argument: force a server cycle when the remainder of p
			// cannot produce another full block on its own.
			d.sendBlock(blockInput{uid: d.uid, msg: tmp}, len(p)-n < BlockSize)
			d.nx = 0
		}
		p = p[n:]
	}
	// Send all remaining complete 64-byte blocks as a single message.
	if len(p) >= BlockSize {
		n := len(p) &^ (BlockSize - 1) // round down to a multiple of BlockSize
		buf := <-d.buffers
		buf = buf[:n]
		copy(buf, p)
		d.sendBlock(blockInput{uid: d.uid, msg: buf}, len(p)-n < BlockSize)
		p = p[n:]
	}
	// Stash any remaining partial block for the next write.
	if len(p) > 0 {
		d.nx = copy(d.x[:], p)
	}
	return
}
117
118func (d *md5Digest) Close() {
119 if d.blocksCh != nil {
120 close(d.blocksCh)
121 d.blocksCh = nil
122 }
123}
124
125var sumChPool sync.Pool
126
127func init() {
128 sumChPool.New = func() interface{} {
129 return make(chan sumResult, 1)
130 }
131}
132
// Sum - Return MD5 sum in bytes
//
// Sum appends the digest of everything written so far to `in` and returns
// the result. It does not reset state (matching hash.Hash semantics): the
// final trailer (leftover bytes + 0x80 + zero padding + 64-bit bit length)
// is built in a pooled buffer so d.x and d.len stay intact, then sent to
// the server, blocking until the result arrives.
func (d *md5Digest) Sum(in []byte) (result []byte) {
	if d.blocksCh == nil {
		panic("sum after close")
	}

	// Copy the unprocessed tail into a pooled buffer; d.x must survive
	// for further Write calls after Sum.
	trail := <-d.buffers
	trail = append(trail[:0], d.x[:d.nx]...)

	length := d.len
	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
	var tmp [64]byte
	tmp[0] = 0x80
	if length%64 < 56 {
		trail = append(trail, tmp[0:56-length%64]...)
	} else {
		// Not enough room in this block for the 8-byte length: pad into
		// the next block.
		trail = append(trail, tmp[0:64+56-length%64]...)
	}

	// Length in bits.
	length <<= 3
	binary.LittleEndian.PutUint64(tmp[:], length) // append length in bits

	trail = append(trail, tmp[0:8]...)
	if len(trail)%BlockSize != 0 {
		panic(fmt.Errorf("internal error: sum block was not aligned. len=%d, nx=%d", len(trail), d.nx))
	}
	// Reuse a pooled result channel; capacity 1 means the server never
	// blocks when delivering the digest.
	sumCh := sumChPool.Get().(chan sumResult)
	d.sendBlock(blockInput{uid: d.uid, msg: trail, sumCh: sumCh}, true)

	sum := <-sumCh
	sumChPool.Put(sumCh)

	return append(in, sum.digest[:]...)
}
168
169// sendBlock will send a block for processing.
170// If cycle is true we will block on cycle, otherwise we will only block
171// if the block channel is full.
172func (d *md5Digest) sendBlock(bi blockInput, cycle bool) {
173 if cycle {
174 select {
175 case d.blocksCh <- bi:
176 d.cycleServer <- d.uid
177 }
178 return
179 }
180 // Only block on cycle if we filled the buffer
181 select {
182 case d.blocksCh <- bi:
183 return
184 default:
185 d.cycleServer <- d.uid
186 d.blocksCh <- bi
187 }
188}
diff --git a/vendor/github.com/minio/md5-simd/md5-server_amd64.go b/vendor/github.com/minio/md5-simd/md5-server_amd64.go
new file mode 100644
index 0000000..94f741c
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-server_amd64.go
@@ -0,0 +1,397 @@
1//+build !noasm,!appengine,gc
2
3// Copyright (c) 2020 MinIO Inc. All rights reserved.
4// Use of this source code is governed by a license that can be
5// found in the LICENSE file.
6
7package md5simd
8
9import (
10 "encoding/binary"
11 "fmt"
12 "runtime"
13 "sync"
14
15 "github.com/klauspost/cpuid/v2"
16)
17
// MD5 initialization constants
const (
	// Lanes is the number of concurrently calculated hashes.
	Lanes = 16

	// MD5 initial state words (RFC 1321).
	init0 = 0x67452301
	init1 = 0xefcdab89
	init2 = 0x98badcfe
	init3 = 0x10325476

	// Use scalar routine when below this many lanes
	useScalarBelow = 3
)

// md5ServerUID - Does not start at 0 but next multiple of 16 so as to be able to
// differentiate with default initialisation value of 0
const md5ServerUID = Lanes

// buffersPerLane is the number of preallocated buffers per lane.
const buffersPerLane = 3
37
// Message to send across input channel
type blockInput struct {
	uid   uint64         // id of the sending client
	msg   []byte         // data to hash; buffer is returned to the pool after processing
	sumCh chan sumResult // when non-nil, requests the finalized digest
	reset bool           // when true, discards the client's stored interim state
}

// sumResult carries a finalized MD5 digest back to the requesting client.
type sumResult struct {
	digest [Size]byte
}

// lanesInfo holds the blocks collected for one processing cycle, one entry per lane.
type lanesInfo [Lanes]blockInput
51
// md5Server - Type to implement parallel handling of MD5 invocations
type md5Server struct {
	uidCounter   uint64                // source of unique client ids (incremented atomically)
	cycle        chan uint64           // client with uid has update.
	newInput     chan newClient        // Add new client.
	digests      map[uint64][Size]byte // Map of uids to (interim) digest results
	maskRounds16 [16]maskRounds        // Pre-allocated static array for max 16 rounds
	maskRounds8a [8]maskRounds         // Pre-allocated static array for max 8 rounds (1st AVX2 core)
	maskRounds8b [8]maskRounds         // Pre-allocated static array for max 8 rounds (2nd AVX2 core)
	allBufs      []byte                // Preallocated buffer.
	buffers      chan []byte           // Preallocated buffers, sliced from allBufs.

	i8       [2][8][]byte // avx2 temporary vars
	d8a, d8b digest8      // digest state for the two AVX2 half-cores
	wg       sync.WaitGroup
}
68
// NewServer - Create new object for parallel processing handling
func NewServer() Server {
	if !cpuid.CPU.Supports(cpuid.AVX2) {
		// No SIMD support: fall back to the stdlib-backed server.
		return &fallbackServer{}
	}
	md5srv := &md5Server{}
	md5srv.digests = make(map[uint64][Size]byte)
	md5srv.newInput = make(chan newClient, Lanes)
	md5srv.cycle = make(chan uint64, Lanes*10)
	// Start uids at md5ServerUID so a zero uid is always invalid.
	md5srv.uidCounter = md5ServerUID - 1
	// One contiguous allocation for all lane buffers.
	// NOTE(review): the extra 32 bytes at the start presumably give the
	// offset-based assembly addressing some slack — confirm against the
	// SIMD cores.
	md5srv.allBufs = make([]byte, 32+buffersPerLane*Lanes*internalBlockSize)
	md5srv.buffers = make(chan []byte, buffersPerLane*Lanes)
	// Fill buffers.
	for i := 0; i < buffersPerLane*Lanes; i++ {
		s := 32 + i*internalBlockSize
		// Three-index slice caps each buffer so an append can never spill
		// into the neighboring lane's buffer.
		md5srv.buffers <- md5srv.allBufs[s : s+internalBlockSize : s+internalBlockSize]
	}

	// Start a single thread for reading from the input channel
	go md5srv.process(md5srv.newInput)
	return md5srv
}
91
// newClient registers a hasher (uid) and its input channel with the server.
type newClient struct {
	uid   uint64
	input chan blockInput
}
96
// process - Sole handler for reading from the input channel.
//
// All access to s.digests happens on this goroutine (directly or after
// wg.Wait in s.blocks), which is what makes the unlocked map safe.
func (s *md5Server) process(newClients chan newClient) {
	// To fill up as many lanes as possible:
	//
	// 1. Wait for a cycle id.
	// 2. If not already in a lane, add, otherwise leave on channel
	// 3. Start timer
	// 4. Check if lanes is full, if so, goto 10 (process).
	// 5. If timeout, goto 10.
	// 6. Wait for new id (goto 2) or timeout (goto 10).
	// 10. Process.
	// 11. Check all input if there is already input, if so add to lanes.
	// 12. Goto 1

	// lanes contains the lanes.
	var lanes lanesInfo
	// lanesFilled contains the number of filled lanes for current cycle.
	var lanesFilled int
	// clients contains active clients
	var clients = make(map[uint64]chan blockInput, Lanes)

	// addToLane drains client uid's channel until it either contributes one
	// block of hashable data to the current cycle or has nothing queued.
	// Reset and Sum requests are handled inline here, synchronously, so
	// they observe/update s.digests in submission order.
	addToLane := func(uid uint64) {
		cl, ok := clients[uid]
		if !ok {
			// Unknown client. Maybe it was already removed.
			return
		}
		// Check if we already have it.
		for _, lane := range lanes[:lanesFilled] {
			if lane.uid == uid {
				return
			}
		}
		// Continue until we get a block or there is nothing on channel
		for {
			select {
			case block, ok := <-cl:
				if !ok {
					// Client disconnected
					delete(clients, block.uid)
					return
				}
				if block.uid != uid {
					panic(fmt.Errorf("uid mismatch, %d (block) != %d (client)", block.uid, uid))
				}
				// If reset message, reset and we're done
				if block.reset {
					delete(s.digests, uid)
					continue
				}

				// If requesting sum, we will need to maintain state.
				if block.sumCh != nil {
					// Load the stored interim state, or the MD5 init vector.
					var dig digest
					d, ok := s.digests[uid]
					if ok {
						dig.s[0] = binary.LittleEndian.Uint32(d[0:4])
						dig.s[1] = binary.LittleEndian.Uint32(d[4:8])
						dig.s[2] = binary.LittleEndian.Uint32(d[8:12])
						dig.s[3] = binary.LittleEndian.Uint32(d[12:16])
					} else {
						dig.s[0], dig.s[1], dig.s[2], dig.s[3] = init0, init1, init2, init3
					}

					sum := sumResult{}
					// Add end block to current digest.
					blockScalar(&dig.s, block.msg)

					binary.LittleEndian.PutUint32(sum.digest[0:], dig.s[0])
					binary.LittleEndian.PutUint32(sum.digest[4:], dig.s[1])
					binary.LittleEndian.PutUint32(sum.digest[8:], dig.s[2])
					binary.LittleEndian.PutUint32(sum.digest[12:], dig.s[3])
					block.sumCh <- sum
					if block.msg != nil {
						// Return the buffer to the pool for reuse.
						s.buffers <- block.msg
					}
					continue
				}
				if len(block.msg) == 0 {
					continue
				}
				// Got real data: claim a lane for this cycle.
				lanes[lanesFilled] = block
				lanesFilled++
				return
			default:
				return
			}
		}
	}
	// addNewClient registers a newly connected hasher.
	addNewClient := func(cl newClient) {
		if _, ok := clients[cl.uid]; ok {
			panic("internal error: duplicate client registration")
		}
		clients[cl.uid] = cl.input
	}

	// allLanesFilled reports whether no more lanes can usefully be filled
	// this cycle: all Lanes busy, or one lane per known client.
	allLanesFilled := func() bool {
		return lanesFilled == Lanes || lanesFilled >= len(clients)
	}

	for {
		// Step 1.
		for lanesFilled == 0 {
			select {
			case cl, ok := <-newClients:
				if !ok {
					// Server closed: terminate the processing goroutine.
					return
				}
				addNewClient(cl)
				// Check if it already sent a payload.
				addToLane(cl.uid)
				continue
			case uid := <-s.cycle:
				addToLane(uid)
			}
		}

	fillLanes:
		for !allLanesFilled() {
			select {
			case cl, ok := <-newClients:
				if !ok {
					return
				}
				addNewClient(cl)

			case uid := <-s.cycle:
				addToLane(uid)
			default:
				// Nothing more queued...
				break fillLanes
			}
		}

		// If we did not fill all lanes, check if there is more waiting
		if !allLanesFilled() {
			// Yield once so writers get a chance to queue more data.
			runtime.Gosched()
			for uid := range clients {
				addToLane(uid)
				if allLanesFilled() {
					break
				}
			}
		}
		// NOTE(review): dead debug code (constant-false condition); the
		// compiler eliminates it, but it is a candidate for removal.
		if false {
			if !allLanesFilled() {
				fmt.Println("Not all lanes filled", lanesFilled, "of", len(clients))
				//pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
			} else if true {
				fmt.Println("all lanes filled")
			}
		}
		// Process the lanes we could collect
		s.blocks(lanes[:lanesFilled])

		// Clear lanes...
		lanesFilled = 0
		// Add all current queued
		for uid := range clients {
			addToLane(uid)
			if allLanesFilled() {
				break
			}
		}
	}
}
263
264func (s *md5Server) Close() {
265 if s.newInput != nil {
266 close(s.newInput)
267 s.newInput = nil
268 }
269}
270
// Invoke assembly and send results back
//
// blocks hashes one batch of lane inputs and stores each lane's interim
// digest in s.digests keyed by uid. Small batches (below useScalarBelow)
// use the scalar core — one lane inline, two on separate goroutines —
// while larger batches run on the 8- or 16-wide SIMD core. Message buffers
// are returned to the pool and lane entries are zeroed when done.
func (s *md5Server) blocks(lanes []blockInput) {
	if len(lanes) < useScalarBelow {
		// Use scalar routine when below this many lanes
		switch len(lanes) {
		case 0:
		case 1:
			lane := lanes[0]
			var d digest
			// Load previous state for this stream, or the MD5 init vector.
			a, ok := s.digests[lane.uid]
			if ok {
				d.s[0] = binary.LittleEndian.Uint32(a[0:4])
				d.s[1] = binary.LittleEndian.Uint32(a[4:8])
				d.s[2] = binary.LittleEndian.Uint32(a[8:12])
				d.s[3] = binary.LittleEndian.Uint32(a[12:16])
			} else {
				d.s[0] = init0
				d.s[1] = init1
				d.s[2] = init2
				d.s[3] = init3
			}
			if len(lane.msg) > 0 {
				// Update...
				blockScalar(&d.s, lane.msg)
			}
			dig := [Size]byte{}
			binary.LittleEndian.PutUint32(dig[0:], d.s[0])
			binary.LittleEndian.PutUint32(dig[4:], d.s[1])
			binary.LittleEndian.PutUint32(dig[8:], d.s[2])
			binary.LittleEndian.PutUint32(dig[12:], d.s[3])
			s.digests[lane.uid] = dig

			if lane.msg != nil {
				s.buffers <- lane.msg
			}
			lanes[0] = blockInput{}

		default:
			// Two or more lanes: hash each on its own goroutine.
			// The goroutines only read s.digests; all writes happen after
			// wg.Wait, back on the single processing goroutine.
			s.wg.Add(len(lanes))
			var results [useScalarBelow]digest
			for i := range lanes {
				lane := lanes[i]
				go func(i int) {
					var d digest
					defer s.wg.Done()
					a, ok := s.digests[lane.uid]
					if ok {
						d.s[0] = binary.LittleEndian.Uint32(a[0:4])
						d.s[1] = binary.LittleEndian.Uint32(a[4:8])
						d.s[2] = binary.LittleEndian.Uint32(a[8:12])
						d.s[3] = binary.LittleEndian.Uint32(a[12:16])
					} else {
						d.s[0] = init0
						d.s[1] = init1
						d.s[2] = init2
						d.s[3] = init3
					}
					if len(lane.msg) == 0 {
						results[i] = d
						return
					}
					// Update...
					blockScalar(&d.s, lane.msg)
					results[i] = d
				}(i)
			}
			s.wg.Wait()
			// Store results and recycle buffers (single-threaded again).
			for i, lane := range lanes {
				dig := [Size]byte{}
				binary.LittleEndian.PutUint32(dig[0:], results[i].s[0])
				binary.LittleEndian.PutUint32(dig[4:], results[i].s[1])
				binary.LittleEndian.PutUint32(dig[8:], results[i].s[2])
				binary.LittleEndian.PutUint32(dig[12:], results[i].s[3])
				s.digests[lane.uid] = dig

				if lane.msg != nil {
					s.buffers <- lane.msg
				}
				lanes[i] = blockInput{}
			}
		}
		return
	}

	inputs := [16][]byte{}
	for i := range lanes {
		inputs[i] = lanes[i].msg
	}

	// Collect active digests...
	state := s.getDigests(lanes)
	// Process all lanes...
	// The final argument selects the 8-lane AVX2 path when 8 or fewer
	// lanes are active.
	s.blockMd5_x16(&state, inputs, len(lanes) <= 8)

	for i, lane := range lanes {
		uid := lane.uid
		dig := [Size]byte{}
		binary.LittleEndian.PutUint32(dig[0:], state.v0[i])
		binary.LittleEndian.PutUint32(dig[4:], state.v1[i])
		binary.LittleEndian.PutUint32(dig[8:], state.v2[i])
		binary.LittleEndian.PutUint32(dig[12:], state.v3[i])

		s.digests[uid] = dig
		if lane.msg != nil {
			s.buffers <- lane.msg
		}
		lanes[i] = blockInput{}
	}
}
380
381func (s *md5Server) getDigests(lanes []blockInput) (d digest16) {
382 for i, lane := range lanes {
383 a, ok := s.digests[lane.uid]
384 if ok {
385 d.v0[i] = binary.LittleEndian.Uint32(a[0:4])
386 d.v1[i] = binary.LittleEndian.Uint32(a[4:8])
387 d.v2[i] = binary.LittleEndian.Uint32(a[8:12])
388 d.v3[i] = binary.LittleEndian.Uint32(a[12:16])
389 } else {
390 d.v0[i] = init0
391 d.v1[i] = init1
392 d.v2[i] = init2
393 d.v3[i] = init3
394 }
395 }
396 return
397}
diff --git a/vendor/github.com/minio/md5-simd/md5-server_fallback.go b/vendor/github.com/minio/md5-simd/md5-server_fallback.go
new file mode 100644
index 0000000..7814dad
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-server_fallback.go
@@ -0,0 +1,12 @@
1//+build !amd64 appengine !gc noasm
2
3// Copyright (c) 2020 MinIO Inc. All rights reserved.
4// Use of this source code is governed by a license that can be
5// found in the LICENSE file.
6
7package md5simd
8
// NewServer - Create new object for parallel processing handling
// (fallback build: no assembly available, always returns the
// stdlib-backed server).
func NewServer() *fallbackServer {
	return &fallbackServer{}
}
diff --git a/vendor/github.com/minio/md5-simd/md5-util_amd64.go b/vendor/github.com/minio/md5-simd/md5-util_amd64.go
new file mode 100644
index 0000000..73981b0
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-util_amd64.go
@@ -0,0 +1,85 @@
1//+build !noasm,!appengine,gc
2
3// Copyright (c) 2020 MinIO Inc. All rights reserved.
4// Use of this source code is governed by a license that can be
5// found in the LICENSE file.
6
7package md5simd
8
// Helper struct for sorting blocks based on length
type lane struct {
	len uint // input length in bytes
	pos uint // original lane index before sorting
}

// digest holds the four 32-bit MD5 state words for a single lane.
type digest struct {
	s [4]uint32
}

// Helper struct for generating number of rounds in combination with mask for valid lanes
type maskRounds struct {
	mask   uint64 // bitmask of lanes still active for these rounds
	rounds uint64 // number of 64-byte blocks to process under this mask
}
24
// generateMaskAndRounds8 computes the (mask, rounds) schedule for the
// 8-lane core: lanes are sorted by input length, and for each increase in
// block count an entry is emitted pairing the mask of still-active lanes
// with the number of additional 64-byte blocks to process under that mask.
// Returns the number of schedule entries written to mr.
func generateMaskAndRounds8(input [8][]byte, mr *[8]maskRounds) (rounds int) {
	// Sort on blocks length small to large
	var sorted [8]lane
	for c, inpt := range input[:] {
		sorted[c] = lane{uint(len(inpt)), uint(c)}
		// Insertion sort: bubble the new entry down until in order.
		for i := c - 1; i >= 0; i-- {
			// swap so largest is at the end...
			if sorted[i].len > sorted[i+1].len {
				sorted[i], sorted[i+1] = sorted[i+1], sorted[i]
				continue
			}
			break
		}
	}

	// Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks
	m, round := uint64(0xff), uint64(0)

	for _, s := range sorted[:] {
		if s.len > 0 {
			if uint64(s.len)>>6 > round {
				// This lane needs more blocks than scheduled so far:
				// emit the current mask for the difference.
				mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round}
				rounds++
			}
			round = uint64(s.len) >> 6
		}
		// Lane s.pos is exhausted from here on; clear it from the mask.
		m = m & ^(1 << uint(s.pos))
	}
	return
}
55
56func generateMaskAndRounds16(input [16][]byte, mr *[16]maskRounds) (rounds int) {
57 // Sort on blocks length small to large
58 var sorted [16]lane
59 for c, inpt := range input[:] {
60 sorted[c] = lane{uint(len(inpt)), uint(c)}
61 for i := c - 1; i >= 0; i-- {
62 // swap so largest is at the end...
63 if sorted[i].len > sorted[i+1].len {
64 sorted[i], sorted[i+1] = sorted[i+1], sorted[i]
65 continue
66 }
67 break
68 }
69 }
70
71 // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks
72 m, round := uint64(0xffff), uint64(0)
73
74 for _, s := range sorted[:] {
75 if s.len > 0 {
76 if uint64(s.len)>>6 > round {
77 mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round}
78 rounds++
79 }
80 round = uint64(s.len) >> 6
81 }
82 m = m & ^(1 << uint(s.pos))
83 }
84 return
85}
diff --git a/vendor/github.com/minio/md5-simd/md5.go b/vendor/github.com/minio/md5-simd/md5.go
new file mode 100644
index 0000000..11b0cb9
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5.go
@@ -0,0 +1,63 @@
1package md5simd
2
3import (
4 "crypto/md5"
5 "hash"
6 "sync"
7)
8
9const (
10 // The blocksize of MD5 in bytes.
11 BlockSize = 64
12
13 // The size of an MD5 checksum in bytes.
14 Size = 16
15
16 // internalBlockSize is the internal block size.
17 internalBlockSize = 32 << 10
18)
19
// Server allows concurrent hashing of many streams: obtain per-stream
// hashers via NewHash and release all resources with Close.
type Server interface {
	NewHash() Hasher
	Close()
}

// Hasher extends hash.Hash with a Close method that returns the hasher's
// resources to its owner for reuse.
type Hasher interface {
	hash.Hash
	Close()
}
29
30// StdlibHasher returns a Hasher that uses the stdlib for hashing.
31// Used hashers are stored in a pool for fast reuse.
32func StdlibHasher() Hasher {
33 return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)}
34}
35
36// md5Wrapper is a wrapper around the builtin hasher.
37type md5Wrapper struct {
38 hash.Hash
39}
40
41var md5Pool = sync.Pool{New: func() interface{} {
42 return md5.New()
43}}
44
45// fallbackServer - Fallback when no assembly is available.
46type fallbackServer struct {
47}
48
49// NewHash -- return regular Golang md5 hashing from crypto
50func (s *fallbackServer) NewHash() Hasher {
51 return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)}
52}
53
54func (s *fallbackServer) Close() {
55}
56
57func (m *md5Wrapper) Close() {
58 if m.Hash != nil {
59 m.Reset()
60 md5Pool.Put(m.Hash)
61 m.Hash = nil
62 }
63}
diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.go b/vendor/github.com/minio/md5-simd/md5block_amd64.go
new file mode 100644
index 0000000..4c27936
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5block_amd64.go
@@ -0,0 +1,11 @@
1// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT.
2
3// +build !appengine
4// +build !noasm
5// +build gc
6
7package md5simd
8
9// Encode p to digest
10//go:noescape
11func blockScalar(dig *[4]uint32, p []byte)
diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.s b/vendor/github.com/minio/md5-simd/md5block_amd64.s
new file mode 100644
index 0000000..fbc4a21
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5block_amd64.s
@@ -0,0 +1,714 @@
1// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT.
2
3// +build !appengine
4// +build !noasm
5// +build gc
6
7// func blockScalar(dig *[4]uint32, p []byte)
8TEXT ·blockScalar(SB), $0-32
9 MOVQ p_len+16(FP), AX
10 MOVQ dig+0(FP), CX
11 MOVQ p_base+8(FP), DX
12 SHRQ $0x06, AX
13 SHLQ $0x06, AX
14 LEAQ (DX)(AX*1), AX
15 CMPQ DX, AX
16 JEQ end
17 MOVL (CX), BX
18 MOVL 4(CX), BP
19 MOVL 8(CX), SI
20 MOVL 12(CX), CX
21 MOVL $0xffffffff, DI
22
23loop:
24 MOVL (DX), R8
25 MOVL CX, R9
26 MOVL BX, R10
27 MOVL BP, R11
28 MOVL SI, R12
29 MOVL CX, R13
30
31 // ROUND1
32 XORL SI, R9
33 ADDL $0xd76aa478, BX
34 ADDL R8, BX
35 ANDL BP, R9
36 XORL CX, R9
37 MOVL 4(DX), R8
38 ADDL R9, BX
39 ROLL $0x07, BX
40 MOVL SI, R9
41 ADDL BP, BX
42 XORL BP, R9
43 ADDL $0xe8c7b756, CX
44 ADDL R8, CX
45 ANDL BX, R9
46 XORL SI, R9
47 MOVL 8(DX), R8
48 ADDL R9, CX
49 ROLL $0x0c, CX
50 MOVL BP, R9
51 ADDL BX, CX
52 XORL BX, R9
53 ADDL $0x242070db, SI
54 ADDL R8, SI
55 ANDL CX, R9
56 XORL BP, R9
57 MOVL 12(DX), R8
58 ADDL R9, SI
59 ROLL $0x11, SI
60 MOVL BX, R9
61 ADDL CX, SI
62 XORL CX, R9
63 ADDL $0xc1bdceee, BP
64 ADDL R8, BP
65 ANDL SI, R9
66 XORL BX, R9
67 MOVL 16(DX), R8
68 ADDL R9, BP
69 ROLL $0x16, BP
70 MOVL CX, R9
71 ADDL SI, BP
72 XORL SI, R9
73 ADDL $0xf57c0faf, BX
74 ADDL R8, BX
75 ANDL BP, R9
76 XORL CX, R9
77 MOVL 20(DX), R8
78 ADDL R9, BX
79 ROLL $0x07, BX
80 MOVL SI, R9
81 ADDL BP, BX
82 XORL BP, R9
83 ADDL $0x4787c62a, CX
84 ADDL R8, CX
85 ANDL BX, R9
86 XORL SI, R9
87 MOVL 24(DX), R8
88 ADDL R9, CX
89 ROLL $0x0c, CX
90 MOVL BP, R9
91 ADDL BX, CX
92 XORL BX, R9
93 ADDL $0xa8304613, SI
94 ADDL R8, SI
95 ANDL CX, R9
96 XORL BP, R9
97 MOVL 28(DX), R8
98 ADDL R9, SI
99 ROLL $0x11, SI
100 MOVL BX, R9
101 ADDL CX, SI
102 XORL CX, R9
103 ADDL $0xfd469501, BP
104 ADDL R8, BP
105 ANDL SI, R9
106 XORL BX, R9
107 MOVL 32(DX), R8
108 ADDL R9, BP
109 ROLL $0x16, BP
110 MOVL CX, R9
111 ADDL SI, BP
112 XORL SI, R9
113 ADDL $0x698098d8, BX
114 ADDL R8, BX
115 ANDL BP, R9
116 XORL CX, R9
117 MOVL 36(DX), R8
118 ADDL R9, BX
119 ROLL $0x07, BX
120 MOVL SI, R9
121 ADDL BP, BX
122 XORL BP, R9
123 ADDL $0x8b44f7af, CX
124 ADDL R8, CX
125 ANDL BX, R9
126 XORL SI, R9
127 MOVL 40(DX), R8
128 ADDL R9, CX
129 ROLL $0x0c, CX
130 MOVL BP, R9
131 ADDL BX, CX
132 XORL BX, R9
133 ADDL $0xffff5bb1, SI
134 ADDL R8, SI
135 ANDL CX, R9
136 XORL BP, R9
137 MOVL 44(DX), R8
138 ADDL R9, SI
139 ROLL $0x11, SI
140 MOVL BX, R9
141 ADDL CX, SI
142 XORL CX, R9
143 ADDL $0x895cd7be, BP
144 ADDL R8, BP
145 ANDL SI, R9
146 XORL BX, R9
147 MOVL 48(DX), R8
148 ADDL R9, BP
149 ROLL $0x16, BP
150 MOVL CX, R9
151 ADDL SI, BP
152 XORL SI, R9
153 ADDL $0x6b901122, BX
154 ADDL R8, BX
155 ANDL BP, R9
156 XORL CX, R9
157 MOVL 52(DX), R8
158 ADDL R9, BX
159 ROLL $0x07, BX
160 MOVL SI, R9
161 ADDL BP, BX
162 XORL BP, R9
163 ADDL $0xfd987193, CX
164 ADDL R8, CX
165 ANDL BX, R9
166 XORL SI, R9
167 MOVL 56(DX), R8
168 ADDL R9, CX
169 ROLL $0x0c, CX
170 MOVL BP, R9
171 ADDL BX, CX
172 XORL BX, R9
173 ADDL $0xa679438e, SI
174 ADDL R8, SI
175 ANDL CX, R9
176 XORL BP, R9
177 MOVL 60(DX), R8
178 ADDL R9, SI
179 ROLL $0x11, SI
180 MOVL BX, R9
181 ADDL CX, SI
182 XORL CX, R9
183 ADDL $0x49b40821, BP
184 ADDL R8, BP
185 ANDL SI, R9
186 XORL BX, R9
187 MOVL 4(DX), R8
188 ADDL R9, BP
189 ROLL $0x16, BP
190 MOVL CX, R9
191 ADDL SI, BP
192
193 // ROUND2
194 MOVL CX, R9
195 MOVL CX, R14
196 XORL DI, R9
197 ADDL $0xf61e2562, BX
198 ADDL R8, BX
199 ANDL BP, R14
200 ANDL SI, R9
201 MOVL 24(DX), R8
202 ORL R9, R14
203 MOVL SI, R9
204 ADDL R14, BX
205 MOVL SI, R14
206 ROLL $0x05, BX
207 ADDL BP, BX
208 XORL DI, R9
209 ADDL $0xc040b340, CX
210 ADDL R8, CX
211 ANDL BX, R14
212 ANDL BP, R9
213 MOVL 44(DX), R8
214 ORL R9, R14
215 MOVL BP, R9
216 ADDL R14, CX
217 MOVL BP, R14
218 ROLL $0x09, CX
219 ADDL BX, CX
220 XORL DI, R9
221 ADDL $0x265e5a51, SI
222 ADDL R8, SI
223 ANDL CX, R14
224 ANDL BX, R9
225 MOVL (DX), R8
226 ORL R9, R14
227 MOVL BX, R9
228 ADDL R14, SI
229 MOVL BX, R14
230 ROLL $0x0e, SI
231 ADDL CX, SI
232 XORL DI, R9
233 ADDL $0xe9b6c7aa, BP
234 ADDL R8, BP
235 ANDL SI, R14
236 ANDL CX, R9
237 MOVL 20(DX), R8
238 ORL R9, R14
239 MOVL CX, R9
240 ADDL R14, BP
241 MOVL CX, R14
242 ROLL $0x14, BP
243 ADDL SI, BP
244 XORL DI, R9
245 ADDL $0xd62f105d, BX
246 ADDL R8, BX
247 ANDL BP, R14
248 ANDL SI, R9
249 MOVL 40(DX), R8
250 ORL R9, R14
251 MOVL SI, R9
252 ADDL R14, BX
253 MOVL SI, R14
254 ROLL $0x05, BX
255 ADDL BP, BX
256 XORL DI, R9
257 ADDL $0x02441453, CX
258 ADDL R8, CX
259 ANDL BX, R14
260 ANDL BP, R9
261 MOVL 60(DX), R8
262 ORL R9, R14
263 MOVL BP, R9
264 ADDL R14, CX
265 MOVL BP, R14
266 ROLL $0x09, CX
267 ADDL BX, CX
268 XORL DI, R9
269 ADDL $0xd8a1e681, SI
270 ADDL R8, SI
271 ANDL CX, R14
272 ANDL BX, R9
273 MOVL 16(DX), R8
274 ORL R9, R14
275 MOVL BX, R9
276 ADDL R14, SI
277 MOVL BX, R14
278 ROLL $0x0e, SI
279 ADDL CX, SI
280 XORL DI, R9
281 ADDL $0xe7d3fbc8, BP
282 ADDL R8, BP
283 ANDL SI, R14
284 ANDL CX, R9
285 MOVL 36(DX), R8
286 ORL R9, R14
287 MOVL CX, R9
288 ADDL R14, BP
289 MOVL CX, R14
290 ROLL $0x14, BP
291 ADDL SI, BP
292 XORL DI, R9
293 ADDL $0x21e1cde6, BX
294 ADDL R8, BX
295 ANDL BP, R14
296 ANDL SI, R9
297 MOVL 56(DX), R8
298 ORL R9, R14
299 MOVL SI, R9
300 ADDL R14, BX
301 MOVL SI, R14
302 ROLL $0x05, BX
303 ADDL BP, BX
304 XORL DI, R9
305 ADDL $0xc33707d6, CX
306 ADDL R8, CX
307 ANDL BX, R14
308 ANDL BP, R9
309 MOVL 12(DX), R8
310 ORL R9, R14
311 MOVL BP, R9
312 ADDL R14, CX
313 MOVL BP, R14
314 ROLL $0x09, CX
315 ADDL BX, CX
316 XORL DI, R9
317 ADDL $0xf4d50d87, SI
318 ADDL R8, SI
319 ANDL CX, R14
320 ANDL BX, R9
321 MOVL 32(DX), R8
322 ORL R9, R14
323 MOVL BX, R9
324 ADDL R14, SI
325 MOVL BX, R14
326 ROLL $0x0e, SI
327 ADDL CX, SI
328 XORL DI, R9
329 ADDL $0x455a14ed, BP
330 ADDL R8, BP
331 ANDL SI, R14
332 ANDL CX, R9
333 MOVL 52(DX), R8
334 ORL R9, R14
335 MOVL CX, R9
336 ADDL R14, BP
337 MOVL CX, R14
338 ROLL $0x14, BP
339 ADDL SI, BP
340 XORL DI, R9
341 ADDL $0xa9e3e905, BX
342 ADDL R8, BX
343 ANDL BP, R14
344 ANDL SI, R9
345 MOVL 8(DX), R8
346 ORL R9, R14
347 MOVL SI, R9
348 ADDL R14, BX
349 MOVL SI, R14
350 ROLL $0x05, BX
351 ADDL BP, BX
352 XORL DI, R9
353 ADDL $0xfcefa3f8, CX
354 ADDL R8, CX
355 ANDL BX, R14
356 ANDL BP, R9
357 MOVL 28(DX), R8
358 ORL R9, R14
359 MOVL BP, R9
360 ADDL R14, CX
361 MOVL BP, R14
362 ROLL $0x09, CX
363 ADDL BX, CX
364 XORL DI, R9
365 ADDL $0x676f02d9, SI
366 ADDL R8, SI
367 ANDL CX, R14
368 ANDL BX, R9
369 MOVL 48(DX), R8
370 ORL R9, R14
371 MOVL BX, R9
372 ADDL R14, SI
373 MOVL BX, R14
374 ROLL $0x0e, SI
375 ADDL CX, SI
376 XORL DI, R9
377 ADDL $0x8d2a4c8a, BP
378 ADDL R8, BP
379 ANDL SI, R14
380 ANDL CX, R9
381 MOVL 20(DX), R8
382 ORL R9, R14
383 MOVL CX, R9
384 ADDL R14, BP
385 MOVL CX, R14
386 ROLL $0x14, BP
387 ADDL SI, BP
388
389 // ROUND3
390 MOVL SI, R9
391 ADDL $0xfffa3942, BX
392 ADDL R8, BX
393 MOVL 32(DX), R8
394 XORL CX, R9
395 XORL BP, R9
396 ADDL R9, BX
397 ROLL $0x04, BX
398 MOVL BP, R9
399 ADDL BP, BX
400 ADDL $0x8771f681, CX
401 ADDL R8, CX
402 MOVL 44(DX), R8
403 XORL SI, R9
404 XORL BX, R9
405 ADDL R9, CX
406 ROLL $0x0b, CX
407 MOVL BX, R9
408 ADDL BX, CX
409 ADDL $0x6d9d6122, SI
410 ADDL R8, SI
411 MOVL 56(DX), R8
412 XORL BP, R9
413 XORL CX, R9
414 ADDL R9, SI
415 ROLL $0x10, SI
416 MOVL CX, R9
417 ADDL CX, SI
418 ADDL $0xfde5380c, BP
419 ADDL R8, BP
420 MOVL 4(DX), R8
421 XORL BX, R9
422 XORL SI, R9
423 ADDL R9, BP
424 ROLL $0x17, BP
425 MOVL SI, R9
426 ADDL SI, BP
427 ADDL $0xa4beea44, BX
428 ADDL R8, BX
429 MOVL 16(DX), R8
430 XORL CX, R9
431 XORL BP, R9
432 ADDL R9, BX
433 ROLL $0x04, BX
434 MOVL BP, R9
435 ADDL BP, BX
436 ADDL $0x4bdecfa9, CX
437 ADDL R8, CX
438 MOVL 28(DX), R8
439 XORL SI, R9
440 XORL BX, R9
441 ADDL R9, CX
442 ROLL $0x0b, CX
443 MOVL BX, R9
444 ADDL BX, CX
445 ADDL $0xf6bb4b60, SI
446 ADDL R8, SI
447 MOVL 40(DX), R8
448 XORL BP, R9
449 XORL CX, R9
450 ADDL R9, SI
451 ROLL $0x10, SI
452 MOVL CX, R9
453 ADDL CX, SI
454 ADDL $0xbebfbc70, BP
455 ADDL R8, BP
456 MOVL 52(DX), R8
457 XORL BX, R9
458 XORL SI, R9
459 ADDL R9, BP
460 ROLL $0x17, BP
461 MOVL SI, R9
462 ADDL SI, BP
463 ADDL $0x289b7ec6, BX
464 ADDL R8, BX
465 MOVL (DX), R8
466 XORL CX, R9
467 XORL BP, R9
468 ADDL R9, BX
469 ROLL $0x04, BX
470 MOVL BP, R9
471 ADDL BP, BX
472 ADDL $0xeaa127fa, CX
473 ADDL R8, CX
474 MOVL 12(DX), R8
475 XORL SI, R9
476 XORL BX, R9
477 ADDL R9, CX
478 ROLL $0x0b, CX
479 MOVL BX, R9
480 ADDL BX, CX
481 ADDL $0xd4ef3085, SI
482 ADDL R8, SI
483 MOVL 24(DX), R8
484 XORL BP, R9
485 XORL CX, R9
486 ADDL R9, SI
487 ROLL $0x10, SI
488 MOVL CX, R9
489 ADDL CX, SI
490 ADDL $0x04881d05, BP
491 ADDL R8, BP
492 MOVL 36(DX), R8
493 XORL BX, R9
494 XORL SI, R9
495 ADDL R9, BP
496 ROLL $0x17, BP
497 MOVL SI, R9
498 ADDL SI, BP
499 ADDL $0xd9d4d039, BX
500 ADDL R8, BX
501 MOVL 48(DX), R8
502 XORL CX, R9
503 XORL BP, R9
504 ADDL R9, BX
505 ROLL $0x04, BX
506 MOVL BP, R9
507 ADDL BP, BX
508 ADDL $0xe6db99e5, CX
509 ADDL R8, CX
510 MOVL 60(DX), R8
511 XORL SI, R9
512 XORL BX, R9
513 ADDL R9, CX
514 ROLL $0x0b, CX
515 MOVL BX, R9
516 ADDL BX, CX
517 ADDL $0x1fa27cf8, SI
518 ADDL R8, SI
519 MOVL 8(DX), R8
520 XORL BP, R9
521 XORL CX, R9
522 ADDL R9, SI
523 ROLL $0x10, SI
524 MOVL CX, R9
525 ADDL CX, SI
526 ADDL $0xc4ac5665, BP
527 ADDL R8, BP
528 MOVL (DX), R8
529 XORL BX, R9
530 XORL SI, R9
531 ADDL R9, BP
532 ROLL $0x17, BP
533 MOVL SI, R9
534 ADDL SI, BP
535
536 // ROUND4
537 MOVL DI, R9
538 XORL CX, R9
539 ADDL $0xf4292244, BX
540 ADDL R8, BX
541 ORL BP, R9
542 XORL SI, R9
543 ADDL R9, BX
544 MOVL 28(DX), R8
545 MOVL DI, R9
546 ROLL $0x06, BX
547 XORL SI, R9
548 ADDL BP, BX
549 ADDL $0x432aff97, CX
550 ADDL R8, CX
551 ORL BX, R9
552 XORL BP, R9
553 ADDL R9, CX
554 MOVL 56(DX), R8
555 MOVL DI, R9
556 ROLL $0x0a, CX
557 XORL BP, R9
558 ADDL BX, CX
559 ADDL $0xab9423a7, SI
560 ADDL R8, SI
561 ORL CX, R9
562 XORL BX, R9
563 ADDL R9, SI
564 MOVL 20(DX), R8
565 MOVL DI, R9
566 ROLL $0x0f, SI
567 XORL BX, R9
568 ADDL CX, SI
569 ADDL $0xfc93a039, BP
570 ADDL R8, BP
571 ORL SI, R9
572 XORL CX, R9
573 ADDL R9, BP
574 MOVL 48(DX), R8
575 MOVL DI, R9
576 ROLL $0x15, BP
577 XORL CX, R9
578 ADDL SI, BP
579 ADDL $0x655b59c3, BX
580 ADDL R8, BX
581 ORL BP, R9
582 XORL SI, R9
583 ADDL R9, BX
584 MOVL 12(DX), R8
585 MOVL DI, R9
586 ROLL $0x06, BX
587 XORL SI, R9
588 ADDL BP, BX
589 ADDL $0x8f0ccc92, CX
590 ADDL R8, CX
591 ORL BX, R9
592 XORL BP, R9
593 ADDL R9, CX
594 MOVL 40(DX), R8
595 MOVL DI, R9
596 ROLL $0x0a, CX
597 XORL BP, R9
598 ADDL BX, CX
599 ADDL $0xffeff47d, SI
600 ADDL R8, SI
601 ORL CX, R9
602 XORL BX, R9
603 ADDL R9, SI
604 MOVL 4(DX), R8
605 MOVL DI, R9
606 ROLL $0x0f, SI
607 XORL BX, R9
608 ADDL CX, SI
609 ADDL $0x85845dd1, BP
610 ADDL R8, BP
611 ORL SI, R9
612 XORL CX, R9
613 ADDL R9, BP
614 MOVL 32(DX), R8
615 MOVL DI, R9
616 ROLL $0x15, BP
617 XORL CX, R9
618 ADDL SI, BP
619 ADDL $0x6fa87e4f, BX
620 ADDL R8, BX
621 ORL BP, R9
622 XORL SI, R9
623 ADDL R9, BX
624 MOVL 60(DX), R8
625 MOVL DI, R9
626 ROLL $0x06, BX
627 XORL SI, R9
628 ADDL BP, BX
629 ADDL $0xfe2ce6e0, CX
630 ADDL R8, CX
631 ORL BX, R9
632 XORL BP, R9
633 ADDL R9, CX
634 MOVL 24(DX), R8
635 MOVL DI, R9
636 ROLL $0x0a, CX
637 XORL BP, R9
638 ADDL BX, CX
639 ADDL $0xa3014314, SI
640 ADDL R8, SI
641 ORL CX, R9
642 XORL BX, R9
643 ADDL R9, SI
644 MOVL 52(DX), R8
645 MOVL DI, R9
646 ROLL $0x0f, SI
647 XORL BX, R9
648 ADDL CX, SI
649 ADDL $0x4e0811a1, BP
650 ADDL R8, BP
651 ORL SI, R9
652 XORL CX, R9
653 ADDL R9, BP
654 MOVL 16(DX), R8
655 MOVL DI, R9
656 ROLL $0x15, BP
657 XORL CX, R9
658 ADDL SI, BP
659 ADDL $0xf7537e82, BX
660 ADDL R8, BX
661 ORL BP, R9
662 XORL SI, R9
663 ADDL R9, BX
664 MOVL 44(DX), R8
665 MOVL DI, R9
666 ROLL $0x06, BX
667 XORL SI, R9
668 ADDL BP, BX
669 ADDL $0xbd3af235, CX
670 ADDL R8, CX
671 ORL BX, R9
672 XORL BP, R9
673 ADDL R9, CX
674 MOVL 8(DX), R8
675 MOVL DI, R9
676 ROLL $0x0a, CX
677 XORL BP, R9
678 ADDL BX, CX
679 ADDL $0x2ad7d2bb, SI
680 ADDL R8, SI
681 ORL CX, R9
682 XORL BX, R9
683 ADDL R9, SI
684 MOVL 36(DX), R8
685 MOVL DI, R9
686 ROLL $0x0f, SI
687 XORL BX, R9
688 ADDL CX, SI
689 ADDL $0xeb86d391, BP
690 ADDL R8, BP
691 ORL SI, R9
692 XORL CX, R9
693 ADDL R9, BP
694 ROLL $0x15, BP
695 ADDL SI, BP
696 ADDL R10, BX
697 ADDL R11, BP
698 ADDL R12, SI
699 ADDL R13, CX
700
701 // Prepare next loop
702 ADDQ $0x40, DX
703 CMPQ DX, AX
704 JB loop
705
706 // Write output
707 MOVQ dig+0(FP), AX
708 MOVL BX, (AX)
709 MOVL BP, 4(AX)
710 MOVL SI, 8(AX)
711 MOVL CX, 12(AX)
712
713end:
714 RET
diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore
new file mode 100644
index 0000000..8ae0384
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.gitignore
@@ -0,0 +1,6 @@
1*~
2*.test
3validator
4golangci-lint
5functional_tests
6.idea \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml
new file mode 100644
index 0000000..875b949
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml
@@ -0,0 +1,27 @@
1linters-settings:
2 misspell:
3 locale: US
4
5linters:
6 disable-all: true
7 enable:
8 - typecheck
9 - goimports
10 - misspell
11 - revive
12 - govet
13 - ineffassign
14 - gosimple
15 - unused
16 - gocritic
17
18issues:
19 exclude-use-default: false
20 exclude:
21 # todo fix these when we get enough time.
22 - "singleCaseSwitch: should rewrite switch statement to if statement"
23 - "unlambda: replace"
24 - "captLocal:"
25 - "ifElseChain:"
26 - "elseif:"
27 - "should have a package comment"
diff --git a/vendor/github.com/minio/minio-go/v7/CNAME b/vendor/github.com/minio/minio-go/v7/CNAME
new file mode 100644
index 0000000..d365a7b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CNAME
@@ -0,0 +1 @@
minio-go.min.io \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
new file mode 100644
index 0000000..24522ef
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
@@ -0,0 +1,22 @@
1### Developer Guidelines
2
3``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
4
5* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
6 - Fork it
7 - Create your feature branch (git checkout -b my-new-feature)
8 - Commit your changes (git commit -am 'Add some feature')
9 - Push to the branch (git push origin my-new-feature)
10 - Create new Pull Request
11
12* When you're ready to create a pull request, be sure to:
13 - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
14 - Run `go fmt`
15 - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
16 - Make sure `go test -race ./...` and `go build` completes.
17 NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set the credentials as environment variables
18 ``ACCESS_KEY`` and ``SECRET_KEY``. To run shorter version of the tests please use ``go test -short -race ./...``
19
20* Read the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
21 - `minio-go` project is strictly conformant with Golang style
22 - if you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/v7/LICENSE b/vendor/github.com/minio/minio-go/v7/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/LICENSE
@@ -0,0 +1,202 @@
1
2 Apache License
3 Version 2.0, January 2004
4 http://www.apache.org/licenses/
5
6 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
8 1. Definitions.
9
10 "License" shall mean the terms and conditions for use, reproduction,
11 and distribution as defined by Sections 1 through 9 of this document.
12
13 "Licensor" shall mean the copyright owner or entity authorized by
14 the copyright owner that is granting the License.
15
16 "Legal Entity" shall mean the union of the acting entity and all
17 other entities that control, are controlled by, or are under common
18 control with that entity. For the purposes of this definition,
19 "control" means (i) the power, direct or indirect, to cause the
20 direction or management of such entity, whether by contract or
21 otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 outstanding shares, or (iii) beneficial ownership of such entity.
23
24 "You" (or "Your") shall mean an individual or Legal Entity
25 exercising permissions granted by this License.
26
27 "Source" form shall mean the preferred form for making modifications,
28 including but not limited to software source code, documentation
29 source, and configuration files.
30
31 "Object" form shall mean any form resulting from mechanical
32 transformation or translation of a Source form, including but
33 not limited to compiled object code, generated documentation,
34 and conversions to other media types.
35
36 "Work" shall mean the work of authorship, whether in Source or
37 Object form, made available under the License, as indicated by a
38 copyright notice that is included in or attached to the work
39 (an example is provided in the Appendix below).
40
41 "Derivative Works" shall mean any work, whether in Source or Object
42 form, that is based on (or derived from) the Work and for which the
43 editorial revisions, annotations, elaborations, or other modifications
44 represent, as a whole, an original work of authorship. For the purposes
45 of this License, Derivative Works shall not include works that remain
46 separable from, or merely link (or bind by name) to the interfaces of,
47 the Work and Derivative Works thereof.
48
49 "Contribution" shall mean any work of authorship, including
50 the original version of the Work and any modifications or additions
51 to that Work or Derivative Works thereof, that is intentionally
52 submitted to Licensor for inclusion in the Work by the copyright owner
53 or by an individual or Legal Entity authorized to submit on behalf of
54 the copyright owner. For the purposes of this definition, "submitted"
55 means any form of electronic, verbal, or written communication sent
56 to the Licensor or its representatives, including but not limited to
57 communication on electronic mailing lists, source code control systems,
58 and issue tracking systems that are managed by, or on behalf of, the
59 Licensor for the purpose of discussing and improving the Work, but
60 excluding communication that is conspicuously marked or otherwise
61 designated in writing by the copyright owner as "Not a Contribution."
62
63 "Contributor" shall mean Licensor and any individual or Legal Entity
64 on behalf of whom a Contribution has been received by Licensor and
65 subsequently incorporated within the Work.
66
67 2. Grant of Copyright License. Subject to the terms and conditions of
68 this License, each Contributor hereby grants to You a perpetual,
69 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 copyright license to reproduce, prepare Derivative Works of,
71 publicly display, publicly perform, sublicense, and distribute the
72 Work and such Derivative Works in Source or Object form.
73
74 3. Grant of Patent License. Subject to the terms and conditions of
75 this License, each Contributor hereby grants to You a perpetual,
76 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 (except as stated in this section) patent license to make, have made,
78 use, offer to sell, sell, import, and otherwise transfer the Work,
79 where such license applies only to those patent claims licensable
80 by such Contributor that are necessarily infringed by their
81 Contribution(s) alone or by combination of their Contribution(s)
82 with the Work to which such Contribution(s) was submitted. If You
83 institute patent litigation against any entity (including a
84 cross-claim or counterclaim in a lawsuit) alleging that the Work
85 or a Contribution incorporated within the Work constitutes direct
86 or contributory patent infringement, then any patent licenses
87 granted to You under this License for that Work shall terminate
88 as of the date such litigation is filed.
89
90 4. Redistribution. You may reproduce and distribute copies of the
91 Work or Derivative Works thereof in any medium, with or without
92 modifications, and in Source or Object form, provided that You
93 meet the following conditions:
94
95 (a) You must give any other recipients of the Work or
96 Derivative Works a copy of this License; and
97
98 (b) You must cause any modified files to carry prominent notices
99 stating that You changed the files; and
100
101 (c) You must retain, in the Source form of any Derivative Works
102 that You distribute, all copyright, patent, trademark, and
103 attribution notices from the Source form of the Work,
104 excluding those notices that do not pertain to any part of
105 the Derivative Works; and
106
107 (d) If the Work includes a "NOTICE" text file as part of its
108 distribution, then any Derivative Works that You distribute must
109 include a readable copy of the attribution notices contained
110 within such NOTICE file, excluding those notices that do not
111 pertain to any part of the Derivative Works, in at least one
112 of the following places: within a NOTICE text file distributed
113 as part of the Derivative Works; within the Source form or
114 documentation, if provided along with the Derivative Works; or,
115 within a display generated by the Derivative Works, if and
116 wherever such third-party notices normally appear. The contents
117 of the NOTICE file are for informational purposes only and
118 do not modify the License. You may add Your own attribution
119 notices within Derivative Works that You distribute, alongside
120 or as an addendum to the NOTICE text from the Work, provided
121 that such additional attribution notices cannot be construed
122 as modifying the License.
123
124 You may add Your own copyright statement to Your modifications and
125 may provide additional or different license terms and conditions
126 for use, reproduction, or distribution of Your modifications, or
127 for any such Derivative Works as a whole, provided Your use,
128 reproduction, and distribution of the Work otherwise complies with
129 the conditions stated in this License.
130
131 5. Submission of Contributions. Unless You explicitly state otherwise,
132 any Contribution intentionally submitted for inclusion in the Work
133 by You to the Licensor shall be under the terms and conditions of
134 this License, without any additional terms or conditions.
135 Notwithstanding the above, nothing herein shall supersede or modify
136 the terms of any separate license agreement you may have executed
137 with Licensor regarding such Contributions.
138
139 6. Trademarks. This License does not grant permission to use the trade
140 names, trademarks, service marks, or product names of the Licensor,
141 except as required for reasonable and customary use in describing the
142 origin of the Work and reproducing the content of the NOTICE file.
143
144 7. Disclaimer of Warranty. Unless required by applicable law or
145 agreed to in writing, Licensor provides the Work (and each
146 Contributor provides its Contributions) on an "AS IS" BASIS,
147 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 implied, including, without limitation, any warranties or conditions
149 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 PARTICULAR PURPOSE. You are solely responsible for determining the
151 appropriateness of using or redistributing the Work and assume any
152 risks associated with Your exercise of permissions under this License.
153
154 8. Limitation of Liability. In no event and under no legal theory,
155 whether in tort (including negligence), contract, or otherwise,
156 unless required by applicable law (such as deliberate and grossly
157 negligent acts) or agreed to in writing, shall any Contributor be
158 liable to You for damages, including any direct, indirect, special,
159 incidental, or consequential damages of any character arising as a
160 result of this License or out of the use or inability to use the
161 Work (including but not limited to damages for loss of goodwill,
162 work stoppage, computer failure or malfunction, or any and all
163 other commercial damages or losses), even if such Contributor
164 has been advised of the possibility of such damages.
165
166 9. Accepting Warranty or Additional Liability. While redistributing
167 the Work or Derivative Works thereof, You may choose to offer,
168 and charge a fee for, acceptance of support, warranty, indemnity,
169 or other liability obligations and/or rights consistent with this
170 License. However, in accepting such obligations, You may act only
171 on Your own behalf and on Your sole responsibility, not on behalf
172 of any other Contributor, and only if You agree to indemnify,
173 defend, and hold each Contributor harmless for any liability
174 incurred by, or claims asserted against, such Contributor by reason
175 of your accepting any such warranty or additional liability.
176
177 END OF TERMS AND CONDITIONS
178
179 APPENDIX: How to apply the Apache License to your work.
180
181 To apply the Apache License to your work, attach the following
182 boilerplate notice, with the fields enclosed by brackets "[]"
183 replaced with your own identifying information. (Don't include
184 the brackets!) The text should be enclosed in the appropriate
185 comment syntax for the file format. We also recommend that a
186 file or class name and description of purpose be included on the
187 same "printed page" as the copyright notice for easier
188 identification within third-party archives.
189
190 Copyright [yyyy] [name of copyright owner]
191
192 Licensed under the Apache License, Version 2.0 (the "License");
193 you may not use this file except in compliance with the License.
194 You may obtain a copy of the License at
195
196 http://www.apache.org/licenses/LICENSE-2.0
197
198 Unless required by applicable law or agreed to in writing, software
199 distributed under the License is distributed on an "AS IS" BASIS,
200 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 See the License for the specific language governing permissions and
202 limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
new file mode 100644
index 0000000..f640dfb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
@@ -0,0 +1,35 @@
1# For maintainers only
2
3## Responsibilities
4
5Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
6
7### Making new releases
8Tag and sign your release commit, additionally this step requires you to have access to MinIO's trusted private key.
9```sh
10$ export GNUPGHOME=/media/${USER}/minio/trusted
11$ git tag -s 4.0.0
12$ git push
13$ git push --tags
14```
15
16### Update version
17Once a release has been made, update the `libraryVersion` constant in `api.go` to the next to-be-released version.
18
19```sh
20$ grep libraryVersion api.go
21 libraryVersion = "4.0.1"
22```
23
24Commit your changes
25```
26$ git commit -a -m "Update version for next release" --author "MinIO Trusted <[email protected]>"
27```
28
29### Announce
30Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `[email protected]` account. Release notes require two sections, `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release, and Changelog contains a list of all commits since the last release.
31
32To generate `changelog`
33```sh
34$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
35```
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
new file mode 100644
index 0000000..68444aa
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -0,0 +1,38 @@
1GOPATH := $(shell go env GOPATH)
2TMPDIR := $(shell mktemp -d)
3
4all: checks
5
6.PHONY: examples docs
7
8checks: lint vet test examples functional-test
9
10lint:
11 @mkdir -p ${GOPATH}/bin
12 @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
13 @echo "Running $@ check"
14 @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
15 @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
16
17vet:
18 @GO111MODULE=on go vet ./...
19 @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
20 ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"
21
22test:
23 @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
24
25examples:
26 @echo "Building s3 examples"
27 @cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
28 @echo "Building minio examples"
29 @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
30
31functional-test:
32 @GO111MODULE=on go build -race functional_tests.go
33 @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
34
35clean:
36 @echo "Cleaning up all the generated files"
37 @find . -name '*.test' | xargs rm -fv
38 @find . -name '*~' | xargs rm -fv
diff --git a/vendor/github.com/minio/minio-go/v7/NOTICE b/vendor/github.com/minio/minio-go/v7/NOTICE
new file mode 100644
index 0000000..1e8fd3b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/NOTICE
@@ -0,0 +1,9 @@
1MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.
2
3This product includes software developed at MinIO, Inc.
4(https://min.io/).
5
6The MinIO project contains unmodified/modified subcomponents too with
7separate copyright notices and license terms. Your use of the source
8code for these subcomponents is subject to the terms and conditions
9of Apache License Version 2.0
diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md
new file mode 100644
index 0000000..82f70a1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/README.md
@@ -0,0 +1,312 @@
1# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
2
3The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage.
4
5This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader.
6For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).
7
8These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html).
9
10## Download from Github
11
12From your project directory:
13
14```sh
15go get github.com/minio/minio-go/v7
16```
17
18## Initialize a MinIO Client Object
19
20The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage:
21
22| Parameter | Description |
23| ----------------- | ---------------------------------------------------------- |
24| `endpoint` | URL to object storage service. |
25| `_minio.Options_` | All the options such as credentials, custom transport etc. |
26
27```go
28package main
29
30import (
31 "log"
32
33 "github.com/minio/minio-go/v7"
34 "github.com/minio/minio-go/v7/pkg/credentials"
35)
36
37func main() {
38 endpoint := "play.min.io"
39 accessKeyID := "Q3AM3UQ867SPQQA43P2F"
40 secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
41 useSSL := true
42
43 // Initialize minio client object.
44 minioClient, err := minio.New(endpoint, &minio.Options{
45 Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
46 Secure: useSSL,
47 })
48 if err != nil {
49 log.Fatalln(err)
50 }
51
52 log.Printf("%#v\n", minioClient) // minioClient is now set up
53}
54```
55
56## Example - File Uploader
57
58This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket.
59It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io).
60
61The `play` server runs the latest stable version of MinIO and may be used for testing and development.
62The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected.
63
64### FileUploader.go
65
66This example does the following:
67
68- Connects to the MinIO `play` server using the provided credentials.
69- Creates a bucket named `testbucket`.
70- Uploads a file named `testdata` from `/tmp`.
71- Verifies the file was created using `mc ls`.
72
73```go
74// FileUploader.go MinIO example
75package main
76
77import (
78 "context"
79 "log"
80
81 "github.com/minio/minio-go/v7"
82 "github.com/minio/minio-go/v7/pkg/credentials"
83)
84
85func main() {
86 ctx := context.Background()
87 endpoint := "play.min.io"
88 accessKeyID := "Q3AM3UQ867SPQQA43P2F"
89 secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
90 useSSL := true
91
92 // Initialize minio client object.
93 minioClient, err := minio.New(endpoint, &minio.Options{
94 Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
95 Secure: useSSL,
96 })
97 if err != nil {
98 log.Fatalln(err)
99 }
100
101 // Make a new bucket called testbucket.
102 bucketName := "testbucket"
103 location := "us-east-1"
104
105 err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
106 if err != nil {
107 // Check to see if we already own this bucket (which happens if you run this twice)
108 exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
109 if errBucketExists == nil && exists {
110 log.Printf("We already own %s\n", bucketName)
111 } else {
112 log.Fatalln(err)
113 }
114 } else {
115 log.Printf("Successfully created %s\n", bucketName)
116 }
117
118 // Upload the test file
119 // Change the value of filePath if the file is in another location
120 objectName := "testdata"
121 filePath := "/tmp/testdata"
122 contentType := "application/octet-stream"
123
124 // Upload the test file with FPutObject
125 info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
126 if err != nil {
127 log.Fatalln(err)
128 }
129
130 log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size)
131}
132```
133
134**1. Create a test file containing data:**
135
136You can do this with `dd` on Linux or macOS systems:
137
138```sh
139dd if=/dev/urandom of=/tmp/testdata bs=2048 count=10
140```
141
142or `fsutil` on Windows:
143
144```sh
145fsutil file createnew "C:\Users\<username>\Desktop\sample.txt" 20480
146```
147
148**2. Run FileUploader with the following commands:**
149
150```sh
151go mod init example/FileUploader
152go get github.com/minio/minio-go/v7
153go get github.com/minio/minio-go/v7/pkg/credentials
154go run FileUploader.go
155```
156
157The output resembles the following:
158
159```sh
1602023/11/01 14:27:55 Successfully created testbucket
1612023/11/01 14:27:55 Successfully uploaded testdata of size 20480
162```
163
164**3. Verify the Uploaded File With `mc ls`:**
165
166```sh
167mc ls play/testbucket
168[2023-11-01 14:27:55 UTC] 20KiB STANDARD TestDataFile
169```
170
171## API Reference
172
173The full API Reference is available here.
174
175* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
176
177### API Reference : Bucket Operations
178
179* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
180* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
181* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
182* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket)
183* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects)
184* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)
185
186### API Reference : Bucket policy Operations
187
188* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
189* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)
190
191### API Reference : Bucket notification Operations
192
193* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
194* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
195* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
196* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension)
197* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension)
198
199### API Reference : File Object Operations
200
201* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
202* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject)
203
204### API Reference : Object Operations
205
206* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
207* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
208* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
209* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject)
210* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject)
211* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject)
212* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects)
213* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
214* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)
215
216### API Reference : Presigned Operations
217
218* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
219* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
220* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject)
221* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy)
222
223### API Reference : Client custom settings
224
225* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
226* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
227* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)
228
229## Full Examples
230
231### Full Examples : Bucket Operations
232
233* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
234* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
235* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
236* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
237* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
238* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
239* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
240
241### Full Examples : Bucket policy Operations
242
243* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
244* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
245* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
246
247### Full Examples : Bucket lifecycle Operations
248
249* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
250* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
251
252### Full Examples : Bucket encryption Operations
253
254* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
255* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
256* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
257
258### Full Examples : Bucket replication Operations
259
260* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
261* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
262* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
263
264### Full Examples : Bucket notification Operations
265
266* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
267* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
268* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
269* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
270* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
271
272### Full Examples : File Object Operations
273
274* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
275* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
276
277### Full Examples : Object Operations
278
279* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
280* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
281* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
282* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
283* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
284* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
285* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
286
287### Full Examples : Encrypted Object Operations
288
289* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
290* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
291* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
292
293### Full Examples : Presigned Operations
294
295* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
296* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
297* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
298* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
299
300## Explore Further
301
302* [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7)
303* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html)
304* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
305
306## Contribute
307
308[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
309
310## License
311
312This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
new file mode 100644
index 0000000..24f94e0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
@@ -0,0 +1,134 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package minio
18
19import (
20 "bytes"
21 "context"
22 "encoding/xml"
23 "net/http"
24 "net/url"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27 "github.com/minio/minio-go/v7/pkg/sse"
28)
29
30// SetBucketEncryption sets the default encryption configuration on an existing bucket.
31func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error {
32 // Input validation.
33 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
34 return err
35 }
36
37 if config == nil {
38 return errInvalidArgument("configuration cannot be empty")
39 }
40
41 buf, err := xml.Marshal(config)
42 if err != nil {
43 return err
44 }
45
46 // Get resources properly escaped and lined up before
47 // using them in http request.
48 urlValues := make(url.Values)
49 urlValues.Set("encryption", "")
50
51 // Content-length is mandatory to set a default encryption configuration
52 reqMetadata := requestMetadata{
53 bucketName: bucketName,
54 queryValues: urlValues,
55 contentBody: bytes.NewReader(buf),
56 contentLength: int64(len(buf)),
57 contentMD5Base64: sumMD5Base64(buf),
58 }
59
60 // Execute PUT to upload a new bucket default encryption configuration.
61 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
62 defer closeResponse(resp)
63 if err != nil {
64 return err
65 }
66 if resp.StatusCode != http.StatusOK {
67 return httpRespToErrorResponse(resp, bucketName, "")
68 }
69 return nil
70}
71
72// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts.
73func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error {
74 // Input validation.
75 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
76 return err
77 }
78
79 // Get resources properly escaped and lined up before
80 // using them in http request.
81 urlValues := make(url.Values)
82 urlValues.Set("encryption", "")
83
84 // DELETE default encryption configuration on a bucket.
85 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
86 bucketName: bucketName,
87 queryValues: urlValues,
88 contentSHA256Hex: emptySHA256Hex,
89 })
90 defer closeResponse(resp)
91 if err != nil {
92 return err
93 }
94 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
95 return httpRespToErrorResponse(resp, bucketName, "")
96 }
97 return nil
98}
99
100// GetBucketEncryption gets the default encryption configuration
101// on an existing bucket with a context to control cancellations and timeouts.
102func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) {
103 // Input validation.
104 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
105 return nil, err
106 }
107
108 // Get resources properly escaped and lined up before
109 // using them in http request.
110 urlValues := make(url.Values)
111 urlValues.Set("encryption", "")
112
113 // Execute GET on bucket to get the default encryption configuration.
114 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
115 bucketName: bucketName,
116 queryValues: urlValues,
117 })
118
119 defer closeResponse(resp)
120 if err != nil {
121 return nil, err
122 }
123
124 if resp.StatusCode != http.StatusOK {
125 return nil, httpRespToErrorResponse(resp, bucketName, "")
126 }
127
128 encryptionConfig := &sse.Configuration{}
129 if err = xmlDecoder(resp.Body, encryptionConfig); err != nil {
130 return nil, err
131 }
132
133 return encryptionConfig, nil
134}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
new file mode 100644
index 0000000..fec5cec
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
@@ -0,0 +1,169 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "io"
25 "net/http"
26 "net/url"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/lifecycle"
30 "github.com/minio/minio-go/v7/pkg/s3utils"
31)
32
33// SetBucketLifecycle set the lifecycle on an existing bucket.
34func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error {
35 // Input validation.
36 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
37 return err
38 }
39
40 // If lifecycle is empty then delete it.
41 if config.Empty() {
42 return c.removeBucketLifecycle(ctx, bucketName)
43 }
44
45 buf, err := xml.Marshal(config)
46 if err != nil {
47 return err
48 }
49
50 // Save the updated lifecycle.
51 return c.putBucketLifecycle(ctx, bucketName, buf)
52}
53
54// Saves a new bucket lifecycle.
55func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error {
56 // Get resources properly escaped and lined up before
57 // using them in http request.
58 urlValues := make(url.Values)
59 urlValues.Set("lifecycle", "")
60
61 // Content-length is mandatory for put lifecycle request
62 reqMetadata := requestMetadata{
63 bucketName: bucketName,
64 queryValues: urlValues,
65 contentBody: bytes.NewReader(buf),
66 contentLength: int64(len(buf)),
67 contentMD5Base64: sumMD5Base64(buf),
68 }
69
70 // Execute PUT to upload a new bucket lifecycle.
71 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
72 defer closeResponse(resp)
73 if err != nil {
74 return err
75 }
76 if resp != nil {
77 if resp.StatusCode != http.StatusOK {
78 return httpRespToErrorResponse(resp, bucketName, "")
79 }
80 }
81 return nil
82}
83
84// Remove lifecycle from a bucket.
85func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error {
86 // Get resources properly escaped and lined up before
87 // using them in http request.
88 urlValues := make(url.Values)
89 urlValues.Set("lifecycle", "")
90
91 // Execute DELETE on objectName.
92 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
93 bucketName: bucketName,
94 queryValues: urlValues,
95 contentSHA256Hex: emptySHA256Hex,
96 })
97 defer closeResponse(resp)
98 if err != nil {
99 return err
100 }
101 return nil
102}
103
// GetBucketLifecycle fetch bucket lifecycle configuration
func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) {
	// Thin wrapper: delegates to GetBucketLifecycleWithInfo and discards
	// the last-updated timestamp from the result.
	lc, _, err := c.GetBucketLifecycleWithInfo(ctx, bucketName)
	return lc, err
}
109
110// GetBucketLifecycleWithInfo fetch bucket lifecycle configuration along with when it was last updated
111func (c *Client) GetBucketLifecycleWithInfo(ctx context.Context, bucketName string) (*lifecycle.Configuration, time.Time, error) {
112 // Input validation.
113 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
114 return nil, time.Time{}, err
115 }
116
117 bucketLifecycle, updatedAt, err := c.getBucketLifecycle(ctx, bucketName)
118 if err != nil {
119 return nil, time.Time{}, err
120 }
121
122 config := lifecycle.NewConfiguration()
123 if err = xml.Unmarshal(bucketLifecycle, config); err != nil {
124 return nil, time.Time{}, err
125 }
126 return config, updatedAt, nil
127}
128
129// Request server for current bucket lifecycle.
130func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, time.Time, error) {
131 // Get resources properly escaped and lined up before
132 // using them in http request.
133 urlValues := make(url.Values)
134 urlValues.Set("lifecycle", "")
135 urlValues.Set("withUpdatedAt", "true")
136
137 // Execute GET on bucket to get lifecycle.
138 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
139 bucketName: bucketName,
140 queryValues: urlValues,
141 })
142
143 defer closeResponse(resp)
144 if err != nil {
145 return nil, time.Time{}, err
146 }
147
148 if resp != nil {
149 if resp.StatusCode != http.StatusOK {
150 return nil, time.Time{}, httpRespToErrorResponse(resp, bucketName, "")
151 }
152 }
153
154 lcBytes, err := io.ReadAll(resp.Body)
155 if err != nil {
156 return nil, time.Time{}, err
157 }
158
159 const minIOLifecycleCfgUpdatedAt = "X-Minio-LifecycleConfig-UpdatedAt"
160 var updatedAt time.Time
161 if timeStr := resp.Header.Get(minIOLifecycleCfgUpdatedAt); timeStr != "" {
162 updatedAt, err = time.Parse(iso8601DateFormat, timeStr)
163 if err != nil {
164 return nil, time.Time{}, err
165 }
166 }
167
168 return lcBytes, updatedAt, nil
169}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
new file mode 100644
index 0000000..8de5c01
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
@@ -0,0 +1,261 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bufio"
22 "bytes"
23 "context"
24 "encoding/xml"
25 "net/http"
26 "net/url"
27 "time"
28
29 jsoniter "github.com/json-iterator/go"
30 "github.com/minio/minio-go/v7/pkg/notification"
31 "github.com/minio/minio-go/v7/pkg/s3utils"
32)
33
34// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts.
35func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error {
36 // Input validation.
37 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
38 return err
39 }
40
41 // Get resources properly escaped and lined up before
42 // using them in http request.
43 urlValues := make(url.Values)
44 urlValues.Set("notification", "")
45
46 notifBytes, err := xml.Marshal(&config)
47 if err != nil {
48 return err
49 }
50
51 notifBuffer := bytes.NewReader(notifBytes)
52 reqMetadata := requestMetadata{
53 bucketName: bucketName,
54 queryValues: urlValues,
55 contentBody: notifBuffer,
56 contentLength: int64(len(notifBytes)),
57 contentMD5Base64: sumMD5Base64(notifBytes),
58 contentSHA256Hex: sum256Hex(notifBytes),
59 }
60
61 // Execute PUT to upload a new bucket notification.
62 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
63 defer closeResponse(resp)
64 if err != nil {
65 return err
66 }
67 if resp != nil {
68 if resp.StatusCode != http.StatusOK {
69 return httpRespToErrorResponse(resp, bucketName, "")
70 }
71 }
72 return nil
73}
74
// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config
func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error {
	// Uploading an empty configuration removes every notification rule.
	return c.SetBucketNotification(ctx, bucketName, notification.Configuration{})
}
79
// GetBucketNotification returns current bucket notification configuration
func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return notification.Configuration{}, err
	}
	// Delegate the HTTP round trip and decoding to the unexported helper.
	return c.getBucketNotification(ctx, bucketName)
}
88
89// Request server for notification rules.
90func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) {
91 urlValues := make(url.Values)
92 urlValues.Set("notification", "")
93
94 // Execute GET on bucket to list objects.
95 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
96 bucketName: bucketName,
97 queryValues: urlValues,
98 contentSHA256Hex: emptySHA256Hex,
99 })
100
101 defer closeResponse(resp)
102 if err != nil {
103 return notification.Configuration{}, err
104 }
105 return processBucketNotificationResponse(bucketName, resp)
106}
107
108// processes the GetNotification http response from the server.
109func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) {
110 if resp.StatusCode != http.StatusOK {
111 errResponse := httpRespToErrorResponse(resp, bucketName, "")
112 return notification.Configuration{}, errResponse
113 }
114 var bucketNotification notification.Configuration
115 err := xmlDecoder(resp.Body, &bucketNotification)
116 if err != nil {
117 return notification.Configuration{}, err
118 }
119 return bucketNotification, nil
120}
121
// ListenNotification listen for all events, this is a MinIO specific API
func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info {
	// Empty bucket name listens at the account level rather than on one bucket.
	return c.ListenBucketNotification(ctx, "", prefix, suffix, events)
}
126
// ListenBucketNotification listen for bucket events, this is a MinIO specific API
//
// The returned channel is closed when ctx is cancelled or an unrecoverable
// error occurs; errors are delivered in-band via notification.Info.Err.
func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info {
	// Buffer of 1 lets the goroutine park one event without blocking.
	notificationInfoCh := make(chan notification.Info, 1)
	// 4 MiB scanner buffer; see the comment at bio.Buffer below.
	const notificationCapacity = 4 * 1024 * 1024
	notificationEventBuffer := make([]byte, notificationCapacity)
	// Only success, start a routine to start reading line by line.
	go func(notificationInfoCh chan<- notification.Info) {
		defer close(notificationInfoCh)

		// Validate the bucket name.
		if bucketName != "" {
			if err := s3utils.CheckValidBucketName(bucketName); err != nil {
				select {
				case notificationInfoCh <- notification.Info{
					Err: err,
				}:
				case <-ctx.Done():
				}
				return
			}
		}

		// Check ARN partition to verify if listening bucket is supported
		if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
			select {
			case notificationInfoCh <- notification.Info{
				Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
			}:
			case <-ctx.Done():
			}
			return
		}

		// Continuously run and listen on bucket notification.
		// Create a done channel to control 'ListObjects' go routine.
		retryDoneCh := make(chan struct{}, 1)

		// Indicate to our routine to exit cleanly upon return.
		defer close(retryDoneCh)

		// Prepare urlValues to pass into the request on every loop
		urlValues := make(url.Values)
		// NOTE(review): "ping" appears to ask the server for keep-alive
		// events every 10 units — confirm semantics against server docs.
		urlValues.Set("ping", "10")
		urlValues.Set("prefix", prefix)
		urlValues.Set("suffix", suffix)
		urlValues["events"] = events

		// Wait on the jitter retry loop.
		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
			// Execute GET on bucket to list objects.
			resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
				bucketName:       bucketName,
				queryValues:      urlValues,
				contentSHA256Hex: emptySHA256Hex,
			})
			if err != nil {
				select {
				case notificationInfoCh <- notification.Info{
					Err: err,
				}:
				case <-ctx.Done():
				}
				return
			}

			// Validate http response, upon error return quickly.
			if resp.StatusCode != http.StatusOK {
				errResponse := httpRespToErrorResponse(resp, bucketName, "")
				select {
				case notificationInfoCh <- notification.Info{
					Err: errResponse,
				}:
				case <-ctx.Done():
				}
				return
			}

			// Initialize a new bufio scanner, to read line by line.
			bio := bufio.NewScanner(resp.Body)

			// Use a higher buffer to support unexpected
			// caching done by proxies
			bio.Buffer(notificationEventBuffer, notificationCapacity)
			json := jsoniter.ConfigCompatibleWithStandardLibrary

			// Unmarshal each line, returns marshaled values.
			for bio.Scan() {
				var notificationInfo notification.Info
				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
					// Unexpected error during json unmarshal, send
					// the error to caller for actionable as needed.
					select {
					case notificationInfoCh <- notification.Info{
						Err: err,
					}:
					case <-ctx.Done():
						return
					}
					// Drop this connection and retry from the outer loop.
					closeResponse(resp)
					continue
				}

				// Empty events pinged from the server
				if len(notificationInfo.Records) == 0 && notificationInfo.Err == nil {
					continue
				}

				// Send notificationInfo
				select {
				case notificationInfoCh <- notificationInfo:
				case <-ctx.Done():
					closeResponse(resp)
					return
				}
			}

			// Scanner errors (e.g. oversized line, broken stream) are
			// forwarded to the caller before retrying.
			if err = bio.Err(); err != nil {
				select {
				case notificationInfoCh <- notification.Info{
					Err: err,
				}:
				case <-ctx.Done():
					return
				}
			}

			// Close current connection before looping further.
			closeResponse(resp)

		}
	}(notificationInfoCh)

	// Returns the notification info channel, for caller to start reading from.
	return notificationInfoCh
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
new file mode 100644
index 0000000..dbb5259
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
@@ -0,0 +1,147 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package minio
18
19import (
20 "context"
21 "io"
22 "net/http"
23 "net/url"
24 "strings"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29// SetBucketPolicy sets the access permissions on an existing bucket.
30func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error {
31 // Input validation.
32 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
33 return err
34 }
35
36 // If policy is empty then delete the bucket policy.
37 if policy == "" {
38 return c.removeBucketPolicy(ctx, bucketName)
39 }
40
41 // Save the updated policies.
42 return c.putBucketPolicy(ctx, bucketName, policy)
43}
44
45// Saves a new bucket policy.
46func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error {
47 // Get resources properly escaped and lined up before
48 // using them in http request.
49 urlValues := make(url.Values)
50 urlValues.Set("policy", "")
51
52 reqMetadata := requestMetadata{
53 bucketName: bucketName,
54 queryValues: urlValues,
55 contentBody: strings.NewReader(policy),
56 contentLength: int64(len(policy)),
57 }
58
59 // Execute PUT to upload a new bucket policy.
60 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
61 defer closeResponse(resp)
62 if err != nil {
63 return err
64 }
65 if resp != nil {
66 if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
67 return httpRespToErrorResponse(resp, bucketName, "")
68 }
69 }
70 return nil
71}
72
73// Removes all policies on a bucket.
74func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error {
75 // Get resources properly escaped and lined up before
76 // using them in http request.
77 urlValues := make(url.Values)
78 urlValues.Set("policy", "")
79
80 // Execute DELETE on objectName.
81 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
82 bucketName: bucketName,
83 queryValues: urlValues,
84 contentSHA256Hex: emptySHA256Hex,
85 })
86 defer closeResponse(resp)
87 if err != nil {
88 return err
89 }
90
91 if resp.StatusCode != http.StatusNoContent {
92 return httpRespToErrorResponse(resp, bucketName, "")
93 }
94
95 return nil
96}
97
98// GetBucketPolicy returns the current policy
99func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) {
100 // Input validation.
101 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
102 return "", err
103 }
104 bucketPolicy, err := c.getBucketPolicy(ctx, bucketName)
105 if err != nil {
106 errResponse := ToErrorResponse(err)
107 if errResponse.Code == "NoSuchBucketPolicy" {
108 return "", nil
109 }
110 return "", err
111 }
112 return bucketPolicy, nil
113}
114
115// Request server for current bucket policy.
116func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) {
117 // Get resources properly escaped and lined up before
118 // using them in http request.
119 urlValues := make(url.Values)
120 urlValues.Set("policy", "")
121
122 // Execute GET on bucket to list objects.
123 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
124 bucketName: bucketName,
125 queryValues: urlValues,
126 contentSHA256Hex: emptySHA256Hex,
127 })
128
129 defer closeResponse(resp)
130 if err != nil {
131 return "", err
132 }
133
134 if resp != nil {
135 if resp.StatusCode != http.StatusOK {
136 return "", httpRespToErrorResponse(resp, bucketName, "")
137 }
138 }
139
140 bucketPolicyBuf, err := io.ReadAll(resp.Body)
141 if err != nil {
142 return "", err
143 }
144
145 policy := string(bucketPolicyBuf)
146 return policy, err
147}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
new file mode 100644
index 0000000..b12bb13
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
@@ -0,0 +1,355 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/json"
24 "encoding/xml"
25 "io"
26 "net/http"
27 "net/url"
28 "time"
29
30 "github.com/google/uuid"
31 "github.com/minio/minio-go/v7/pkg/replication"
32 "github.com/minio/minio-go/v7/pkg/s3utils"
33)
34
// RemoveBucketReplication removes a replication config on an existing bucket.
func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error {
	// Thin exported wrapper over the unexported implementation.
	return c.removeBucketReplication(ctx, bucketName)
}
39
40// SetBucketReplication sets a replication config on an existing bucket.
41func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
42 // Input validation.
43 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
44 return err
45 }
46
47 // If replication is empty then delete it.
48 if cfg.Empty() {
49 return c.removeBucketReplication(ctx, bucketName)
50 }
51 // Save the updated replication.
52 return c.putBucketReplication(ctx, bucketName, cfg)
53}
54
55// Saves a new bucket replication.
56func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
57 // Get resources properly escaped and lined up before
58 // using them in http request.
59 urlValues := make(url.Values)
60 urlValues.Set("replication", "")
61 replication, err := xml.Marshal(cfg)
62 if err != nil {
63 return err
64 }
65
66 reqMetadata := requestMetadata{
67 bucketName: bucketName,
68 queryValues: urlValues,
69 contentBody: bytes.NewReader(replication),
70 contentLength: int64(len(replication)),
71 contentMD5Base64: sumMD5Base64(replication),
72 }
73
74 // Execute PUT to upload a new bucket replication config.
75 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
76 defer closeResponse(resp)
77 if err != nil {
78 return err
79 }
80
81 if resp.StatusCode != http.StatusOK {
82 return httpRespToErrorResponse(resp, bucketName, "")
83 }
84
85 return nil
86}
87
88// Remove replication from a bucket.
89func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error {
90 // Get resources properly escaped and lined up before
91 // using them in http request.
92 urlValues := make(url.Values)
93 urlValues.Set("replication", "")
94
95 // Execute DELETE on objectName.
96 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
97 bucketName: bucketName,
98 queryValues: urlValues,
99 contentSHA256Hex: emptySHA256Hex,
100 })
101 defer closeResponse(resp)
102 if err != nil {
103 return err
104 }
105 if resp.StatusCode != http.StatusOK {
106 return httpRespToErrorResponse(resp, bucketName, "")
107 }
108 return nil
109}
110
111// GetBucketReplication fetches bucket replication configuration.If config is not
112// found, returns empty config with nil error.
113func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
114 // Input validation.
115 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
116 return cfg, err
117 }
118 bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName)
119 if err != nil {
120 errResponse := ToErrorResponse(err)
121 if errResponse.Code == "ReplicationConfigurationNotFoundError" {
122 return cfg, nil
123 }
124 return cfg, err
125 }
126 return bucketReplicationCfg, nil
127}
128
129// Request server for current bucket replication config.
130func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
131 // Get resources properly escaped and lined up before
132 // using them in http request.
133 urlValues := make(url.Values)
134 urlValues.Set("replication", "")
135
136 // Execute GET on bucket to get replication config.
137 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
138 bucketName: bucketName,
139 queryValues: urlValues,
140 })
141
142 defer closeResponse(resp)
143 if err != nil {
144 return cfg, err
145 }
146
147 if resp.StatusCode != http.StatusOK {
148 return cfg, httpRespToErrorResponse(resp, bucketName, "")
149 }
150
151 if err = xmlDecoder(resp.Body, &cfg); err != nil {
152 return cfg, err
153 }
154
155 return cfg, nil
156}
157
158// GetBucketReplicationMetrics fetches bucket replication status metrics
159func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) {
160 // Input validation.
161 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
162 return s, err
163 }
164 // Get resources properly escaped and lined up before
165 // using them in http request.
166 urlValues := make(url.Values)
167 urlValues.Set("replication-metrics", "")
168
169 // Execute GET on bucket to get replication config.
170 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
171 bucketName: bucketName,
172 queryValues: urlValues,
173 })
174
175 defer closeResponse(resp)
176 if err != nil {
177 return s, err
178 }
179
180 if resp.StatusCode != http.StatusOK {
181 return s, httpRespToErrorResponse(resp, bucketName, "")
182 }
183 respBytes, err := io.ReadAll(resp.Body)
184 if err != nil {
185 return s, err
186 }
187
188 if err := json.Unmarshal(respBytes, &s); err != nil {
189 return s, err
190 }
191 return s, nil
192}
193
// mustGetUUID - get a random UUID.
//
// NOTE(review): despite the must- prefix this does not panic; when
// uuid.NewRandom fails (e.g. crypto/rand exhausted) it returns "" and
// callers such as ResetBucketReplication would send an empty reset ID —
// confirm this fallback is intended.
func mustGetUUID() string {
	u, err := uuid.NewRandom()
	if err != nil {
		return ""
	}
	return u.String()
}
202
203// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
204// is enabled in the replication config
205func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
206 rID = mustGetUUID()
207 _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID)
208 if err != nil {
209 return rID, err
210 }
211 return rID, nil
212}
213
// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if
// ExistingObjectReplication is enabled in the replication config
func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) {
	// A fresh reset ID is generated for each call; see mustGetUUID.
	return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID())
}
219
220// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
221// is enabled in the replication config
222func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
223 // Input validation.
224 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
225 return
226 }
227 // Get resources properly escaped and lined up before
228 // using them in http request.
229 urlValues := make(url.Values)
230 urlValues.Set("replication-reset", "")
231 if olderThan > 0 {
232 urlValues.Set("older-than", olderThan.String())
233 }
234 if tgtArn != "" {
235 urlValues.Set("arn", tgtArn)
236 }
237 urlValues.Set("reset-id", resetID)
238 // Execute GET on bucket to get replication config.
239 resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
240 bucketName: bucketName,
241 queryValues: urlValues,
242 })
243
244 defer closeResponse(resp)
245 if err != nil {
246 return rinfo, err
247 }
248
249 if resp.StatusCode != http.StatusOK {
250 return rinfo, httpRespToErrorResponse(resp, bucketName, "")
251 }
252
253 if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
254 return rinfo, err
255 }
256 return rinfo, nil
257}
258
259// GetBucketReplicationResyncStatus gets the status of replication resync
260func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) {
261 // Input validation.
262 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
263 return rinfo, err
264 }
265 // Get resources properly escaped and lined up before
266 // using them in http request.
267 urlValues := make(url.Values)
268 urlValues.Set("replication-reset-status", "")
269 if arn != "" {
270 urlValues.Set("arn", arn)
271 }
272 // Execute GET on bucket to get replication config.
273 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
274 bucketName: bucketName,
275 queryValues: urlValues,
276 })
277
278 defer closeResponse(resp)
279 if err != nil {
280 return rinfo, err
281 }
282
283 if resp.StatusCode != http.StatusOK {
284 return rinfo, httpRespToErrorResponse(resp, bucketName, "")
285 }
286
287 if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
288 return rinfo, err
289 }
290 return rinfo, nil
291}
292
293// GetBucketReplicationMetricsV2 fetches bucket replication status metrics
294func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) {
295 // Input validation.
296 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
297 return s, err
298 }
299 // Get resources properly escaped and lined up before
300 // using them in http request.
301 urlValues := make(url.Values)
302 urlValues.Set("replication-metrics", "2")
303
304 // Execute GET on bucket to get replication metrics.
305 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
306 bucketName: bucketName,
307 queryValues: urlValues,
308 })
309
310 defer closeResponse(resp)
311 if err != nil {
312 return s, err
313 }
314
315 if resp.StatusCode != http.StatusOK {
316 return s, httpRespToErrorResponse(resp, bucketName, "")
317 }
318 respBytes, err := io.ReadAll(resp.Body)
319 if err != nil {
320 return s, err
321 }
322
323 if err := json.Unmarshal(respBytes, &s); err != nil {
324 return s, err
325 }
326 return s, nil
327}
328
329// CheckBucketReplication validates if replication is set up properly for a bucket
330func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) {
331 // Input validation.
332 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
333 return err
334 }
335 // Get resources properly escaped and lined up before
336 // using them in http request.
337 urlValues := make(url.Values)
338 urlValues.Set("replication-check", "")
339
340 // Execute GET on bucket to get replication config.
341 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
342 bucketName: bucketName,
343 queryValues: urlValues,
344 })
345
346 defer closeResponse(resp)
347 if err != nil {
348 return err
349 }
350
351 if resp.StatusCode != http.StatusOK {
352 return httpRespToErrorResponse(resp, bucketName, "")
353 }
354 return nil
355}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
new file mode 100644
index 0000000..86d7429
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
@@ -0,0 +1,134 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package minio
18
19import (
20 "bytes"
21 "context"
22 "encoding/xml"
23 "errors"
24 "io"
25 "net/http"
26 "net/url"
27
28 "github.com/minio/minio-go/v7/pkg/s3utils"
29 "github.com/minio/minio-go/v7/pkg/tags"
30)
31
32// GetBucketTagging fetch tagging configuration for a bucket with a
33// context to control cancellations and timeouts.
34func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) {
35 // Input validation.
36 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
37 return nil, err
38 }
39
40 // Get resources properly escaped and lined up before
41 // using them in http request.
42 urlValues := make(url.Values)
43 urlValues.Set("tagging", "")
44
45 // Execute GET on bucket to get tagging configuration.
46 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
47 bucketName: bucketName,
48 queryValues: urlValues,
49 })
50
51 defer closeResponse(resp)
52 if err != nil {
53 return nil, err
54 }
55
56 if resp.StatusCode != http.StatusOK {
57 return nil, httpRespToErrorResponse(resp, bucketName, "")
58 }
59
60 defer io.Copy(io.Discard, resp.Body)
61 return tags.ParseBucketXML(resp.Body)
62}
63
64// SetBucketTagging sets tagging configuration for a bucket
65// with a context to control cancellations and timeouts.
66func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error {
67 // Input validation.
68 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
69 return err
70 }
71
72 if tags == nil {
73 return errors.New("nil tags passed")
74 }
75
76 buf, err := xml.Marshal(tags)
77 if err != nil {
78 return err
79 }
80
81 // Get resources properly escaped and lined up before
82 // using them in http request.
83 urlValues := make(url.Values)
84 urlValues.Set("tagging", "")
85
86 // Content-length is mandatory to set a default encryption configuration
87 reqMetadata := requestMetadata{
88 bucketName: bucketName,
89 queryValues: urlValues,
90 contentBody: bytes.NewReader(buf),
91 contentLength: int64(len(buf)),
92 contentMD5Base64: sumMD5Base64(buf),
93 }
94
95 // Execute PUT on bucket to put tagging configuration.
96 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
97 defer closeResponse(resp)
98 if err != nil {
99 return err
100 }
101 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
102 return httpRespToErrorResponse(resp, bucketName, "")
103 }
104 return nil
105}
106
107// RemoveBucketTagging removes tagging configuration for a
108// bucket with a context to control cancellations and timeouts.
109func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error {
110 // Input validation.
111 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
112 return err
113 }
114
115 // Get resources properly escaped and lined up before
116 // using them in http request.
117 urlValues := make(url.Values)
118 urlValues.Set("tagging", "")
119
120 // Execute DELETE on bucket to remove tagging configuration.
121 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
122 bucketName: bucketName,
123 queryValues: urlValues,
124 contentSHA256Hex: emptySHA256Hex,
125 })
126 defer closeResponse(resp)
127 if err != nil {
128 return err
129 }
130 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
131 return httpRespToErrorResponse(resp, bucketName, "")
132 }
133 return nil
134}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
new file mode 100644
index 0000000..8c84e4f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
@@ -0,0 +1,146 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package minio
18
19import (
20 "bytes"
21 "context"
22 "encoding/xml"
23 "net/http"
24 "net/url"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29// SetBucketVersioning sets a bucket versioning configuration
30func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error {
31 // Input validation.
32 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
33 return err
34 }
35
36 buf, err := xml.Marshal(config)
37 if err != nil {
38 return err
39 }
40
41 // Get resources properly escaped and lined up before
42 // using them in http request.
43 urlValues := make(url.Values)
44 urlValues.Set("versioning", "")
45
46 reqMetadata := requestMetadata{
47 bucketName: bucketName,
48 queryValues: urlValues,
49 contentBody: bytes.NewReader(buf),
50 contentLength: int64(len(buf)),
51 contentMD5Base64: sumMD5Base64(buf),
52 contentSHA256Hex: sum256Hex(buf),
53 }
54
55 // Execute PUT to set a bucket versioning.
56 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
57 defer closeResponse(resp)
58 if err != nil {
59 return err
60 }
61 if resp != nil {
62 if resp.StatusCode != http.StatusOK {
63 return httpRespToErrorResponse(resp, bucketName, "")
64 }
65 }
66 return nil
67}
68
69// EnableVersioning - enable object versioning in given bucket.
70func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error {
71 return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"})
72}
73
74// SuspendVersioning - suspend object versioning in given bucket.
75func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error {
76 return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"})
77}
78
// ExcludedPrefix - holds individual prefixes excluded from being versioned.
type ExcludedPrefix struct {
	// Prefix is the object-name prefix that is excluded from versioning.
	Prefix string
}
83
// BucketVersioningConfiguration is the versioning configuration structure,
// serialized as the S3 <VersioningConfiguration> XML document.
type BucketVersioningConfiguration struct {
	XMLName xml.Name `xml:"VersioningConfiguration"`
	// Status is compared against the Enabled/Suspended constants below.
	Status string `xml:"Status"`
	// MFADelete is the MFA-delete state; omitted from the XML when empty.
	MFADelete string `xml:"MfaDelete,omitempty"`
	// MinIO extension - allows selective, prefix-level versioning exclusion.
	// Requires versioning to be enabled
	ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"`
	ExcludeFolders   bool             `xml:",omitempty"`
}
94
// Various supported states. These untyped string constants are the values
// held by BucketVersioningConfiguration.Status.
const (
	Enabled = "Enabled"
	// Disabled State = "Disabled" only used by MFA Delete not supported yet.
	Suspended = "Suspended"
)
101
// Enabled returns true if bucket versioning is enabled,
// i.e. Status is exactly the Enabled constant.
func (b BucketVersioningConfiguration) Enabled() bool {
	return b.Status == Enabled
}
106
// Suspended returns true if bucket versioning is suspended,
// i.e. Status is exactly the Suspended constant.
func (b BucketVersioningConfiguration) Suspended() bool {
	return b.Status == Suspended
}
111
112// GetBucketVersioning gets the versioning configuration on
113// an existing bucket with a context to control cancellations and timeouts.
114func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) {
115 // Input validation.
116 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
117 return BucketVersioningConfiguration{}, err
118 }
119
120 // Get resources properly escaped and lined up before
121 // using them in http request.
122 urlValues := make(url.Values)
123 urlValues.Set("versioning", "")
124
125 // Execute GET on bucket to get the versioning configuration.
126 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
127 bucketName: bucketName,
128 queryValues: urlValues,
129 })
130
131 defer closeResponse(resp)
132 if err != nil {
133 return BucketVersioningConfiguration{}, err
134 }
135
136 if resp.StatusCode != http.StatusOK {
137 return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "")
138 }
139
140 versioningConfig := BucketVersioningConfiguration{}
141 if err = xmlDecoder(resp.Body, &versioningConfig); err != nil {
142 return versioningConfig, err
143 }
144
145 return versioningConfig, nil
146}
diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
new file mode 100644
index 0000000..e64a244
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
@@ -0,0 +1,594 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017, 2018 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "fmt"
23 "io"
24 "net/http"
25 "net/url"
26 "strconv"
27 "strings"
28 "time"
29
30 "github.com/google/uuid"
31 "github.com/minio/minio-go/v7/pkg/encrypt"
32 "github.com/minio/minio-go/v7/pkg/s3utils"
33)
34
// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
type CopyDestOptions struct {
	Bucket string // points to destination bucket
	Object string // points to destination object

	// `Encryption` is the key info for server-side-encryption with customer
	// provided key. If it is nil, no encryption is performed.
	Encryption encrypt.ServerSide

	// UserMetadata is the user-metadata key-value pairs to be set on the
	// destination. The keys are automatically prefixed with `x-amz-meta-`
	// if needed. If nil is passed, and if only a single source (of any
	// size) is provided in the ComposeObject call, then metadata from the
	// source is copied to the destination.
	UserMetadata map[string]string
	// ReplaceMetadata controls whether UserMetadata is applied: UserMetadata
	// is only set on the destination if ReplaceMetadata is true; otherwise
	// UserMetadata is ignored and the source's metadata is preserved.
	// NOTE: if you set ReplaceMetadata to true and no metadata is present
	// in UserMetadata, your destination object will not have any metadata
	// set.
	ReplaceMetadata bool

	// UserTags is the user-defined object tags to be set on the destination.
	// This will be set only if the ReplaceTags field is set to true.
	// Otherwise this field is ignored.
	UserTags    map[string]string
	ReplaceTags bool

	// LegalHold specifies whether you want to apply a Legal Hold to the copied object.
	LegalHold LegalHoldStatus

	// Object Retention related fields: both Mode and RetainUntilDate must be
	// set for retention headers to be emitted (see Marshal).
	Mode            RetentionMode
	RetainUntilDate time.Time

	// Size needs to be specified if a progress bar is specified.
	Size int64
	// Progress of the entire copy operation will be sent here.
	Progress io.Reader
}
77
// filterCustomMeta strips an optional `x-amz-meta-` prefix (matched
// case-insensitively) from each key of userMeta. If two keys collide after
// the prefix removal, only one of them is kept (duplicates are skipped).
func filterCustomMeta(userMeta map[string]string) map[string]string {
	const prefix = "x-amz-meta-"
	out := make(map[string]string)
	for key, val := range userMeta {
		if strings.HasPrefix(strings.ToLower(key), prefix) {
			key = key[len(prefix):]
		}
		if _, seen := out[key]; seen {
			continue
		}
		out[key] = val
	}
	return out
}
94
95// Marshal converts all the CopyDestOptions into their
96// equivalent HTTP header representation
97func (opts CopyDestOptions) Marshal(header http.Header) {
98 const replaceDirective = "REPLACE"
99 if opts.ReplaceTags {
100 header.Set(amzTaggingHeaderDirective, replaceDirective)
101 if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
102 header.Set(amzTaggingHeader, tags)
103 }
104 }
105
106 if opts.LegalHold != LegalHoldStatus("") {
107 header.Set(amzLegalHoldHeader, opts.LegalHold.String())
108 }
109
110 if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
111 header.Set(amzLockMode, opts.Mode.String())
112 header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
113 }
114
115 if opts.Encryption != nil {
116 opts.Encryption.Marshal(header)
117 }
118
119 if opts.ReplaceMetadata {
120 header.Set("x-amz-metadata-directive", replaceDirective)
121 for k, v := range filterCustomMeta(opts.UserMetadata) {
122 if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
123 header.Set(k, v)
124 } else {
125 header.Set("x-amz-meta-"+k, v)
126 }
127 }
128 }
129}
130
131// toDestinationInfo returns a validated copyOptions object.
132func (opts CopyDestOptions) validate() (err error) {
133 // Input validation.
134 if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
135 return err
136 }
137 if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
138 return err
139 }
140 if opts.Progress != nil && opts.Size < 0 {
141 return errInvalidArgument("For progress bar effective size needs to be specified")
142 }
143 return nil
144}
145
// CopySrcOptions represents a source object to be copied, using
// server-side copying APIs.
type CopySrcOptions struct {
	Bucket, Object string
	// VersionID pins the copy to a specific object version when non-empty.
	VersionID string
	// Conditional-copy constraints (x-amz-copy-source-if-* headers).
	MatchETag            string
	NoMatchETag          string
	MatchModifiedSince   time.Time
	MatchUnmodifiedSince time.Time
	// MatchRange enables the [Start, End] byte-range restriction below.
	MatchRange bool
	Start, End int64
	// Encryption holds the SSE settings needed to read the source.
	Encryption encrypt.ServerSide
}
159
160// Marshal converts all the CopySrcOptions into their
161// equivalent HTTP header representation
162func (opts CopySrcOptions) Marshal(header http.Header) {
163 // Set the source header
164 header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object))
165 if opts.VersionID != "" {
166 header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID)
167 }
168
169 if opts.MatchETag != "" {
170 header.Set("x-amz-copy-source-if-match", opts.MatchETag)
171 }
172 if opts.NoMatchETag != "" {
173 header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag)
174 }
175
176 if !opts.MatchModifiedSince.IsZero() {
177 header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat))
178 }
179 if !opts.MatchUnmodifiedSince.IsZero() {
180 header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat))
181 }
182
183 if opts.Encryption != nil {
184 encrypt.SSECopy(opts.Encryption).Marshal(header)
185 }
186}
187
188func (opts CopySrcOptions) validate() (err error) {
189 // Input validation.
190 if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
191 return err
192 }
193 if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
194 return err
195 }
196 if opts.Start > opts.End || opts.Start < 0 {
197 return errInvalidArgument("start must be non-negative, and start must be at most end.")
198 }
199 return nil
200}
201
// copyObjectDo is the low level implementation of the CopyObject API; it
// supports only up to 5GiB worth of copy in a single request.
//
// metadata entries are set verbatim as request headers. srcOpts supplies an
// optional source version; dstOpts carries MinIO replication-internal
// headers, a destination version ID, and user tags.
func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
	metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
) (ObjectInfo, error) {
	// Build headers.
	headers := make(http.Header)

	// Set all the metadata headers.
	for k, v := range metadata {
		headers.Set(k, v)
	}
	// MinIO replication-internal headers: each is only emitted when the
	// corresponding option carries a value.
	if !dstOpts.Internal.ReplicationStatus.Empty() {
		headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
	}
	if !dstOpts.Internal.SourceMTime.IsZero() {
		headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
	}
	if dstOpts.Internal.SourceETag != "" {
		headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
	}
	if dstOpts.Internal.ReplicationRequest {
		headers.Set(minIOBucketReplicationRequest, "true")
	}
	if dstOpts.Internal.ReplicationValidityCheck {
		headers.Set(minIOBucketReplicationCheck, "true")
	}
	if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
		headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
	}
	if !dstOpts.Internal.RetentionTimestamp.IsZero() {
		headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
	}
	if !dstOpts.Internal.TaggingTimestamp.IsZero() {
		headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
	}

	if len(dstOpts.UserTags) != 0 {
		headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
	}

	reqMetadata := requestMetadata{
		bucketName:   destBucket,
		objectName:   destObject,
		customHeader: headers,
	}
	// Propagate the source version ID as a versionId query parameter on the
	// destination request (replication use). Anything other than the
	// nullVersionID sentinel must parse as a UUID.
	if dstOpts.Internal.SourceVersionID != "" {
		if dstOpts.Internal.SourceVersionID != nullVersionID {
			if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
				return ObjectInfo{}, errInvalidArgument(err.Error())
			}
		}
		urlValues := make(url.Values)
		urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
		reqMetadata.queryValues = urlValues
	}

	// Set the source header. NOTE: headers is the same map referenced by
	// reqMetadata.customHeader, so these late Set calls still reach the
	// request.
	headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
	if srcOpts.VersionID != "" {
		headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID)
	}
	// Send upload-part-copy request
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return ObjectInfo{}, err
	}

	// Check if we got an error response.
	if resp.StatusCode != http.StatusOK {
		return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
	}

	// Decode the CopyObjectResult body for the new ETag/mtime.
	cpObjRes := copyObjectResult{}
	err = xmlDecoder(resp.Body, &cpObjRes)
	if err != nil {
		return ObjectInfo{}, err
	}

	objInfo := ObjectInfo{
		Key:          destObject,
		ETag:         strings.Trim(cpObjRes.ETag, "\""),
		LastModified: cpObjRes.LastModified,
	}
	return objInfo, nil
}
288
// copyObjectPartDo performs a single upload-part-copy request: it copies the
// byte range [startOffset, startOffset+length-1] of srcBucket/srcObject into
// part partID of the multipart upload uploadID on destBucket/destObject.
// metadata entries are set verbatim as additional request headers.
func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
	partID int, startOffset, length int64, metadata map[string]string,
) (p CompletePart, err error) {
	headers := make(http.Header)

	// Set source
	headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))

	if startOffset < 0 {
		return p, errInvalidArgument("startOffset must be non-negative")
	}

	// Only send an explicit range header for a non-negative length.
	if length >= 0 {
		headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
	}

	// Additional caller-supplied headers.
	for k, v := range metadata {
		headers.Set(k, v)
	}

	// partNumber and uploadId identify the part within the multipart upload.
	queryValues := make(url.Values)
	queryValues.Set("partNumber", strconv.Itoa(partID))
	queryValues.Set("uploadId", uploadID)

	resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
		bucketName:   destBucket,
		objectName:   destObject,
		customHeader: headers,
		queryValues:  queryValues,
	})
	defer closeResponse(resp)
	if err != nil {
		return
	}

	// Check if we got an error response.
	if resp.StatusCode != http.StatusOK {
		return p, httpRespToErrorResponse(resp, destBucket, destObject)
	}

	// Decode copy-part response on success.
	cpObjRes := copyObjectResult{}
	err = xmlDecoder(resp.Body, &cpObjRes)
	if err != nil {
		return p, err
	}
	p.PartNumber, p.ETag = partID, cpObjRes.ETag
	return p, nil
}
338
339// uploadPartCopy - helper function to create a part in a multipart
340// upload via an upload-part-copy request
341// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
342func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
343 headers http.Header,
344) (p CompletePart, err error) {
345 // Build query parameters
346 urlValues := make(url.Values)
347 urlValues.Set("partNumber", strconv.Itoa(partNumber))
348 urlValues.Set("uploadId", uploadID)
349
350 // Send upload-part-copy request
351 resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
352 bucketName: bucket,
353 objectName: object,
354 customHeader: headers,
355 queryValues: urlValues,
356 })
357 defer closeResponse(resp)
358 if err != nil {
359 return p, err
360 }
361
362 // Check if we got an error response.
363 if resp.StatusCode != http.StatusOK {
364 return p, httpRespToErrorResponse(resp, bucket, object)
365 }
366
367 // Decode copy-part response on success.
368 cpObjRes := copyObjectResult{}
369 err = xmlDecoder(resp.Body, &cpObjRes)
370 if err != nil {
371 return p, err
372 }
373 p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
374 return p, nil
375}
376
377// ComposeObject - creates an object using server-side copying
378// of existing objects. It takes a list of source objects (with optional offsets)
379// and concatenates them into a new object using only server-side copying
380// operations. Optionally takes progress reader hook for applications to
381// look at current progress.
382func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
383 if len(srcs) < 1 || len(srcs) > maxPartsCount {
384 return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.")
385 }
386
387 for _, src := range srcs {
388 if err := src.validate(); err != nil {
389 return UploadInfo{}, err
390 }
391 }
392
393 if err := dst.validate(); err != nil {
394 return UploadInfo{}, err
395 }
396
397 srcObjectInfos := make([]ObjectInfo, len(srcs))
398 srcObjectSizes := make([]int64, len(srcs))
399 var totalSize, totalParts int64
400 var err error
401 for i, src := range srcs {
402 opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
403 srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
404 if err != nil {
405 return UploadInfo{}, err
406 }
407
408 srcCopySize := srcObjectInfos[i].Size
409 // Check if a segment is specified, and if so, is the
410 // segment within object bounds?
411 if src.MatchRange {
412 // Since range is specified,
413 // 0 <= src.start <= src.end
414 // so only invalid case to check is:
415 if src.End >= srcCopySize || src.Start < 0 {
416 return UploadInfo{}, errInvalidArgument(
417 fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
418 i, src.Start, src.End, srcCopySize))
419 }
420 srcCopySize = src.End - src.Start + 1
421 }
422
423 // Only the last source may be less than `absMinPartSize`
424 if srcCopySize < absMinPartSize && i < len(srcs)-1 {
425 return UploadInfo{}, errInvalidArgument(
426 fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
427 }
428
429 // Is data to copy too large?
430 totalSize += srcCopySize
431 if totalSize > maxMultipartPutObjectSize {
432 return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
433 }
434
435 // record source size
436 srcObjectSizes[i] = srcCopySize
437
438 // calculate parts needed for current source
439 totalParts += partsRequired(srcCopySize)
440 // Do we need more parts than we are allowed?
441 if totalParts > maxPartsCount {
442 return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
443 "Your proposed compose object requires more than %d parts", maxPartsCount))
444 }
445 }
446
447 // Single source object case (i.e. when only one source is
448 // involved, it is being copied wholly and at most 5GiB in
449 // size, emptyfiles are also supported).
450 if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
451 return c.CopyObject(ctx, dst, srcs[0])
452 }
453
454 // Now, handle multipart-copy cases.
455
456 // 1. Ensure that the object has not been changed while
457 // we are copying data.
458 for i, src := range srcs {
459 src.MatchETag = srcObjectInfos[i].ETag
460 }
461
462 // 2. Initiate a new multipart upload.
463
464 // Set user-metadata on the destination object. If no
465 // user-metadata is specified, and there is only one source,
466 // (only) then metadata from source is copied.
467 var userMeta map[string]string
468 if dst.ReplaceMetadata {
469 userMeta = dst.UserMetadata
470 } else {
471 userMeta = srcObjectInfos[0].UserMetadata
472 }
473
474 var userTags map[string]string
475 if dst.ReplaceTags {
476 userTags = dst.UserTags
477 } else {
478 userTags = srcObjectInfos[0].UserTags
479 }
480
481 uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
482 ServerSideEncryption: dst.Encryption,
483 UserMetadata: userMeta,
484 UserTags: userTags,
485 Mode: dst.Mode,
486 RetainUntilDate: dst.RetainUntilDate,
487 LegalHold: dst.LegalHold,
488 })
489 if err != nil {
490 return UploadInfo{}, err
491 }
492
493 // 3. Perform copy part uploads
494 objParts := []CompletePart{}
495 partIndex := 1
496 for i, src := range srcs {
497 h := make(http.Header)
498 src.Marshal(h)
499 if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
500 dst.Encryption.Marshal(h)
501 }
502
503 // calculate start/end indices of parts after
504 // splitting.
505 startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
506 for j, start := range startIdx {
507 end := endIdx[j]
508
509 // Add (or reset) source range header for
510 // upload part copy request.
511 h.Set("x-amz-copy-source-range",
512 fmt.Sprintf("bytes=%d-%d", start, end))
513
514 // make upload-part-copy request
515 complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
516 dst.Object, uploadID, partIndex, h)
517 if err != nil {
518 return UploadInfo{}, err
519 }
520 if dst.Progress != nil {
521 io.CopyN(io.Discard, dst.Progress, end-start+1)
522 }
523 objParts = append(objParts, complPart)
524 partIndex++
525 }
526 }
527
528 // 4. Make final complete-multipart request.
529 uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
530 completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
531 if err != nil {
532 return UploadInfo{}, err
533 }
534
535 uploadInfo.Size = totalSize
536 return uploadInfo, nil
537}
538
539// partsRequired is maximum parts possible with
540// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1))
541func partsRequired(size int64) int64 {
542 maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
543 r := size / int64(maxPartSize)
544 if size%int64(maxPartSize) > 0 {
545 r++
546 }
547 return r
548}
549
550// calculateEvenSplits - computes splits for a source and returns
551// start and end index slices. Splits happen evenly to be sure that no
552// part is less than 5MiB, as that could fail the multipart request if
553// it is not the last part.
554func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
555 if size == 0 {
556 return
557 }
558
559 reqParts := partsRequired(size)
560 startIndex = make([]int64, reqParts)
561 endIndex = make([]int64, reqParts)
562 // Compute number of required parts `k`, as:
563 //
564 // k = ceiling(size / copyPartSize)
565 //
566 // Now, distribute the `size` bytes in the source into
567 // k parts as evenly as possible:
568 //
569 // r parts sized (q+1) bytes, and
570 // (k - r) parts sized q bytes, where
571 //
572 // size = q * k + r (by simple division of size by k,
573 // so that 0 <= r < k)
574 //
575 start := src.Start
576 if start == -1 {
577 start = 0
578 }
579 quot, rem := size/reqParts, size%reqParts
580 nextStart := start
581 for j := int64(0); j < reqParts; j++ {
582 curPartSize := quot
583 if j < rem {
584 curPartSize++
585 }
586
587 cStart := nextStart
588 cEnd := cStart + curPartSize - 1
589 nextStart = cEnd + 1
590
591 startIndex[j], endIndex[j] = cStart, cEnd
592 }
593 return
594}
diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
new file mode 100644
index 0000000..0c95d91
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
@@ -0,0 +1,76 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017, 2018 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "io"
23 "net/http"
24)
25
26// CopyObject - copy a source object into a new object
27func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) {
28 if err := src.validate(); err != nil {
29 return UploadInfo{}, err
30 }
31
32 if err := dst.validate(); err != nil {
33 return UploadInfo{}, err
34 }
35
36 header := make(http.Header)
37 dst.Marshal(header)
38 src.Marshal(header)
39
40 resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
41 bucketName: dst.Bucket,
42 objectName: dst.Object,
43 customHeader: header,
44 })
45 if err != nil {
46 return UploadInfo{}, err
47 }
48 defer closeResponse(resp)
49
50 if resp.StatusCode != http.StatusOK {
51 return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object)
52 }
53
54 // Update the progress properly after successful copy.
55 if dst.Progress != nil {
56 io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
57 }
58
59 cpObjRes := copyObjectResult{}
60 if err = xmlDecoder(resp.Body, &cpObjRes); err != nil {
61 return UploadInfo{}, err
62 }
63
64 // extract lifecycle expiry date and rule ID
65 expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
66
67 return UploadInfo{
68 Bucket: dst.Bucket,
69 Key: dst.Object,
70 LastModified: cpObjRes.LastModified,
71 ETag: trimEtag(resp.Header.Get("ETag")),
72 VersionID: resp.Header.Get(amzVersionID),
73 Expiration: expTime,
74 ExpirationRuleID: ruleID,
75 }, nil
76}
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
new file mode 100644
index 0000000..97a6f80
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -0,0 +1,254 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "encoding/xml"
22 "io"
23 "net/http"
24 "net/url"
25 "strings"
26 "time"
27)
28
// BucketInfo container for bucket metadata.
type BucketInfo struct {
	// The name of the bucket.
	Name string `json:"name"`
	// Date the bucket was created (server-reported timestamp).
	CreationDate time.Time `json:"creationDate"`
}
36
37// StringMap represents map with custom UnmarshalXML
38type StringMap map[string]string
39
40// UnmarshalXML unmarshals the XML into a map of string to strings,
41// creating a key in the map for each tag and setting it's value to the
42// tags contents.
43//
44// The fact this function is on the pointer of Map is important, so that
45// if m is nil it can be initialized, which is often the case if m is
46// nested in another xml structural. This is also why the first thing done
47// on the first line is initialize it.
48func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error {
49 *m = StringMap{}
50 for {
51 // Format is <key>value</key>
52 var e struct {
53 XMLName xml.Name
54 Value string `xml:",chardata"`
55 }
56 err := d.Decode(&e)
57 if err == io.EOF {
58 break
59 }
60 if err != nil {
61 return err
62 }
63 (*m)[e.XMLName.Local] = e.Value
64 }
65 return nil
66}
67
68// URLMap represents map with custom UnmarshalXML
69type URLMap map[string]string
70
71// UnmarshalXML unmarshals the XML into a map of string to strings,
72// creating a key in the map for each tag and setting it's value to the
73// tags contents.
74//
75// The fact this function is on the pointer of Map is important, so that
76// if m is nil it can be initialized, which is often the case if m is
77// nested in another xml structural. This is also why the first thing done
78// on the first line is initialize it.
79func (m *URLMap) UnmarshalXML(d *xml.Decoder, se xml.StartElement) error {
80 *m = URLMap{}
81 var tgs string
82 if err := d.DecodeElement(&tgs, &se); err != nil {
83 if err == io.EOF {
84 return nil
85 }
86 return err
87 }
88 for tgs != "" {
89 var key string
90 key, tgs, _ = stringsCut(tgs, "&")
91 if key == "" {
92 continue
93 }
94 key, value, _ := stringsCut(key, "=")
95 key, err := url.QueryUnescape(key)
96 if err != nil {
97 return err
98 }
99
100 value, err = url.QueryUnescape(value)
101 if err != nil {
102 return err
103 }
104 (*m)[key] = value
105 }
106 return nil
107}
108
109// stringsCut slices s around the first instance of sep,
110// returning the text before and after sep.
111// The found result reports whether sep appears in s.
112// If sep does not appear in s, cut returns s, "", false.
113func stringsCut(s, sep string) (before, after string, found bool) {
114 if i := strings.Index(s, sep); i >= 0 {
115 return s[:i], s[i+len(sep):], true
116 }
117 return s, "", false
118}
119
120// Owner name.
121type Owner struct {
122 XMLName xml.Name `xml:"Owner" json:"owner"`
123 DisplayName string `xml:"ID" json:"name"`
124 ID string `xml:"DisplayName" json:"id"`
125}
126
// UploadInfo contains information about the
// newly uploaded or copied object.
type UploadInfo struct {
	Bucket       string    // Bucket the object was written to.
	Key          string    // Name of the object.
	ETag         string    // Entity tag returned by the server.
	Size         int64     // Size in bytes of the uploaded object.
	LastModified time.Time // Server-reported last-modified time.
	Location     string
	VersionID    string // Version ID, when bucket versioning is enabled.

	// Lifecycle expiry-date and ruleID associated with the expiry
	// not to be confused with `Expires` HTTP header.
	Expiration       time.Time
	ExpirationRuleID string

	// Verified checksum values, if any.
	// Values are base64 (standard) encoded.
	// For multipart objects this is a checksum of the checksum of each part.
	ChecksumCRC32  string
	ChecksumCRC32C string
	ChecksumSHA1   string
	ChecksumSHA256 string
}
151
// RestoreInfo contains information of the restore operation of an archived object
type RestoreInfo struct {
	// True while the restore of the archived object is still in progress.
	OngoingRestore bool
	// When the restored copy of the archived object will be removed.
	ExpiryTime time.Time
}
159
// ObjectInfo container for object metadata.
type ObjectInfo struct {
	// An ETag is optionally set to md5sum of an object. In case of multipart objects,
	// ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
	// each parts concatenated into one string.
	ETag string `json:"etag"`

	Key          string    `json:"name"`         // Name of the object
	LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
	Size         int64     `json:"size"`         // Size in bytes of the object.
	ContentType  string    `json:"contentType"`  // A standard MIME type describing the format of the object data.
	Expires      time.Time `json:"expires"`      // The date and time at which the object is no longer able to be cached.

	// Collection of additional metadata on the object.
	// eg: x-amz-meta-*, content-encoding etc.
	Metadata http.Header `json:"metadata" xml:"-"`

	// x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value.
	// Only returned by MinIO servers.
	UserMetadata StringMap `json:"userMetadata,omitempty"`

	// x-amz-tagging values in their k/v values.
	// Only returned by MinIO servers.
	UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`

	// x-amz-tagging-count value
	UserTagCount int

	// Owner name.
	Owner Owner

	// ACL grant.
	Grant []Grant

	// The class of storage used to store the object.
	StorageClass string `json:"storageClass"`

	// Versioning related information
	IsLatest       bool
	IsDeleteMarker bool
	VersionID      string `xml:"VersionId"`

	// x-amz-replication-status value is either in one of the following states
	// - COMPLETED
	// - PENDING
	// - FAILED
	// - REPLICA (on the destination)
	ReplicationStatus string `xml:"ReplicationStatus"`
	// set to true if delete marker has backing object version on target, and eligible to replicate
	ReplicationReady bool
	// Lifecycle expiry-date and ruleID associated with the expiry
	// not to be confused with `Expires` HTTP header.
	Expiration       time.Time
	ExpirationRuleID string

	// Restore-operation state for an archived object; nil when no restore
	// information was returned.
	Restore *RestoreInfo

	// Checksum values
	ChecksumCRC32  string
	ChecksumCRC32C string
	ChecksumSHA1   string
	ChecksumSHA256 string

	// Erasure-coding counts — presumably MinIO-specific and absent on AWS
	// S3 responses; confirm against server documentation.
	Internal *struct {
		K int // Data blocks
		M int // Parity blocks
	} `xml:"Internal"`

	// Error associated with producing this entry, if any; excluded from JSON.
	Err error `json:"-"`
}
231
// ObjectMultipartInfo container for multipart object metadata.
type ObjectMultipartInfo struct {
	// Date and time at which the multipart upload was initiated.
	Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`

	// Who initiated and who owns the upload (unexported types declared
	// elsewhere in this package).
	Initiator initiator
	Owner     owner

	// The type of storage to use for the object. Defaults to 'STANDARD'.
	StorageClass string

	// Key of the object for which the multipart upload was initiated.
	Key string

	// Size in bytes of the object.
	Size int64

	// Upload ID that identifies the multipart upload.
	UploadID string `xml:"UploadId"`

	// Error associated with producing this entry, if any.
	Err error
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
new file mode 100644
index 0000000..7df211f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -0,0 +1,284 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "encoding/xml"
23 "fmt"
24 "io"
25 "net/http"
26 "strings"
27)
28
29/* **** SAMPLE ERROR RESPONSE ****
30<?xml version="1.0" encoding="UTF-8"?>
31<Error>
32 <Code>AccessDenied</Code>
33 <Message>Access Denied</Message>
34 <BucketName>bucketName</BucketName>
35 <Key>objectName</Key>
36 <RequestId>F19772218238A85A</RequestId>
37 <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
38</Error>
39*/
40
41// ErrorResponse - Is the typed error returned by all API operations.
42// ErrorResponse struct should be comparable since it is compared inside
43// golang http API (https://github.com/golang/go/issues/29768)
44type ErrorResponse struct {
45 XMLName xml.Name `xml:"Error" json:"-"`
46 Code string
47 Message string
48 BucketName string
49 Key string
50 Resource string
51 RequestID string `xml:"RequestId"`
52 HostID string `xml:"HostId"`
53
54 // Region where the bucket is located. This header is returned
55 // only in HEAD bucket and ListObjects response.
56 Region string
57
58 // Captures the server string returned in response header.
59 Server string
60
61 // Underlying HTTP status code for the returned error
62 StatusCode int `xml:"-" json:"-"`
63}
64
65// ToErrorResponse - Returns parsed ErrorResponse struct from body and
66// http headers.
67//
68// For example:
69//
70// import s3 "github.com/minio/minio-go/v7"
71// ...
72// ...
73// reader, stat, err := s3.GetObject(...)
74// if err != nil {
75// resp := s3.ToErrorResponse(err)
76// }
77// ...
78func ToErrorResponse(err error) ErrorResponse {
79 switch err := err.(type) {
80 case ErrorResponse:
81 return err
82 default:
83 return ErrorResponse{}
84 }
85}
86
87// Error - Returns S3 error string.
88func (e ErrorResponse) Error() string {
89 if e.Message == "" {
90 msg, ok := s3ErrorResponseMap[e.Code]
91 if !ok {
92 msg = fmt.Sprintf("Error response code %s.", e.Code)
93 }
94 return msg
95 }
96 return e.Message
97}
98
// Common string for errors to report issue location in unexpected
// cases. Appended to messages for conditions that should be impossible.
const (
	reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
)
104
105// xmlDecodeAndBody reads the whole body up to 1MB and
106// tries to XML decode it into v.
107// The body that was read and any error from reading or decoding is returned.
108func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
109 // read the whole body (up to 1MB)
110 const maxBodyLength = 1 << 20
111 body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
112 if err != nil {
113 return nil, err
114 }
115 return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
116}
117
// httpRespToErrorResponse returns a new encoded ErrorResponse
// structure as error.
//
// It first tries to XML-decode the response body into an ErrorResponse;
// when that fails it synthesizes one from the HTTP status code, and in
// either case it then overlays MinIO/AWS response headers for fields the
// body did not provide.
func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
	if resp == nil {
		msg := "Empty http response. " + reportIssue
		return errInvalidArgument(msg)
	}

	errResp := ErrorResponse{
		StatusCode: resp.StatusCode,
		Server:     resp.Header.Get("Server"),
	}

	errBody, err := xmlDecodeAndBody(resp.Body, &errResp)
	// Xml decoding failed with no body, fall back to HTTP headers.
	if err != nil {
		switch resp.StatusCode {
		case http.StatusNotFound:
			// A 404 with no object name refers to the bucket itself.
			if objectName == "" {
				errResp = ErrorResponse{
					StatusCode: resp.StatusCode,
					Code:       "NoSuchBucket",
					Message:    "The specified bucket does not exist.",
					BucketName: bucketName,
				}
			} else {
				errResp = ErrorResponse{
					StatusCode: resp.StatusCode,
					Code:       "NoSuchKey",
					Message:    "The specified key does not exist.",
					BucketName: bucketName,
					Key:        objectName,
				}
			}
		case http.StatusForbidden:
			errResp = ErrorResponse{
				StatusCode: resp.StatusCode,
				Code:       "AccessDenied",
				Message:    "Access Denied.",
				BucketName: bucketName,
				Key:        objectName,
			}
		case http.StatusConflict:
			errResp = ErrorResponse{
				StatusCode: resp.StatusCode,
				Code:       "Conflict",
				Message:    "Bucket not empty.",
				BucketName: bucketName,
			}
		case http.StatusPreconditionFailed:
			errResp = ErrorResponse{
				StatusCode: resp.StatusCode,
				Code:       "PreconditionFailed",
				Message:    s3ErrorResponseMap["PreconditionFailed"],
				BucketName: bucketName,
				Key:        objectName,
			}
		default:
			// Unknown status: surface the raw (undecodable) body when
			// present, truncated to keep the error readable.
			msg := resp.Status
			if len(errBody) > 0 {
				msg = string(errBody)
				if len(msg) > 1024 {
					msg = msg[:1024] + "..."
				}
			}
			errResp = ErrorResponse{
				StatusCode: resp.StatusCode,
				Code:       resp.Status,
				Message:    msg,
				BucketName: bucketName,
			}
		}
	}

	// MinIO-specific headers override whatever was decoded above.
	code := resp.Header.Get("x-minio-error-code")
	if code != "" {
		errResp.Code = code
	}
	desc := resp.Header.Get("x-minio-error-desc")
	if desc != "" {
		errResp.Message = strings.Trim(desc, `"`)
	}

	// Save hostID, requestID and region information
	// from headers if not available through error XML.
	if errResp.RequestID == "" {
		errResp.RequestID = resp.Header.Get("x-amz-request-id")
	}
	if errResp.HostID == "" {
		errResp.HostID = resp.Header.Get("x-amz-id-2")
	}
	if errResp.Region == "" {
		errResp.Region = resp.Header.Get("x-amz-bucket-region")
	}
	if errResp.Code == "InvalidRegion" && errResp.Region != "" {
		errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
	}

	return errResp
}
218
219// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
220func errTransferAccelerationBucket(bucketName string) error {
221 return ErrorResponse{
222 StatusCode: http.StatusBadRequest,
223 Code: "InvalidArgument",
224 Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
225 BucketName: bucketName,
226 }
227}
228
229// errEntityTooLarge - Input size is larger than supported maximum.
230func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
231 msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
232 return ErrorResponse{
233 StatusCode: http.StatusBadRequest,
234 Code: "EntityTooLarge",
235 Message: msg,
236 BucketName: bucketName,
237 Key: objectName,
238 }
239}
240
241// errEntityTooSmall - Input size is smaller than supported minimum.
242func errEntityTooSmall(totalSize int64, bucketName, objectName string) error {
243 msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
244 return ErrorResponse{
245 StatusCode: http.StatusBadRequest,
246 Code: "EntityTooSmall",
247 Message: msg,
248 BucketName: bucketName,
249 Key: objectName,
250 }
251}
252
253// errUnexpectedEOF - Unexpected end of file reached.
254func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
255 msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
256 return ErrorResponse{
257 StatusCode: http.StatusBadRequest,
258 Code: "UnexpectedEOF",
259 Message: msg,
260 BucketName: bucketName,
261 Key: objectName,
262 }
263}
264
265// errInvalidArgument - Invalid argument response.
266func errInvalidArgument(message string) error {
267 return ErrorResponse{
268 StatusCode: http.StatusBadRequest,
269 Code: "InvalidArgument",
270 Message: message,
271 RequestID: "minio",
272 }
273}
274
275// errAPINotSupported - API not supported response
276// The specified API call is not supported
277func errAPINotSupported(message string) error {
278 return ErrorResponse{
279 StatusCode: http.StatusNotImplemented,
280 Code: "APINotSupported",
281 Message: message,
282 RequestID: "minio",
283 }
284}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
new file mode 100644
index 0000000..9041d99
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
@@ -0,0 +1,152 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2018 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "encoding/xml"
23 "net/http"
24 "net/url"
25)
26
// Grantee represents the person being granted permissions.
type Grantee struct {
	XMLName     xml.Name `xml:"Grantee"`
	ID          string   `xml:"ID"`          // Canonical user ID of the grantee.
	DisplayName string   `xml:"DisplayName"` // Human-readable name of the grantee.
	URI         string   `xml:"URI"`         // Group URI; set instead of ID for group grants.
}

// Grant holds grant information
type Grant struct {
	XMLName    xml.Name `xml:"Grant"`
	Grantee    Grantee
	Permission string `xml:"Permission"` // READ, WRITE, READ_ACP, WRITE_ACP, or FULL_CONTROL.
}

// AccessControlList contains the set of grantees and the permissions assigned to each grantee.
type AccessControlList struct {
	XMLName    xml.Name `xml:"AccessControlList"`
	Grant      []Grant
	Permission string `xml:"Permission"`
}

// accessControlPolicy is the top-level XML document returned by the
// GET object "?acl" API: the object owner plus its access control list.
type accessControlPolicy struct {
	XMLName           xml.Name `xml:"AccessControlPolicy"`
	Owner             Owner
	AccessControlList AccessControlList
}
54
55// GetObjectACL get object ACLs
56func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) {
57 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
58 bucketName: bucketName,
59 objectName: objectName,
60 queryValues: url.Values{
61 "acl": []string{""},
62 },
63 })
64 if err != nil {
65 return nil, err
66 }
67 defer closeResponse(resp)
68
69 if resp.StatusCode != http.StatusOK {
70 return nil, httpRespToErrorResponse(resp, bucketName, objectName)
71 }
72
73 res := &accessControlPolicy{}
74
75 if err := xmlDecoder(resp.Body, res); err != nil {
76 return nil, err
77 }
78
79 objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{})
80 if err != nil {
81 return nil, err
82 }
83
84 objInfo.Owner.DisplayName = res.Owner.DisplayName
85 objInfo.Owner.ID = res.Owner.ID
86
87 objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...)
88
89 cannedACL := getCannedACL(res)
90 if cannedACL != "" {
91 objInfo.Metadata.Add("X-Amz-Acl", cannedACL)
92 return &objInfo, nil
93 }
94
95 grantACL := getAmzGrantACL(res)
96 for k, v := range grantACL {
97 objInfo.Metadata[k] = v
98 }
99
100 return &objInfo, nil
101}
102
103func getCannedACL(aCPolicy *accessControlPolicy) string {
104 grants := aCPolicy.AccessControlList.Grant
105
106 switch {
107 case len(grants) == 1:
108 if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
109 return "private"
110 }
111 case len(grants) == 2:
112 for _, g := range grants {
113 if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
114 return "authenticated-read"
115 }
116 if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
117 return "public-read"
118 }
119 if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID {
120 return "bucket-owner-read"
121 }
122 }
123 case len(grants) == 3:
124 for _, g := range grants {
125 if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
126 return "public-read-write"
127 }
128 }
129 }
130 return ""
131}
132
133func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string {
134 grants := aCPolicy.AccessControlList.Grant
135 res := map[string][]string{}
136
137 for _, g := range grants {
138 switch {
139 case g.Permission == "READ":
140 res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID)
141 case g.Permission == "WRITE":
142 res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID)
143 case g.Permission == "READ_ACP":
144 res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID)
145 case g.Permission == "WRITE_ACP":
146 res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID)
147 case g.Permission == "FULL_CONTROL":
148 res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID)
149 }
150 }
151 return res
152}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go
new file mode 100644
index 0000000..2332dbf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go
@@ -0,0 +1,127 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "io"
23 "os"
24 "path/filepath"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
// FGetObject - download contents of an object to a local file.
// The options can be used to specify the GET request further.
//
// The object is first written to a temporary "<filePath><ETag>.part.minio"
// file; an interrupted download leaves that part file behind, and a retry
// appends to it starting at its current size. Only once the whole object
// has been written is the part file renamed into place at filePath.
func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return err
	}

	// Verify if destination already exists.
	st, err := os.Stat(filePath)
	if err == nil {
		// If the destination exists and is a directory.
		if st.IsDir() {
			return errInvalidArgument("fileName is a directory.")
		}
	}

	// Proceed if file does not exist. return for all other errors.
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
	}

	// Extract top level directory.
	objectDir, _ := filepath.Split(filePath)
	if objectDir != "" {
		// Create any missing top level directories.
		if err := os.MkdirAll(objectDir, 0o700); err != nil {
			return err
		}
	}

	// Gather md5sum.
	objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts))
	if err != nil {
		return err
	}

	// Write to a temporary file "fileName.part.minio" before saving.
	// Embedding the ETag ties the part file to this object version, so a
	// stale part file of a since-changed object is not resumed by mistake.
	filePartPath := filePath + objectStat.ETag + ".part.minio"

	// If exists, open in append mode. If not create it as a part file.
	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
	if err != nil {
		return err
	}

	// If we return early with an error, be sure to close and delete
	// filePart. If we have an error along the way there is a chance
	// that filePart is somehow damaged, and we should discard it.
	closeAndRemove := true
	defer func() {
		if closeAndRemove {
			_ = filePart.Close()
			_ = os.Remove(filePartPath)
		}
	}()

	// Issue Stat to get the current offset.
	st, err = filePart.Stat()
	if err != nil {
		return err
	}

	// Initialize get object request headers to set the
	// appropriate range offsets to read from.
	if st.Size() > 0 {
		// Resume from the part file's current size (presumably an end of
		// 0 means "to the end of the object" — confirm SetRange semantics).
		opts.SetRange(st.Size(), 0)
	}

	// Seek to current position for incoming reader.
	objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts)
	if err != nil {
		return err
	}

	// Write to the part file.
	if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
		return err
	}

	// Close the file before rename, this is specifically needed for Windows users.
	closeAndRemove = false
	if err = filePart.Close(); err != nil {
		return err
	}

	// Safely completed. Now commit by renaming to actual filename.
	if err = os.Rename(filePartPath, filePath); err != nil {
		return err
	}

	// Return.
	return nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go
new file mode 100644
index 0000000..9e6b154
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go
@@ -0,0 +1,683 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "errors"
23 "fmt"
24 "io"
25 "net/http"
26 "sync"
27
28 "github.com/minio/minio-go/v7/pkg/s3utils"
29)
30
// GetObject wrapper function that accepts a request context.
//
// The returned *Object is a lazy, seekable handle: no data is fetched from
// the server until the caller issues a Read/ReadAt/Stat/Seek. A dedicated
// go-routine services those operations over reqCh/resCh and owns the
// underlying HTTP body; callers must Close() the Object to stop it.
func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return nil, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return nil, err
	}

	// Derived context lets Object.Close() terminate the feeder go-routine.
	gctx, cancel := context.WithCancel(ctx)

	// Detect if snowball is server location we are talking to.
	var snowball bool
	if location, ok := c.bucketLocCache.Get(bucketName); ok {
		snowball = location == "snowball"
	}

	var (
		err        error
		httpReader io.ReadCloser
		objectInfo ObjectInfo
		totalRead  int
	)

	// Create request channel.
	reqCh := make(chan getRequest)
	// Create response channel.
	resCh := make(chan getResponse)

	// This routine feeds partial object data as and when the caller reads.
	go func() {
		defer close(resCh)
		defer func() {
			// Close the http response body before returning.
			// This ends the connection with the server.
			if httpReader != nil {
				httpReader.Close()
			}
		}()
		defer cancel()

		// Used to verify if etag of object has changed since last read.
		var etag string

		for req := range reqCh {
			// If this is the first request we may not need to do a getObject request yet.
			if req.isFirstReq {
				// First request is a Read/ReadAt.
				if req.isReadOp {
					// Differentiate between wanting the whole object and just a range.
					if req.isReadAt {
						// If this is a ReadAt request only get the specified range.
						// Range is set with respect to the offset and length of the buffer requested.
						// Do not set objectInfo from the first readAt request because it will not get
						// the whole object.
						opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
					} else if req.Offset > 0 {
						opts.SetRange(req.Offset, 0)
					}
					httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
					if err != nil {
						resCh <- getResponse{Error: err}
						return
					}
					etag = objectInfo.ETag
					// Read at least firstReq.Buffer bytes, if not we have
					// reached our EOF.
					size, err := readFull(httpReader, req.Buffer)
					totalRead += size
					if size > 0 && err == io.ErrUnexpectedEOF {
						if int64(size) < objectInfo.Size {
							// In situations when returned size
							// is less than the expected content
							// length set by the server, make sure
							// we return io.ErrUnexpectedEOF
							err = io.ErrUnexpectedEOF
						} else {
							// If an EOF happens after reading some but not
							// all the bytes ReadFull returns ErrUnexpectedEOF
							err = io.EOF
						}
					} else if size == 0 && err == io.EOF && objectInfo.Size > 0 {
						// Special cases when server writes more data
						// than the content-length, net/http response
						// body returns an error, instead of converting
						// it to io.EOF - return unexpected EOF.
						err = io.ErrUnexpectedEOF
					}
					// Send back the first response.
					resCh <- getResponse{
						objectInfo: objectInfo,
						Size:       size,
						Error:      err,
						didRead:    true,
					}
				} else {
					// First request is a Stat or Seek call.
					// Only need to run a StatObject until an actual Read or ReadAt request comes through.

					// Remove range header if already set, for stat Operations to get original file size.
					delete(opts.headers, "Range")
					objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
					if err != nil {
						resCh <- getResponse{
							Error: err,
						}
						// Exit the go-routine.
						return
					}
					etag = objectInfo.ETag
					// Send back the first response.
					resCh <- getResponse{
						objectInfo: objectInfo,
					}
				}
			} else if req.settingObjectInfo { // Request is just to get objectInfo.
				// Remove range header if already set, for stat Operations to get original file size.
				delete(opts.headers, "Range")
				// Check whether this is snowball
				// if yes do not use If-Match feature
				// it doesn't work.
				if etag != "" && !snowball {
					opts.SetMatchETag(etag)
				}
				objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
				if err != nil {
					resCh <- getResponse{
						Error: err,
					}
					// Exit the goroutine.
					return
				}
				// Send back the objectInfo.
				resCh <- getResponse{
					objectInfo: objectInfo,
				}
			} else {
				// Offset changes fetch the new object at an Offset.
				// Because the httpReader may not be set by the first
				// request if it was a stat or seek it must be checked
				// if the object has been read or not to only initialize
				// new ones when they haven't been already.
				// All readAt requests are new requests.
				if req.DidOffsetChange || !req.beenRead {
					// Check whether this is snowball
					// if yes do not use If-Match feature
					// it doesn't work.
					if etag != "" && !snowball {
						opts.SetMatchETag(etag)
					}
					if httpReader != nil {
						// Close previously opened http reader.
						httpReader.Close()
					}
					// If this request is a readAt only get the specified range.
					if req.isReadAt {
						// Range is set with respect to the offset and length of the buffer requested.
						opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
					} else if req.Offset > 0 { // Range is set with respect to the offset.
						opts.SetRange(req.Offset, 0)
					} else {
						// Remove range header if already set
						delete(opts.headers, "Range")
					}
					httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
					if err != nil {
						resCh <- getResponse{
							Error: err,
						}
						return
					}
					totalRead = 0
				}

				// Read at least req.Buffer bytes, if not we have
				// reached our EOF.
				size, err := readFull(httpReader, req.Buffer)
				totalRead += size
				if size > 0 && err == io.ErrUnexpectedEOF {
					if int64(totalRead) < objectInfo.Size {
						// In situations when returned size
						// is less than the expected content
						// length set by the server, make sure
						// we return io.ErrUnexpectedEOF
						err = io.ErrUnexpectedEOF
					} else {
						// If an EOF happens after reading some but not
						// all the bytes ReadFull returns ErrUnexpectedEOF
						err = io.EOF
					}
				} else if size == 0 && err == io.EOF && objectInfo.Size > 0 {
					// Special cases when server writes more data
					// than the content-length, net/http response
					// body returns an error, instead of converting
					// it to io.EOF - return unexpected EOF.
					err = io.ErrUnexpectedEOF
				}

				// Reply back how much was read.
				resCh <- getResponse{
					Size:       size,
					Error:      err,
					didRead:    true,
					objectInfo: objectInfo,
				}
			}
		}
	}()

	// Create a newObject through the information sent back by reqCh.
	return newObject(gctx, cancel, reqCh, resCh), nil
}
244
// getRequest is the message container used to communicate a single
// Read/ReadAt/Stat/Seek operation from an Object to the internal
// data-fetching go-routine started by GetObject.
type getRequest struct {
	Buffer            []byte // Caller-supplied buffer to fill for read operations.
	Offset            int64  // readAt offset.
	DidOffsetChange   bool   // Tracks the offset changes for Seek requests.
	beenRead          bool   // Determines if this is the first time an object is being read.
	isReadAt          bool   // Determines if this request is a request to a specific range
	isReadOp          bool   // Determines if this request is a Read or Read/At request.
	isFirstReq        bool   // Determines if this request is the first time an object is being accessed.
	settingObjectInfo bool   // Determines if this request is to set the objectInfo of an object.
}
257
// getResponse is the message container used by the internal go-routine
// to reply to a single getRequest.
type getResponse struct {
	Size       int        // Number of bytes copied into the request buffer.
	Error      error      // Non-nil on failure; may also carry io.EOF on a successful final read.
	didRead    bool       // Lets subsequent calls know whether or not httpReader has been initiated.
	objectInfo ObjectInfo // Used for the first request.
}
265
// Object represents an open object. It implements
// Reader, ReaderAt, Seeker, Closer for a HTTP stream.
//
// All exported methods lock the embedded mutex, so an Object is safe for
// use from multiple goroutines, though operations are serialized.
type Object struct {
	// Mutex guarding all mutable state below.
	mutex *sync.Mutex

	// User allocated and defined.
	reqCh      chan<- getRequest  // Requests to the internal fetch go-routine.
	resCh      <-chan getResponse // Replies from the internal fetch go-routine.
	ctx        context.Context
	cancel     context.CancelFunc
	currOffset int64
	objectInfo ObjectInfo

	// Ask lower level to initiate data fetching based on currOffset
	seekData bool

	// Keeps track of closed call.
	isClosed bool

	// Keeps track of if this is the first call.
	isStarted bool

	// Previous error saved for future calls.
	prevErr error

	// Keeps track of if this object has been read yet.
	beenRead bool

	// Keeps track of if objectInfo has been set yet.
	objectInfoSet bool
}
298
299// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object.
300// Returns back the size of the buffer read, if anything was read, as well
301// as any error encountered. For all first requests sent on the object
302// it is also responsible for sending back the objectInfo.
303func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
304 select {
305 case <-o.ctx.Done():
306 return getResponse{}, o.ctx.Err()
307 case o.reqCh <- request:
308 }
309
310 response := <-o.resCh
311
312 // Return any error to the top level.
313 if response.Error != nil {
314 return response, response.Error
315 }
316
317 // This was the first request.
318 if !o.isStarted {
319 // The object has been operated on.
320 o.isStarted = true
321 }
322 // Set the objectInfo if the request was not readAt
323 // and it hasn't been set before.
324 if !o.objectInfoSet && !request.isReadAt {
325 o.objectInfo = response.objectInfo
326 o.objectInfoSet = true
327 }
328 // Set beenRead only if it has not been set before.
329 if !o.beenRead {
330 o.beenRead = response.didRead
331 }
332 // Data are ready on the wire, no need to reinitiate connection in lower level
333 o.seekData = false
334
335 return response, nil
336}
337
338// setOffset - handles the setting of offsets for
339// Read/ReadAt/Seek requests.
340func (o *Object) setOffset(bytesRead int64) error {
341 // Update the currentOffset.
342 o.currOffset += bytesRead
343
344 if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
345 return io.EOF
346 }
347 return nil
348}
349
350// Read reads up to len(b) bytes into b. It returns the number of
351// bytes read (0 <= n <= len(b)) and any error encountered. Returns
352// io.EOF upon end of file.
353func (o *Object) Read(b []byte) (n int, err error) {
354 if o == nil {
355 return 0, errInvalidArgument("Object is nil")
356 }
357
358 // Locking.
359 o.mutex.Lock()
360 defer o.mutex.Unlock()
361
362 // prevErr is previous error saved from previous operation.
363 if o.prevErr != nil || o.isClosed {
364 return 0, o.prevErr
365 }
366
367 // Create a new request.
368 readReq := getRequest{
369 isReadOp: true,
370 beenRead: o.beenRead,
371 Buffer: b,
372 }
373
374 // Alert that this is the first request.
375 if !o.isStarted {
376 readReq.isFirstReq = true
377 }
378
379 // Ask to establish a new data fetch routine based on seekData flag
380 readReq.DidOffsetChange = o.seekData
381 readReq.Offset = o.currOffset
382
383 // Send and receive from the first request.
384 response, err := o.doGetRequest(readReq)
385 if err != nil && err != io.EOF {
386 // Save the error for future calls.
387 o.prevErr = err
388 return response.Size, err
389 }
390
391 // Bytes read.
392 bytesRead := int64(response.Size)
393
394 // Set the new offset.
395 oerr := o.setOffset(bytesRead)
396 if oerr != nil {
397 // Save the error for future calls.
398 o.prevErr = oerr
399 return response.Size, oerr
400 }
401
402 // Return the response.
403 return response.Size, err
404}
405
406// Stat returns the ObjectInfo structure describing Object.
407func (o *Object) Stat() (ObjectInfo, error) {
408 if o == nil {
409 return ObjectInfo{}, errInvalidArgument("Object is nil")
410 }
411 // Locking.
412 o.mutex.Lock()
413 defer o.mutex.Unlock()
414
415 if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
416 return ObjectInfo{}, o.prevErr
417 }
418
419 // This is the first request.
420 if !o.isStarted || !o.objectInfoSet {
421 // Send the request and get the response.
422 _, err := o.doGetRequest(getRequest{
423 isFirstReq: !o.isStarted,
424 settingObjectInfo: !o.objectInfoSet,
425 })
426 if err != nil {
427 o.prevErr = err
428 return ObjectInfo{}, err
429 }
430 }
431
432 return o.objectInfo, nil
433}
434
435// ReadAt reads len(b) bytes from the File starting at byte offset
436// off. It returns the number of bytes read and the error, if any.
437// ReadAt always returns a non-nil error when n < len(b). At end of
438// file, that error is io.EOF.
439func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
440 if o == nil {
441 return 0, errInvalidArgument("Object is nil")
442 }
443
444 // Locking.
445 o.mutex.Lock()
446 defer o.mutex.Unlock()
447
448 // prevErr is error which was saved in previous operation.
449 if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
450 return 0, o.prevErr
451 }
452
453 // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method.
454 o.currOffset = offset
455
456 // Can only compare offsets to size when size has been set.
457 if o.objectInfoSet {
458 // If offset is negative than we return io.EOF.
459 // If offset is greater than or equal to object size we return io.EOF.
460 if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
461 return 0, io.EOF
462 }
463 }
464
465 // Create the new readAt request.
466 readAtReq := getRequest{
467 isReadOp: true,
468 isReadAt: true,
469 DidOffsetChange: true, // Offset always changes.
470 beenRead: o.beenRead, // Set if this is the first request to try and read.
471 Offset: offset, // Set the offset.
472 Buffer: b,
473 }
474
475 // Alert that this is the first request.
476 if !o.isStarted {
477 readAtReq.isFirstReq = true
478 }
479
480 // Send and receive from the first request.
481 response, err := o.doGetRequest(readAtReq)
482 if err != nil && err != io.EOF {
483 // Save the error.
484 o.prevErr = err
485 return response.Size, err
486 }
487 // Bytes read.
488 bytesRead := int64(response.Size)
489 // There is no valid objectInfo yet
490 // to compare against for EOF.
491 if !o.objectInfoSet {
492 // Update the currentOffset.
493 o.currOffset += bytesRead
494 } else {
495 // If this was not the first request update
496 // the offsets and compare against objectInfo
497 // for EOF.
498 oerr := o.setOffset(bytesRead)
499 if oerr != nil {
500 o.prevErr = oerr
501 return response.Size, oerr
502 }
503 }
504 return response.Size, err
505}
506
507// Seek sets the offset for the next Read or Write to offset,
508// interpreted according to whence: 0 means relative to the
509// origin of the file, 1 means relative to the current offset,
510// and 2 means relative to the end.
511// Seek returns the new offset and an error, if any.
512//
513// Seeking to a negative offset is an error. Seeking to any positive
514// offset is legal, subsequent io operations succeed until the
515// underlying object is not closed.
516func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
517 if o == nil {
518 return 0, errInvalidArgument("Object is nil")
519 }
520
521 // Locking.
522 o.mutex.Lock()
523 defer o.mutex.Unlock()
524
525 // At EOF seeking is legal allow only io.EOF, for any other errors we return.
526 if o.prevErr != nil && o.prevErr != io.EOF {
527 return 0, o.prevErr
528 }
529
530 // Negative offset is valid for whence of '2'.
531 if offset < 0 && whence != 2 {
532 return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence))
533 }
534
535 // This is the first request. So before anything else
536 // get the ObjectInfo.
537 if !o.isStarted || !o.objectInfoSet {
538 // Create the new Seek request.
539 seekReq := getRequest{
540 isReadOp: false,
541 Offset: offset,
542 isFirstReq: true,
543 }
544 // Send and receive from the seek request.
545 _, err := o.doGetRequest(seekReq)
546 if err != nil {
547 // Save the error.
548 o.prevErr = err
549 return 0, err
550 }
551 }
552
553 newOffset := o.currOffset
554
555 // Switch through whence.
556 switch whence {
557 default:
558 return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
559 case 0:
560 if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
561 return 0, io.EOF
562 }
563 newOffset = offset
564 case 1:
565 if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
566 return 0, io.EOF
567 }
568 newOffset += offset
569 case 2:
570 // If we don't know the object size return an error for io.SeekEnd
571 if o.objectInfo.Size < 0 {
572 return 0, errInvalidArgument("Whence END is not supported when the object size is unknown")
573 }
574 // Seeking to positive offset is valid for whence '2', but
575 // since we are backing a Reader we have reached 'EOF' if
576 // offset is positive.
577 if offset > 0 {
578 return 0, io.EOF
579 }
580 // Seeking to negative position not allowed for whence.
581 if o.objectInfo.Size+offset < 0 {
582 return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
583 }
584 newOffset = o.objectInfo.Size + offset
585 }
586 // Reset the saved error since we successfully seeked, let the Read
587 // and ReadAt decide.
588 if o.prevErr == io.EOF {
589 o.prevErr = nil
590 }
591
592 // Ask lower level to fetch again from source when necessary
593 o.seekData = (newOffset != o.currOffset) || o.seekData
594 o.currOffset = newOffset
595
596 // Return the effective offset.
597 return o.currOffset, nil
598}
599
600// Close - The behavior of Close after the first call returns error
601// for subsequent Close() calls.
602func (o *Object) Close() (err error) {
603 if o == nil {
604 return errInvalidArgument("Object is nil")
605 }
606
607 // Locking.
608 o.mutex.Lock()
609 defer o.mutex.Unlock()
610
611 // if already closed return an error.
612 if o.isClosed {
613 return o.prevErr
614 }
615
616 // Close successfully.
617 o.cancel()
618
619 // Close the request channel to indicate the internal go-routine to exit.
620 close(o.reqCh)
621
622 // Save for future operations.
623 errMsg := "Object is already closed. Bad file descriptor."
624 o.prevErr = errors.New(errMsg)
625 // Save here that we closed done channel successfully.
626 o.isClosed = true
627 return nil
628}
629
630// newObject instantiates a new *minio.Object*
631// ObjectInfo will be set by setObjectInfo
632func newObject(ctx context.Context, cancel context.CancelFunc, reqCh chan<- getRequest, resCh <-chan getResponse) *Object {
633 return &Object{
634 ctx: ctx,
635 cancel: cancel,
636 mutex: &sync.Mutex{},
637 reqCh: reqCh,
638 resCh: resCh,
639 }
640}
641
642// getObject - retrieve object from Object Storage.
643//
644// Additionally this function also takes range arguments to download the specified
645// range bytes of an object. Setting offset and length = 0 will download the full object.
646//
647// For more information about the HTTP Range header.
648// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
649func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
650 // Validate input arguments.
651 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
652 return nil, ObjectInfo{}, nil, err
653 }
654 if err := s3utils.CheckValidObjectName(objectName); err != nil {
655 return nil, ObjectInfo{}, nil, err
656 }
657
658 // Execute GET on objectName.
659 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
660 bucketName: bucketName,
661 objectName: objectName,
662 queryValues: opts.toQueryValues(),
663 customHeader: opts.Header(),
664 contentSHA256Hex: emptySHA256Hex,
665 })
666 if err != nil {
667 return nil, ObjectInfo{}, nil, err
668 }
669 if resp != nil {
670 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
671 return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName)
672 }
673 }
674
675 objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header)
676 if err != nil {
677 closeResponse(resp)
678 return nil, ObjectInfo{}, nil, err
679 }
680
681 // do not close body here, caller will close
682 return resp.Body, objectStat, resp.Header, nil
683}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go
new file mode 100644
index 0000000..a0216e2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go
@@ -0,0 +1,203 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "fmt"
22 "net/http"
23 "net/url"
24 "strconv"
25 "time"
26
27 "github.com/minio/minio-go/v7/pkg/encrypt"
28)
29
// AdvancedGetOptions for internal use by MinIO server - not intended for client use.
type AdvancedGetOptions struct {
	ReplicationDeleteMarker           bool
	IsReplicationReadyForDeleteMarker bool
	// ReplicationProxyRequest, when non-empty, is sent as the replication
	// proxy-request header in GET/HEAD requests (see Header()).
	ReplicationProxyRequest string
}
36
// GetObjectOptions are used to specify additional headers or options
// during GET requests.
type GetObjectOptions struct {
	headers              map[string]string // Extra request headers; populated via Set().
	reqParams            url.Values        // Extra query parameters; populated via SetReqParam/AddReqParam.
	ServerSideEncryption encrypt.ServerSide
	VersionID            string // Specific object version to fetch, if non-empty.
	PartNumber           int    // Specific part of a multipart object to fetch, if > 0.

	// Include any checksums, if object was uploaded with checksum.
	// For multipart objects this is a checksum of part checksums.
	// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
	Checksum bool

	// To be not used by external applications
	Internal AdvancedGetOptions
}
54
// StatObjectOptions are used to specify additional headers or options
// during GET info/stat requests. It is an alias of GetObjectOptions, so
// the two option types are interchangeable.
type StatObjectOptions = GetObjectOptions
58
59// Header returns the http.Header representation of the GET options.
60func (o GetObjectOptions) Header() http.Header {
61 headers := make(http.Header, len(o.headers))
62 for k, v := range o.headers {
63 headers.Set(k, v)
64 }
65 if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
66 o.ServerSideEncryption.Marshal(headers)
67 }
68 // this header is set for active-active replication scenario where GET/HEAD
69 // to site A is proxy'd to site B if object/version missing on site A.
70 if o.Internal.ReplicationProxyRequest != "" {
71 headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest)
72 }
73 if o.Checksum {
74 headers.Set("x-amz-checksum-mode", "ENABLED")
75 }
76 return headers
77}
78
79// Set adds a key value pair to the options. The
80// key-value pair will be part of the HTTP GET request
81// headers.
82func (o *GetObjectOptions) Set(key, value string) {
83 if o.headers == nil {
84 o.headers = make(map[string]string)
85 }
86 o.headers[http.CanonicalHeaderKey(key)] = value
87}
88
89// SetReqParam - set request query string parameter
90// supported key: see supportedQueryValues and allowedCustomQueryPrefix.
91// If an unsupported key is passed in, it will be ignored and nothing will be done.
92func (o *GetObjectOptions) SetReqParam(key, value string) {
93 if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
94 // do nothing
95 return
96 }
97 if o.reqParams == nil {
98 o.reqParams = make(url.Values)
99 }
100 o.reqParams.Set(key, value)
101}
102
103// AddReqParam - add request query string parameter
104// supported key: see supportedQueryValues and allowedCustomQueryPrefix.
105// If an unsupported key is passed in, it will be ignored and nothing will be done.
106func (o *GetObjectOptions) AddReqParam(key, value string) {
107 if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
108 // do nothing
109 return
110 }
111 if o.reqParams == nil {
112 o.reqParams = make(url.Values)
113 }
114 o.reqParams.Add(key, value)
115}
116
117// SetMatchETag - set match etag.
118func (o *GetObjectOptions) SetMatchETag(etag string) error {
119 if etag == "" {
120 return errInvalidArgument("ETag cannot be empty.")
121 }
122 o.Set("If-Match", "\""+etag+"\"")
123 return nil
124}
125
126// SetMatchETagExcept - set match etag except.
127func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
128 if etag == "" {
129 return errInvalidArgument("ETag cannot be empty.")
130 }
131 o.Set("If-None-Match", "\""+etag+"\"")
132 return nil
133}
134
135// SetUnmodified - set unmodified time since.
136func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
137 if modTime.IsZero() {
138 return errInvalidArgument("Modified since cannot be empty.")
139 }
140 o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
141 return nil
142}
143
144// SetModified - set modified time since.
145func (o *GetObjectOptions) SetModified(modTime time.Time) error {
146 if modTime.IsZero() {
147 return errInvalidArgument("Modified since cannot be empty.")
148 }
149 o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
150 return nil
151}
152
153// SetRange - set the start and end offset of the object to be read.
154// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
155func (o *GetObjectOptions) SetRange(start, end int64) error {
156 switch {
157 case start == 0 && end < 0:
158 // Read last '-end' bytes. `bytes=-N`.
159 o.Set("Range", fmt.Sprintf("bytes=%d", end))
160 case 0 < start && end == 0:
161 // Read everything starting from offset
162 // 'start'. `bytes=N-`.
163 o.Set("Range", fmt.Sprintf("bytes=%d-", start))
164 case 0 <= start && start <= end:
165 // Read everything starting at 'start' till the
166 // 'end'. `bytes=N-M`
167 o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
168 default:
169 // All other cases such as
170 // bytes=-3-
171 // bytes=5-3
172 // bytes=-2-4
173 // bytes=-3-0
174 // bytes=-3--2
175 // are invalid.
176 return errInvalidArgument(
177 fmt.Sprintf(
178 "Invalid range specified: start=%d end=%d",
179 start, end))
180 }
181 return nil
182}
183
184// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters.
185func (o *GetObjectOptions) toQueryValues() url.Values {
186 urlValues := make(url.Values)
187 if o.VersionID != "" {
188 urlValues.Set("versionId", o.VersionID)
189 }
190 if o.PartNumber > 0 {
191 urlValues.Set("partNumber", strconv.Itoa(o.PartNumber))
192 }
193
194 if o.reqParams != nil {
195 for key, values := range o.reqParams {
196 for _, value := range values {
197 urlValues.Add(key, value)
198 }
199 }
200 }
201
202 return urlValues
203}
diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go
new file mode 100644
index 0000000..31b6edf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-list.go
@@ -0,0 +1,1057 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "fmt"
23 "net/http"
24 "net/url"
25 "time"
26
27 "github.com/minio/minio-go/v7/pkg/s3utils"
28)
29
// ListBuckets list all buckets owned by this authenticated user.
//
// This call requires explicit authentication, no anonymous requests are
// allowed for listing buckets.
//
//	api := client.New(....)
//	for message := range api.ListBuckets(context.Background()) {
//		fmt.Println(message)
//	}
func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
	// Execute GET on service.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex})
	// closeResponse tolerates a nil response, so this is safe before the error check.
	defer closeResponse(resp)
	if err != nil {
		return nil, err
	}
	if resp != nil {
		// Convert any non-200 answer into a typed error response.
		if resp.StatusCode != http.StatusOK {
			return nil, httpRespToErrorResponse(resp, "", "")
		}
	}
	listAllMyBucketsResult := listAllMyBucketsResult{}
	err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
	if err != nil {
		return nil, err
	}
	return listAllMyBucketsResult.Buckets.Bucket, nil
}
58
// Bucket List Operations.

// listObjectsV2 streams the objects of a bucket over a channel using the
// ListObjectsV2 API, paginating with continuation tokens until the listing
// is exhausted or ctx is canceled. The worker goroutine closes the returned
// channel; callers must drain it fully. On cancellation the final entry
// carries ctx.Err().
func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
	// Allocate new list objects channel.
	objectStatCh := make(chan ObjectInfo, 1)
	// Default listing is delimited at "/"
	delimiter := "/"
	if opts.Recursive {
		// If recursive we do not delimit.
		delimiter = ""
	}

	// Return object owner information by default
	fetchOwner := true

	// sendObjectInfo delivers info unless the caller has gone away.
	sendObjectInfo := func(info ObjectInfo) {
		select {
		case objectStatCh <- info:
		case <-ctx.Done():
		}
	}

	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(objectStatCh)
		sendObjectInfo(ObjectInfo{
			Err: err,
		})
		return objectStatCh
	}

	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
		defer close(objectStatCh)
		sendObjectInfo(ObjectInfo{
			Err: err,
		})
		return objectStatCh
	}

	// Initiate list objects goroutine here.
	go func(objectStatCh chan<- ObjectInfo) {
		defer func() {
			// Surface cancellation as the last entry before closing.
			if contextCanceled(ctx) {
				objectStatCh <- ObjectInfo{
					Err: ctx.Err(),
				}
			}
			close(objectStatCh)
		}()

		// Save continuationToken for next request.
		var continuationToken string
		for {
			// Get list of objects a maximum of 1000 per request.
			result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken,
				fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers)
			if err != nil {
				sendObjectInfo(ObjectInfo{
					Err: err,
				})
				return
			}

			// If contents are available loop through and send over channel.
			for _, object := range result.Contents {
				object.ETag = trimEtag(object.ETag)
				select {
				// Send object content.
				case objectStatCh <- object:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				select {
				// Send object prefixes.
				case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// If continuation token present, save it for next request.
			if result.NextContinuationToken != "" {
				continuationToken = result.NextContinuationToken
			}

			// Listing ends result is not truncated, return right here.
			if !result.IsTruncated {
				return
			}

			// Add this to catch broken S3 API implementations.
			if continuationToken == "" {
				sendObjectInfo(ObjectInfo{
					Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL),
				})
				return
			}
		}
	}(objectStatCh)
	return objectStatCh
}
167
// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?continuation-token - Used to continue iterating over a set of objects
// ?metadata - Specifies if we want metadata for the objects as part of list operation.
// ?delimiter - A delimiter is a character you use to group keys.
// ?start-after - Sets a marker to start listing lexically at this key onwards.
// ?max-keys - Sets the maximum number of keys returned in the response body.
//
// Keys and prefixes in the response are URL-decoded before being returned,
// since the request always asks for encoding-type=url.
func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) {
	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ListBucketV2Result{}, err
	}
	// Validate object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		return ListBucketV2Result{}, err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)

	// Always set list-type in ListObjects V2
	urlValues.Set("list-type", "2")

	if metadata {
		urlValues.Set("metadata", "true")
	}

	// Set this conditionally if asked
	if startAfter != "" {
		urlValues.Set("start-after", startAfter)
	}

	// Always set encoding-type in ListObjects V2
	urlValues.Set("encoding-type", "url")

	// Set object prefix, prefix value to be set to empty is okay.
	urlValues.Set("prefix", objectPrefix)

	// Set delimiter, delimiter value to be set to empty is okay.
	urlValues.Set("delimiter", delimiter)

	// Set continuation token
	if continuationToken != "" {
		urlValues.Set("continuation-token", continuationToken)
	}

	// Fetch owner when listing
	if fetchOwner {
		urlValues.Set("fetch-owner", "true")
	}

	// Set max keys.
	if maxkeys > 0 {
		urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
	}

	// Execute GET on bucket to list objects.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
		customHeader:     headers,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListBucketV2Result{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}

	// Decode listBuckets XML.
	listBucketResult := ListBucketV2Result{}
	if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
		return listBucketResult, err
	}

	// This is an additional verification check to make
	// sure proper responses are received.
	if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
		return listBucketResult, ErrorResponse{
			Code:    "NotImplemented",
			Message: "Truncated response should have continuation token set",
		}
	}

	// Decode object keys and normalize timestamps to millisecond precision.
	for i, obj := range listBucketResult.Contents {
		listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
		if err != nil {
			return listBucketResult, err
		}
		listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond)
	}

	// Decode common prefixes as well.
	for i, obj := range listBucketResult.CommonPrefixes {
		listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
		if err != nil {
			return listBucketResult, err
		}
	}

	// Success.
	return listBucketResult, nil
}
278
// listObjects streams the objects of a bucket over a channel using the
// legacy ListObjects (V1) API, paginating with markers. The worker goroutine
// closes the returned channel; callers must drain it fully. On cancellation
// the final entry carries ctx.Err().
func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
	// Allocate new list objects channel.
	objectStatCh := make(chan ObjectInfo, 1)
	// Default listing is delimited at "/"
	delimiter := "/"
	if opts.Recursive {
		// If recursive we do not delimit.
		delimiter = ""
	}

	// sendObjectInfo delivers info unless the caller has gone away.
	sendObjectInfo := func(info ObjectInfo) {
		select {
		case objectStatCh <- info:
		case <-ctx.Done():
		}
	}

	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(objectStatCh)
		sendObjectInfo(ObjectInfo{
			Err: err,
		})
		return objectStatCh
	}
	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
		defer close(objectStatCh)
		sendObjectInfo(ObjectInfo{
			Err: err,
		})
		return objectStatCh
	}

	// Initiate list objects goroutine here.
	go func(objectStatCh chan<- ObjectInfo) {
		defer func() {
			// Surface cancellation as the last entry before closing.
			if contextCanceled(ctx) {
				objectStatCh <- ObjectInfo{
					Err: ctx.Err(),
				}
			}
			close(objectStatCh)
		}()

		// V1 reuses StartAfter as the initial marker.
		marker := opts.StartAfter
		for {
			// Get list of objects a maximum of 1000 per request.
			result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers)
			if err != nil {
				sendObjectInfo(ObjectInfo{
					Err: err,
				})
				return
			}

			// If contents are available loop through and send over channel.
			for _, object := range result.Contents {
				// Save the marker.
				marker = object.Key
				object.ETag = trimEtag(object.ETag)
				select {
				// Send object content.
				case objectStatCh <- object:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				select {
				// Send object prefixes.
				case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// If next marker present, save it for next request.
			if result.NextMarker != "" {
				marker = result.NextMarker
			}

			// Listing ends result is not truncated, return right here.
			if !result.IsTruncated {
				return
			}
		}
	}(objectStatCh)
	return objectStatCh
}
374
// listObjectVersions streams all object versions of a bucket over a channel,
// paginating with key and version-id markers. The worker goroutine closes the
// returned channel; callers must drain it fully. On cancellation the final
// entry carries ctx.Err().
func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
	// Allocate new list objects channel.
	resultCh := make(chan ObjectInfo, 1)
	// Default listing is delimited at "/"
	delimiter := "/"
	if opts.Recursive {
		// If recursive we do not delimit.
		delimiter = ""
	}

	// sendObjectInfo delivers info unless the caller has gone away.
	sendObjectInfo := func(info ObjectInfo) {
		select {
		case resultCh <- info:
		case <-ctx.Done():
		}
	}

	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(resultCh)
		sendObjectInfo(ObjectInfo{
			Err: err,
		})
		return resultCh
	}

	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
		defer close(resultCh)
		sendObjectInfo(ObjectInfo{
			Err: err,
		})
		return resultCh
	}

	// Initiate list objects goroutine here.
	go func(resultCh chan<- ObjectInfo) {
		defer func() {
			// Surface cancellation as the last entry before closing.
			if contextCanceled(ctx) {
				resultCh <- ObjectInfo{
					Err: ctx.Err(),
				}
			}
			close(resultCh)
		}()

		var (
			keyMarker       = ""
			versionIDMarker = ""
		)

		for {
			// Get list of objects a maximum of 1000 per request.
			result, err := c.listObjectVersionsQuery(ctx, bucketName, opts, keyMarker, versionIDMarker, delimiter)
			if err != nil {
				sendObjectInfo(ObjectInfo{
					Err: err,
				})
				return
			}

			// If contents are available loop through and send over channel.
			for _, version := range result.Versions {
				// Flatten each version entry into an ObjectInfo.
				info := ObjectInfo{
					ETag:           trimEtag(version.ETag),
					Key:            version.Key,
					LastModified:   version.LastModified.Truncate(time.Millisecond),
					Size:           version.Size,
					Owner:          version.Owner,
					StorageClass:   version.StorageClass,
					IsLatest:       version.IsLatest,
					VersionID:      version.VersionID,
					IsDeleteMarker: version.isDeleteMarker,
					UserTags:       version.UserTags,
					UserMetadata:   version.UserMetadata,
					Internal:       version.Internal,
				}
				select {
				// Send object version info.
				case resultCh <- info:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				select {
				// Send object prefixes.
				case resultCh <- ObjectInfo{Key: obj.Prefix}:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// If next key marker is present, save it for next request.
			if result.NextKeyMarker != "" {
				keyMarker = result.NextKeyMarker
			}

			// If next version id marker is present, save it for next request.
			if result.NextVersionIDMarker != "" {
				versionIDMarker = result.NextVersionIDMarker
			}

			// Listing ends result is not truncated, return right here.
			if !result.IsTruncated {
				return
			}
		}
	}(resultCh)
	return resultCh
}
491
// listObjectVersionsQuery - (List Object Versions) - List some or all (up to 1000) of the existing objects
// and their versions in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?key-marker - Specifies the key to start with when listing objects in a bucket.
// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
//
// Keys, prefixes and the next key marker in the response are URL-decoded
// before being returned, since the request always asks for encoding-type=url.
func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName string, opts ListObjectsOptions, keyMarker, versionIDMarker, delimiter string) (ListVersionsResult, error) {
	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ListVersionsResult{}, err
	}
	// Validate object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
		return ListVersionsResult{}, err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)

	// Set versions to trigger versioning API
	urlValues.Set("versions", "")

	// Set object prefix, prefix value to be set to empty is okay.
	urlValues.Set("prefix", opts.Prefix)

	// Set delimiter, delimiter value to be set to empty is okay.
	urlValues.Set("delimiter", delimiter)

	// Set object marker.
	if keyMarker != "" {
		urlValues.Set("key-marker", keyMarker)
	}

	// Set max keys.
	if opts.MaxKeys > 0 {
		urlValues.Set("max-keys", fmt.Sprintf("%d", opts.MaxKeys))
	}

	// Set version ID marker
	if versionIDMarker != "" {
		urlValues.Set("version-id-marker", versionIDMarker)
	}

	if opts.WithMetadata {
		urlValues.Set("metadata", "true")
	}

	// Always set encoding-type
	urlValues.Set("encoding-type", "url")

	// Execute GET on bucket to list objects.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
		customHeader:     opts.headers,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListVersionsResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}

	// Decode ListVersionsResult XML.
	listObjectVersionsOutput := ListVersionsResult{}
	err = xmlDecoder(resp.Body, &listObjectVersionsOutput)
	if err != nil {
		return ListVersionsResult{}, err
	}

	// URL-decode keys of each returned version.
	for i, obj := range listObjectVersionsOutput.Versions {
		listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType)
		if err != nil {
			return listObjectVersionsOutput, err
		}
	}

	// URL-decode the common prefixes.
	for i, obj := range listObjectVersionsOutput.CommonPrefixes {
		listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType)
		if err != nil {
			return listObjectVersionsOutput, err
		}
	}

	// The next key marker is also encoded; decode it so the follow-up
	// request uses the real key.
	if listObjectVersionsOutput.NextKeyMarker != "" {
		listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType)
		if err != nil {
			return listObjectVersionsOutput, err
		}
	}

	return listObjectVersionsOutput, nil
}
594
// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?marker - Specifies the key to start with when listing objects in a bucket.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
//
// Keys, prefixes and the next marker in the response are URL-decoded before
// being returned, since the request always asks for encoding-type=url.
func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) {
	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ListBucketResult{}, err
	}
	// Validate object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		return ListBucketResult{}, err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)

	// Set object prefix, prefix value to be set to empty is okay.
	urlValues.Set("prefix", objectPrefix)

	// Set delimiter, delimiter value to be set to empty is okay.
	urlValues.Set("delimiter", delimiter)

	// Set object marker.
	if objectMarker != "" {
		urlValues.Set("marker", objectMarker)
	}

	// Set max keys.
	if maxkeys > 0 {
		urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
	}

	// Always set encoding-type
	urlValues.Set("encoding-type", "url")

	// Execute GET on bucket to list objects.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
		customHeader:     headers,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListBucketResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	// Decode listBuckets XML.
	listBucketResult := ListBucketResult{}
	err = xmlDecoder(resp.Body, &listBucketResult)
	if err != nil {
		return listBucketResult, err
	}

	// Decode object keys and normalize timestamps to millisecond precision.
	for i, obj := range listBucketResult.Contents {
		listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
		if err != nil {
			return listBucketResult, err
		}
		listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond)
	}

	// Decode the common prefixes.
	for i, obj := range listBucketResult.CommonPrefixes {
		listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
		if err != nil {
			return listBucketResult, err
		}
	}

	// The next marker is also encoded; decode it so pagination continues
	// from the real key.
	if listBucketResult.NextMarker != "" {
		listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType)
		if err != nil {
			return listBucketResult, err
		}
	}

	return listBucketResult, nil
}
683
// ListObjectsOptions holds all options of a list object request.
type ListObjectsOptions struct {
	// Include objects versions in the listing
	WithVersions bool
	// Include objects metadata in the listing
	WithMetadata bool
	// Only list objects with the prefix
	Prefix string
	// Ignore '/' delimiter
	Recursive bool
	// The maximum number of objects requested per
	// batch, advanced use-case not useful for most
	// applications
	MaxKeys int
	// StartAfter start listing lexically at this
	// object onwards, this value can also be set
	// for Marker when `UseV1` is set to true.
	StartAfter string

	// Use the deprecated list objects V1 API
	UseV1 bool

	// headers holds extra HTTP headers to send with the listing request;
	// populated via the Set method.
	headers http.Header
}
708
// Set adds a key value pair to the options. The
// key-value pair will be part of the HTTP GET request
// headers.
func (o *ListObjectsOptions) Set(key, value string) {
	// Lazily allocate the header map on first use.
	if o.headers == nil {
		o.headers = make(http.Header)
	}
	o.headers.Set(key, value)
}
718
719// ListObjects returns objects list after evaluating the passed options.
720//
721// api := client.New(....)
722// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
723// fmt.Println(object)
724// }
725//
726// If caller cancels the context, then the last entry on the 'chan ObjectInfo' will be the context.Error()
727// caller must drain the channel entirely and wait until channel is closed before proceeding, without
728// waiting on the channel to be closed completely you might leak goroutines.
729func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
730 if opts.WithVersions {
731 return c.listObjectVersions(ctx, bucketName, opts)
732 }
733
734 // Use legacy list objects v1 API
735 if opts.UseV1 {
736 return c.listObjects(ctx, bucketName, opts)
737 }
738
739 // Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1.
740 if location, ok := c.bucketLocCache.Get(bucketName); ok {
741 if location == "snowball" {
742 return c.listObjects(ctx, bucketName, opts)
743 }
744 }
745
746 return c.listObjectsV2(ctx, bucketName, opts)
747}
748
// ListIncompleteUploads - List incompletely uploaded multipart objects.
//
// ListIncompleteUploads lists all incompleted objects matching the
// objectPrefix from the specified bucket. If recursion is enabled
// it would list all subdirectories and all its contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive.
// If you enable recursive as 'true' this function will return back all
// the multipart objects in a given bucket name.
//
//	api := client.New(....)
//	// Recursively list all objects in 'mytestbucket'
//	recursive := true
//	for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
//		fmt.Println(message)
//	}
func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
	return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
}
768
769// contextCanceled returns whether a context is canceled.
770func contextCanceled(ctx context.Context) bool {
771 select {
772 case <-ctx.Done():
773 return true
774 default:
775 return false
776 }
777}
778
// listIncompleteUploads lists all incomplete uploads matching objectPrefix,
// streaming them over a channel. The worker goroutine closes the channel;
// callers must drain it fully. On cancellation the final entry carries
// ctx.Err().
func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
	// Allocate channel for multipart uploads.
	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
	// Delimiter is set to "/" by default.
	delimiter := "/"
	if recursive {
		// If recursive do not delimit.
		delimiter = ""
	}
	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(objectMultipartStatCh)
		objectMultipartStatCh <- ObjectMultipartInfo{
			Err: err,
		}
		return objectMultipartStatCh
	}
	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		defer close(objectMultipartStatCh)
		objectMultipartStatCh <- ObjectMultipartInfo{
			Err: err,
		}
		return objectMultipartStatCh
	}
	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
		defer func() {
			// Surface cancellation as the last entry before closing.
			if contextCanceled(ctx) {
				objectMultipartStatCh <- ObjectMultipartInfo{
					Err: ctx.Err(),
				}
			}
			close(objectMultipartStatCh)
		}()

		// object and upload ID marker for future requests.
		var objectMarker string
		var uploadIDMarker string
		for {
			// list all multipart uploads.
			result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0)
			if err != nil {
				objectMultipartStatCh <- ObjectMultipartInfo{
					Err: err,
				}
				return
			}
			objectMarker = result.NextKeyMarker
			uploadIDMarker = result.NextUploadIDMarker

			// Send all multipart uploads.
			for _, obj := range result.Uploads {
				// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
				select {
				// Send individual uploads here.
				case objectMultipartStatCh <- obj:
				// If the context is canceled
				case <-ctx.Done():
					return
				}
			}
			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				select {
				// Send delimited prefixes here.
				case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}:
				// If context is canceled.
				case <-ctx.Done():
					return
				}
			}
			// Listing ends if result not truncated, return right here.
			if !result.IsTruncated {
				return
			}
		}
	}(objectMultipartStatCh)
	// return.
	return objectMultipartStatCh
}
861
// listMultipartUploadsQuery - (List Multipart Uploads).
//   - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
// request parameters. :-
// ---------
// ?key-marker - Specifies the multipart upload after which listing should begin.
// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
//
// Keys, prefixes and markers in the response are URL-decoded before being
// returned, since the request always asks for encoding-type=url.
func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
	// Get resources properly escaped and lined up before using them in http request.
	urlValues := make(url.Values)
	// Set uploads.
	urlValues.Set("uploads", "")
	// Set object key marker.
	if keyMarker != "" {
		urlValues.Set("key-marker", keyMarker)
	}
	// Set upload id marker.
	if uploadIDMarker != "" {
		urlValues.Set("upload-id-marker", uploadIDMarker)
	}

	// Set object prefix, prefix value to be set to empty is okay.
	urlValues.Set("prefix", prefix)

	// Set delimiter, delimiter value to be set to empty is okay.
	urlValues.Set("delimiter", delimiter)

	// Always set encoding-type
	urlValues.Set("encoding-type", "url")

	// maxUploads should be 1000 or less.
	if maxUploads > 0 {
		// Set max-uploads.
		urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
	}

	// Execute GET on bucketName to list multipart uploads.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListMultipartUploadsResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	// Decode response body.
	listMultipartUploadsResult := ListMultipartUploadsResult{}
	err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
	if err != nil {
		return listMultipartUploadsResult, err
	}

	// Decode the next markers so pagination continues from the real keys.
	listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType)
	if err != nil {
		return listMultipartUploadsResult, err
	}

	listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType)
	if err != nil {
		return listMultipartUploadsResult, err
	}

	// Decode the key of every listed upload.
	for i, obj := range listMultipartUploadsResult.Uploads {
		listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType)
		if err != nil {
			return listMultipartUploadsResult, err
		}
	}

	// Decode the common prefixes.
	for i, obj := range listMultipartUploadsResult.CommonPrefixes {
		listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType)
		if err != nil {
			return listMultipartUploadsResult, err
		}
	}

	return listMultipartUploadsResult, nil
}
950
// listObjectParts list all object parts recursively.
// It pages through the ListParts API (1000 parts per request) and returns
// the parts keyed by part number, with ETags stripped of surrounding quotes.
//
//lint:ignore U1000 Keep this around
func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
	// Part number marker for the next batch of request.
	var nextPartNumberMarker int
	partsInfo = make(map[int]ObjectPart)
	for {
		// Get list of uploaded parts a maximum of 1000 per request.
		listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
		if err != nil {
			return nil, err
		}
		// Append to parts info.
		for _, part := range listObjPartsResult.ObjectParts {
			// Trim off the odd double quotes from ETag in the beginning and end.
			part.ETag = trimEtag(part.ETag)
			partsInfo[part.PartNumber] = part
		}
		// Keep part number marker, for the next iteration.
		nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
		// Listing ends result is not truncated, return right here.
		if !listObjPartsResult.IsTruncated {
			break
		}
	}

	// Return all the parts.
	return partsInfo, nil
}
981
982// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name.
983func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) {
984 var uploadIDs []string
985 // Make list incomplete uploads recursive.
986 isRecursive := true
987 // List all incomplete uploads.
988 for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) {
989 if mpUpload.Err != nil {
990 return nil, mpUpload.Err
991 }
992 if objectName == mpUpload.Key {
993 uploadIDs = append(uploadIDs, mpUpload.UploadID)
994 }
995 }
996 // Return the latest upload id.
997 return uploadIDs, nil
998}
999
// listObjectPartsQuery (List Parts query)
//   - lists some or all (up to 1000) parts that have been uploaded
//     for a specific multipart upload
//
// You can use the request parameters as selection criteria to return
// a subset of the uploads in a bucket, request parameters :-
// ---------
// ?part-number-marker - Specifies the part after which listing should
// begin.
// ?max-parts - Maximum parts to be listed per request.
func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
	// Get resources properly escaped and lined up before using them in http request.
	urlValues := make(url.Values)
	// Set part number marker.
	urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
	// Set upload id.
	urlValues.Set("uploadId", uploadID)

	// maxParts should be 1000 or less.
	if maxParts > 0 {
		// Set max parts.
		urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
	}

	// Execute GET on objectName to get list of parts.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListObjectPartsResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}
	// Decode list object parts XML.
	listObjectPartsResult := ListObjectPartsResult{}
	err = xmlDecoder(resp.Body, &listObjectPartsResult)
	if err != nil {
		return listObjectPartsResult, err
	}
	return listObjectPartsResult, nil
}
1048
// decodeS3Name decodes an S3 object name according to the response's
// encoding type. Only "url" encoding is recognized; any other encoding
// type returns the name unchanged.
func decodeS3Name(name, encodingType string) (string, error) {
	if encodingType == "url" {
		return url.QueryUnescape(name)
	}
	return name, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
new file mode 100644
index 0000000..0c027d5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
@@ -0,0 +1,176 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "fmt"
25 "net/http"
26 "net/url"
27
28 "github.com/minio/minio-go/v7/pkg/s3utils"
29)
30
// objectLegalHold - object legal hold specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html
type objectLegalHold struct {
	XMLNS   string          `xml:"xmlns,attr,omitempty"`
	XMLName xml.Name        `xml:"LegalHold"`
	Status  LegalHoldStatus `xml:"Status,omitempty"`
}

// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call
type PutObjectLegalHoldOptions struct {
	VersionID string           // target a specific object version when non-empty
	Status    *LegalHoldStatus // required; must be LegalHoldEnabled or LegalHoldDisabled
}

// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call
type GetObjectLegalHoldOptions struct {
	VersionID string // target a specific object version when non-empty
}

// LegalHoldStatus - object legal hold status.
type LegalHoldStatus string

const (
	// LegalHoldEnabled indicates legal hold is enabled
	LegalHoldEnabled LegalHoldStatus = "ON"

	// LegalHoldDisabled indicates legal hold is disabled
	LegalHoldDisabled LegalHoldStatus = "OFF"
)

// String returns the legal hold status as its wire representation.
func (r LegalHoldStatus) String() string {
	return string(r)
}

// IsValid - check whether this legal hold status is valid or not.
func (r LegalHoldStatus) IsValid() bool {
	return r == LegalHoldEnabled || r == LegalHoldDisabled
}

// newObjectLegalHold builds the XML payload for a legal hold request.
// It returns an error when status is nil or not one of ON/OFF.
func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) {
	if status == nil {
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		return nil, fmt.Errorf("status not set")
	}
	if !status.IsValid() {
		return nil, fmt.Errorf("invalid legal hold status `%v`", status)
	}
	return &objectLegalHold{Status: *status}, nil
}
82
83// PutObjectLegalHold : sets object legal hold for a given object and versionID.
84func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error {
85 // Input validation.
86 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
87 return err
88 }
89
90 if err := s3utils.CheckValidObjectName(objectName); err != nil {
91 return err
92 }
93
94 // Get resources properly escaped and lined up before
95 // using them in http request.
96 urlValues := make(url.Values)
97 urlValues.Set("legal-hold", "")
98
99 if opts.VersionID != "" {
100 urlValues.Set("versionId", opts.VersionID)
101 }
102
103 lh, err := newObjectLegalHold(opts.Status)
104 if err != nil {
105 return err
106 }
107
108 lhData, err := xml.Marshal(lh)
109 if err != nil {
110 return err
111 }
112
113 reqMetadata := requestMetadata{
114 bucketName: bucketName,
115 objectName: objectName,
116 queryValues: urlValues,
117 contentBody: bytes.NewReader(lhData),
118 contentLength: int64(len(lhData)),
119 contentMD5Base64: sumMD5Base64(lhData),
120 contentSHA256Hex: sum256Hex(lhData),
121 }
122
123 // Execute PUT Object Legal Hold.
124 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
125 defer closeResponse(resp)
126 if err != nil {
127 return err
128 }
129 if resp != nil {
130 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
131 return httpRespToErrorResponse(resp, bucketName, objectName)
132 }
133 }
134 return nil
135}
136
137// GetObjectLegalHold gets legal-hold status of given object.
138func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) {
139 // Input validation.
140 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
141 return nil, err
142 }
143
144 if err := s3utils.CheckValidObjectName(objectName); err != nil {
145 return nil, err
146 }
147 urlValues := make(url.Values)
148 urlValues.Set("legal-hold", "")
149
150 if opts.VersionID != "" {
151 urlValues.Set("versionId", opts.VersionID)
152 }
153
154 // Execute GET on bucket to list objects.
155 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
156 bucketName: bucketName,
157 objectName: objectName,
158 queryValues: urlValues,
159 contentSHA256Hex: emptySHA256Hex,
160 })
161 defer closeResponse(resp)
162 if err != nil {
163 return nil, err
164 }
165 if resp != nil {
166 if resp.StatusCode != http.StatusOK {
167 return nil, httpRespToErrorResponse(resp, bucketName, objectName)
168 }
169 }
170 lh := &objectLegalHold{}
171 if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil {
172 return nil, err
173 }
174
175 return &lh.Status, nil
176}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go
new file mode 100644
index 0000000..f0a4398
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go
@@ -0,0 +1,241 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "fmt"
25 "net/http"
26 "net/url"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30)
31
// RetentionMode - object retention mode.
type RetentionMode string

const (
	// Governance - governance mode.
	Governance RetentionMode = "GOVERNANCE"

	// Compliance - compliance mode.
	Compliance RetentionMode = "COMPLIANCE"
)

// String returns the retention mode as its wire representation.
func (r RetentionMode) String() string {
	return string(r)
}

// IsValid - check whether this retention mode is valid or not.
func (r RetentionMode) IsValid() bool {
	switch r {
	case Governance, Compliance:
		return true
	}
	return false
}

// ValidityUnit - retention validity unit.
type ValidityUnit string

const (
	// Days - denotes no. of days.
	Days ValidityUnit = "DAYS"

	// Years - denotes no. of years.
	Years ValidityUnit = "YEARS"
)

// String returns the validity unit as its wire representation.
func (unit ValidityUnit) String() string {
	return string(unit)
}

// isValid - check whether this validity unit is valid or not.
func (unit ValidityUnit) isValid() bool {
	switch unit {
	case Days, Years:
		return true
	}
	return false
}

// Retention - bucket level retention configuration.
type Retention struct {
	Mode     RetentionMode
	Validity time.Duration
}

// String renders the retention as "{Mode:…, Validity:…}".
func (r Retention) String() string {
	return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity)
}

// IsEmpty - returns whether retention is empty or not.
func (r Retention) IsEmpty() bool {
	return r.Mode == "" || r.Validity == 0
}

// objectLockConfig - object lock configuration specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
type objectLockConfig struct {
	XMLNS             string   `xml:"xmlns,attr,omitempty"`
	XMLName           xml.Name `xml:"ObjectLockConfiguration"`
	ObjectLockEnabled string   `xml:"ObjectLockEnabled"`
	Rule              *struct {
		DefaultRetention struct {
			Mode  RetentionMode `xml:"Mode"`
			Days  *uint         `xml:"Days"`
			Years *uint         `xml:"Years"`
		} `xml:"DefaultRetention"`
	} `xml:"Rule,omitempty"`
}

// newObjectLockConfig builds an object lock configuration payload.
// mode, validity and unit must either all be nil (lock enabled, no
// default retention rule) or all be set.
func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) {
	cfg := &objectLockConfig{ObjectLockEnabled: "Enabled"}

	// No default retention rule requested.
	if mode == nil && validity == nil && unit == nil {
		return cfg, nil
	}
	// A partial specification is rejected.
	if mode == nil || validity == nil || unit == nil {
		return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed")
	}

	if !mode.IsValid() {
		return nil, fmt.Errorf("invalid retention mode `%v`", mode)
	}
	if !unit.isValid() {
		return nil, fmt.Errorf("invalid validity unit `%v`", unit)
	}

	// The rule's anonymous struct type must match the Rule field exactly.
	cfg.Rule = &struct {
		DefaultRetention struct {
			Mode  RetentionMode `xml:"Mode"`
			Days  *uint         `xml:"Days"`
			Years *uint         `xml:"Years"`
		} `xml:"DefaultRetention"`
	}{}
	cfg.Rule.DefaultRetention.Mode = *mode
	if *unit == Days {
		cfg.Rule.DefaultRetention.Days = validity
	} else {
		cfg.Rule.DefaultRetention.Years = validity
	}
	return cfg, nil
}
140
141// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
142func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
143 // Input validation.
144 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
145 return err
146 }
147
148 // Get resources properly escaped and lined up before
149 // using them in http request.
150 urlValues := make(url.Values)
151 urlValues.Set("object-lock", "")
152
153 config, err := newObjectLockConfig(mode, validity, unit)
154 if err != nil {
155 return err
156 }
157
158 configData, err := xml.Marshal(config)
159 if err != nil {
160 return err
161 }
162
163 reqMetadata := requestMetadata{
164 bucketName: bucketName,
165 queryValues: urlValues,
166 contentBody: bytes.NewReader(configData),
167 contentLength: int64(len(configData)),
168 contentMD5Base64: sumMD5Base64(configData),
169 contentSHA256Hex: sum256Hex(configData),
170 }
171
172 // Execute PUT bucket object lock configuration.
173 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
174 defer closeResponse(resp)
175 if err != nil {
176 return err
177 }
178 if resp != nil {
179 if resp.StatusCode != http.StatusOK {
180 return httpRespToErrorResponse(resp, bucketName, "")
181 }
182 }
183 return nil
184}
185
186// GetObjectLockConfig gets object lock configuration of given bucket.
187func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
188 // Input validation.
189 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
190 return "", nil, nil, nil, err
191 }
192
193 urlValues := make(url.Values)
194 urlValues.Set("object-lock", "")
195
196 // Execute GET on bucket to list objects.
197 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
198 bucketName: bucketName,
199 queryValues: urlValues,
200 contentSHA256Hex: emptySHA256Hex,
201 })
202 defer closeResponse(resp)
203 if err != nil {
204 return "", nil, nil, nil, err
205 }
206 if resp != nil {
207 if resp.StatusCode != http.StatusOK {
208 return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "")
209 }
210 }
211 config := &objectLockConfig{}
212 if err = xml.NewDecoder(resp.Body).Decode(config); err != nil {
213 return "", nil, nil, nil, err
214 }
215
216 if config.Rule != nil {
217 mode = &config.Rule.DefaultRetention.Mode
218 if config.Rule.DefaultRetention.Days != nil {
219 validity = config.Rule.DefaultRetention.Days
220 days := Days
221 unit = &days
222 } else {
223 validity = config.Rule.DefaultRetention.Years
224 years := Years
225 unit = &years
226 }
227 return config.ObjectLockEnabled, mode, validity, unit, nil
228 }
229 return config.ObjectLockEnabled, nil, nil, nil, nil
230}
231
// GetBucketObjectLockConfig gets object lock configuration of given bucket.
// It is a convenience wrapper over GetObjectLockConfig that discards the
// ObjectLockEnabled flag and returns only the default-retention settings.
func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
	_, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName)
	return mode, validity, unit, err
}

// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
// Thin alias over SetBucketObjectLockConfig.
func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
	return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit)
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go
new file mode 100644
index 0000000..b29cb1f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go
@@ -0,0 +1,165 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "fmt"
25 "net/http"
26 "net/url"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30)
31
32// objectRetention - object retention specified in
33// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
34type objectRetention struct {
35 XMLNS string `xml:"xmlns,attr,omitempty"`
36 XMLName xml.Name `xml:"Retention"`
37 Mode RetentionMode `xml:"Mode,omitempty"`
38 RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"`
39}
40
41func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) {
42 objectRetention := &objectRetention{}
43
44 if date != nil && !date.IsZero() {
45 objectRetention.RetainUntilDate = date
46 }
47 if mode != nil {
48 if !mode.IsValid() {
49 return nil, fmt.Errorf("invalid retention mode `%v`", mode)
50 }
51 objectRetention.Mode = *mode
52 }
53
54 return objectRetention, nil
55}
56
// PutObjectRetentionOptions represents options specified by user for PutObject call
type PutObjectRetentionOptions struct {
	GovernanceBypass bool           // when true the bypass-governance-retention header is sent
	Mode             *RetentionMode // nil leaves the retention mode unset; must be valid if non-nil
	RetainUntilDate  *time.Time     // nil or zero leaves the retain-until date unset
	VersionID        string         // target a specific object version when non-empty
}
64
65// PutObjectRetention sets object retention for a given object and versionID.
66func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error {
67 // Input validation.
68 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
69 return err
70 }
71
72 if err := s3utils.CheckValidObjectName(objectName); err != nil {
73 return err
74 }
75
76 // Get resources properly escaped and lined up before
77 // using them in http request.
78 urlValues := make(url.Values)
79 urlValues.Set("retention", "")
80
81 if opts.VersionID != "" {
82 urlValues.Set("versionId", opts.VersionID)
83 }
84
85 retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate)
86 if err != nil {
87 return err
88 }
89
90 retentionData, err := xml.Marshal(retention)
91 if err != nil {
92 return err
93 }
94
95 // Build headers.
96 headers := make(http.Header)
97
98 if opts.GovernanceBypass {
99 // Set the bypass goverenance retention header
100 headers.Set(amzBypassGovernance, "true")
101 }
102
103 reqMetadata := requestMetadata{
104 bucketName: bucketName,
105 objectName: objectName,
106 queryValues: urlValues,
107 contentBody: bytes.NewReader(retentionData),
108 contentLength: int64(len(retentionData)),
109 contentMD5Base64: sumMD5Base64(retentionData),
110 contentSHA256Hex: sum256Hex(retentionData),
111 customHeader: headers,
112 }
113
114 // Execute PUT Object Retention.
115 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
116 defer closeResponse(resp)
117 if err != nil {
118 return err
119 }
120 if resp != nil {
121 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
122 return httpRespToErrorResponse(resp, bucketName, objectName)
123 }
124 }
125 return nil
126}
127
128// GetObjectRetention gets retention of given object.
129func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
130 // Input validation.
131 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
132 return nil, nil, err
133 }
134
135 if err := s3utils.CheckValidObjectName(objectName); err != nil {
136 return nil, nil, err
137 }
138 urlValues := make(url.Values)
139 urlValues.Set("retention", "")
140 if versionID != "" {
141 urlValues.Set("versionId", versionID)
142 }
143 // Execute GET on bucket to list objects.
144 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
145 bucketName: bucketName,
146 objectName: objectName,
147 queryValues: urlValues,
148 contentSHA256Hex: emptySHA256Hex,
149 })
150 defer closeResponse(resp)
151 if err != nil {
152 return nil, nil, err
153 }
154 if resp != nil {
155 if resp.StatusCode != http.StatusOK {
156 return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName)
157 }
158 }
159 retention := &objectRetention{}
160 if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil {
161 return nil, nil, err
162 }
163
164 return &retention.Mode, retention.RetainUntilDate, nil
165}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
new file mode 100644
index 0000000..6623e26
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
@@ -0,0 +1,177 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "net/http"
25 "net/url"
26
27 "github.com/minio/minio-go/v7/pkg/s3utils"
28 "github.com/minio/minio-go/v7/pkg/tags"
29)
30
// PutObjectTaggingOptions holds an object version id
// to update tag(s) of a specific object version
type PutObjectTaggingOptions struct {
	VersionID string                       // target a specific object version when non-empty
	Internal  AdvancedObjectTaggingOptions // reserved for MinIO server-side use
}

// AdvancedObjectTaggingOptions for internal use by MinIO server - not intended for client use.
type AdvancedObjectTaggingOptions struct {
	ReplicationProxyRequest string // forwarded as the replication proxy-request header when non-empty
}
42
43// PutObjectTagging replaces or creates object tag(s) and can target
44// a specific object version in a versioned bucket.
45func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error {
46 // Input validation.
47 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
48 return err
49 }
50
51 // Get resources properly escaped and lined up before
52 // using them in http request.
53 urlValues := make(url.Values)
54 urlValues.Set("tagging", "")
55
56 if opts.VersionID != "" {
57 urlValues.Set("versionId", opts.VersionID)
58 }
59 headers := make(http.Header, 0)
60 if opts.Internal.ReplicationProxyRequest != "" {
61 headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
62 }
63 reqBytes, err := xml.Marshal(otags)
64 if err != nil {
65 return err
66 }
67
68 reqMetadata := requestMetadata{
69 bucketName: bucketName,
70 objectName: objectName,
71 queryValues: urlValues,
72 contentBody: bytes.NewReader(reqBytes),
73 contentLength: int64(len(reqBytes)),
74 contentMD5Base64: sumMD5Base64(reqBytes),
75 customHeader: headers,
76 }
77
78 // Execute PUT to set a object tagging.
79 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
80 defer closeResponse(resp)
81 if err != nil {
82 return err
83 }
84 if resp != nil {
85 if resp.StatusCode != http.StatusOK {
86 return httpRespToErrorResponse(resp, bucketName, objectName)
87 }
88 }
89 return nil
90}
91
// GetObjectTaggingOptions holds the object version ID
// to fetch the tagging key/value pairs
type GetObjectTaggingOptions struct {
	VersionID string                       // target a specific object version when non-empty
	Internal  AdvancedObjectTaggingOptions // reserved for MinIO server-side use
}
98
99// GetObjectTagging fetches object tag(s) with options to target
100// a specific object version in a versioned bucket.
101func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) {
102 // Get resources properly escaped and lined up before
103 // using them in http request.
104 urlValues := make(url.Values)
105 urlValues.Set("tagging", "")
106
107 if opts.VersionID != "" {
108 urlValues.Set("versionId", opts.VersionID)
109 }
110 headers := make(http.Header, 0)
111 if opts.Internal.ReplicationProxyRequest != "" {
112 headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
113 }
114 // Execute GET on object to get object tag(s)
115 resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
116 bucketName: bucketName,
117 objectName: objectName,
118 queryValues: urlValues,
119 customHeader: headers,
120 })
121
122 defer closeResponse(resp)
123 if err != nil {
124 return nil, err
125 }
126
127 if resp != nil {
128 if resp.StatusCode != http.StatusOK {
129 return nil, httpRespToErrorResponse(resp, bucketName, objectName)
130 }
131 }
132
133 return tags.ParseObjectXML(resp.Body)
134}
135
// RemoveObjectTaggingOptions holds the version id of the object to remove
type RemoveObjectTaggingOptions struct {
	VersionID string                       // target a specific object version when non-empty
	Internal  AdvancedObjectTaggingOptions // reserved for MinIO server-side use
}
141
142// RemoveObjectTagging removes object tag(s) with options to control a specific object
143// version in a versioned bucket
144func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error {
145 // Get resources properly escaped and lined up before
146 // using them in http request.
147 urlValues := make(url.Values)
148 urlValues.Set("tagging", "")
149
150 if opts.VersionID != "" {
151 urlValues.Set("versionId", opts.VersionID)
152 }
153 headers := make(http.Header, 0)
154 if opts.Internal.ReplicationProxyRequest != "" {
155 headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
156 }
157 // Execute DELETE on object to remove object tag(s)
158 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
159 bucketName: bucketName,
160 objectName: objectName,
161 queryValues: urlValues,
162 customHeader: headers,
163 })
164
165 defer closeResponse(resp)
166 if err != nil {
167 return err
168 }
169
170 if resp != nil {
171 // S3 returns "204 No content" after Object tag deletion.
172 if resp.StatusCode != http.StatusNoContent {
173 return httpRespToErrorResponse(resp, bucketName, objectName)
174 }
175 }
176 return err
177}
diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go
new file mode 100644
index 0000000..9e85f81
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go
@@ -0,0 +1,228 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "errors"
23 "net/http"
24 "net/url"
25 "time"
26
27 "github.com/minio/minio-go/v7/pkg/s3utils"
28 "github.com/minio/minio-go/v7/pkg/signer"
29)
30
31// presignURL - Returns a presigned URL for an input 'method'.
32// Expires maximum is 7days - ie. 604800 and minimum is 1.
33func (c *Client) presignURL(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
34 // Input validation.
35 if method == "" {
36 return nil, errInvalidArgument("method cannot be empty.")
37 }
38 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
39 return nil, err
40 }
41 if err = isValidExpiry(expires); err != nil {
42 return nil, err
43 }
44
45 // Convert expires into seconds.
46 expireSeconds := int64(expires / time.Second)
47 reqMetadata := requestMetadata{
48 presignURL: true,
49 bucketName: bucketName,
50 objectName: objectName,
51 expires: expireSeconds,
52 queryValues: reqParams,
53 extraPresignHeader: extraHeaders,
54 }
55
56 // Instantiate a new request.
57 // Since expires is set newRequest will presign the request.
58 var req *http.Request
59 if req, err = c.newRequest(ctx, method, reqMetadata); err != nil {
60 return nil, err
61 }
62 return req.URL, nil
63}
64
65// PresignedGetObject - Returns a presigned URL to access an object
66// data without credentials. URL can have a maximum expiry of
67// upto 7days or a minimum of 1sec. Additionally you can override
68// a set of response headers using the query parameters.
69func (c *Client) PresignedGetObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
70 if err = s3utils.CheckValidObjectName(objectName); err != nil {
71 return nil, err
72 }
73 return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil)
74}
75
76// PresignedHeadObject - Returns a presigned URL to access
77// object metadata without credentials. URL can have a maximum expiry
78// of upto 7days or a minimum of 1sec. Additionally you can override
79// a set of response headers using the query parameters.
80func (c *Client) PresignedHeadObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
81 if err = s3utils.CheckValidObjectName(objectName); err != nil {
82 return nil, err
83 }
84 return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil)
85}
86
87// PresignedPutObject - Returns a presigned URL to upload an object
88// without credentials. URL can have a maximum expiry of upto 7days
89// or a minimum of 1sec.
90func (c *Client) PresignedPutObject(ctx context.Context, bucketName, objectName string, expires time.Duration) (u *url.URL, err error) {
91 if err = s3utils.CheckValidObjectName(objectName); err != nil {
92 return nil, err
93 }
94 return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil)
95}
96
// PresignHeader - similar to Presign() but allows including HTTP headers that
// will be used to build the signature. The request using the resulting URL will
// need to have the exact same headers to be added for signature validation to
// pass.
//
// FIXME: The extra header parameter should be included in Presign() in the next
// major version bump, and this function should then be deprecated.
func (c *Client) PresignHeader(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
	// Delegates to presignURL, forwarding the extra headers to be signed.
	return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)
}
107
// Presign - returns a presigned URL for any http method of your choice along
// with custom request params and extra signed headers. URL can have a maximum
// expiry of upto 7days or a minimum of 1sec.
func (c *Client) Presign(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
	// Delegates to presignURL with no extra signed headers.
	return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil)
}
114
// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
// NOTE(review): p.formData is mutated in place and returned; the caller and
// this policy object share the same map.
func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
	// Validate input arguments: the policy must carry an expiration plus
	// "key" and "bucket" form fields before it can be signed.
	if p.expiration.IsZero() {
		return nil, nil, errors.New("Expiration time must be specified")
	}
	if _, ok := p.formData["key"]; !ok {
		return nil, nil, errors.New("object key must be specified")
	}
	if _, ok := p.formData["bucket"]; !ok {
		return nil, nil, errors.New("bucket name must be specified")
	}

	bucketName := p.formData["bucket"]
	// Fetch the bucket location.
	location, err := c.getBucketLocation(ctx, bucketName)
	if err != nil {
		return nil, nil, err
	}

	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)

	// Target URL for the POST upload (bucket-level, no object path).
	u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil)
	if err != nil {
		return nil, nil, err
	}

	// Get credentials from the configured credentials provider.
	credValues, err := c.credsProvider.Get()
	if err != nil {
		return nil, nil, err
	}

	var (
		signerType      = credValues.SignerType
		sessionToken    = credValues.SessionToken
		accessKeyID     = credValues.AccessKeyID
		secretAccessKey = credValues.SecretAccessKey
	)

	// A POST policy cannot be signed without credentials.
	if signerType.IsAnonymous() {
		return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials")
	}

	// Keep time.
	t := time.Now().UTC()
	// For signature version '2' handle here.
	if signerType.IsV2() {
		policyBase64 := p.base64()
		p.formData["policy"] = policyBase64
		// For Google endpoint set this value to be 'GoogleAccessId'.
		if s3utils.IsGoogleEndpoint(*c.endpointURL) {
			p.formData["GoogleAccessId"] = accessKeyID
		} else {
			// For all other endpoints set this value to be 'AWSAccessKeyId'.
			p.formData["AWSAccessKeyId"] = accessKeyID
		}
		// Sign the policy.
		p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
		return u, p.formData, nil
	}

	// Signature V4 path: the date, algorithm, credential and (optional)
	// security-token conditions below must be added to the policy before
	// it is base64-encoded, so the signed policy matches the form fields.

	// Add date policy.
	if err = p.addNewPolicy(policyCondition{
		matchType: "eq",
		condition: "$x-amz-date",
		value:     t.Format(iso8601DateFormat),
	}); err != nil {
		return nil, nil, err
	}

	// Add algorithm policy.
	if err = p.addNewPolicy(policyCondition{
		matchType: "eq",
		condition: "$x-amz-algorithm",
		value:     signV4Algorithm,
	}); err != nil {
		return nil, nil, err
	}

	// Add a credential policy.
	credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3)
	if err = p.addNewPolicy(policyCondition{
		matchType: "eq",
		condition: "$x-amz-credential",
		value:     credential,
	}); err != nil {
		return nil, nil, err
	}

	if sessionToken != "" {
		if err = p.addNewPolicy(policyCondition{
			matchType: "eq",
			condition: "$x-amz-security-token",
			value:     sessionToken,
		}); err != nil {
			return nil, nil, err
		}
	}

	// Get base64 encoded policy.
	policyBase64 := p.base64()

	// Fill in the form data.
	p.formData["policy"] = policyBase64
	p.formData["x-amz-algorithm"] = signV4Algorithm
	p.formData["x-amz-credential"] = credential
	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
	if sessionToken != "" {
		p.formData["x-amz-security-token"] = sessionToken
	}
	p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
	return u, p.formData, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
new file mode 100644
index 0000000..7376669
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
@@ -0,0 +1,123 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "net/http"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29// Bucket operations
30func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
31 // Validate the input arguments.
32 if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
33 return err
34 }
35
36 err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking)
37 if err != nil && (opts.Region == "" || opts.Region == "us-east-1") {
38 if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" {
39 err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking)
40 }
41 }
42 return err
43}
44
45func (c *Client) doMakeBucket(ctx context.Context, bucketName, location string, objectLockEnabled bool) (err error) {
46 defer func() {
47 // Save the location into cache on a successful makeBucket response.
48 if err == nil {
49 c.bucketLocCache.Set(bucketName, location)
50 }
51 }()
52
53 // If location is empty, treat is a default region 'us-east-1'.
54 if location == "" {
55 location = "us-east-1"
56 // For custom region clients, default
57 // to custom region instead not 'us-east-1'.
58 if c.region != "" {
59 location = c.region
60 }
61 }
62 // PUT bucket request metadata.
63 reqMetadata := requestMetadata{
64 bucketName: bucketName,
65 bucketLocation: location,
66 }
67
68 if objectLockEnabled {
69 headers := make(http.Header)
70 headers.Add("x-amz-bucket-object-lock-enabled", "true")
71 reqMetadata.customHeader = headers
72 }
73
74 // If location is not 'us-east-1' create bucket location config.
75 if location != "us-east-1" && location != "" {
76 createBucketConfig := createBucketConfiguration{}
77 createBucketConfig.Location = location
78 var createBucketConfigBytes []byte
79 createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
80 if err != nil {
81 return err
82 }
83 reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
84 reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
85 reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
86 reqMetadata.contentLength = int64(len(createBucketConfigBytes))
87 }
88
89 // Execute PUT to create a new bucket.
90 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
91 defer closeResponse(resp)
92 if err != nil {
93 return err
94 }
95
96 if resp != nil {
97 if resp.StatusCode != http.StatusOK {
98 return httpRespToErrorResponse(resp, bucketName, "")
99 }
100 }
101
102 // Success.
103 return nil
104}
105
// MakeBucketOptions holds all options to tweak bucket creation
type MakeBucketOptions struct {
	// Region is the location the bucket is created in. Leave empty to use
	// the client's configured region, falling back to 'us-east-1'.
	Region string
	// ObjectLocking requests that the bucket be created with the
	// x-amz-bucket-object-lock-enabled header set.
	ObjectLocking bool
}
113
// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts.
//
// Location is an optional argument, by default all buckets are
// created in US Standard Region.
//
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
	// Thin exported wrapper; validation and region-retry logic live in makeBucket.
	return c.makeBucket(ctx, bucketName, opts)
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
new file mode 100644
index 0000000..9ccb97c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
@@ -0,0 +1,149 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "io"
23 "math"
24 "os"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
// nullVersionID is the literal version ID value "null", accepted in place of
// a UUID-formatted version ID.
const nullVersionID = "null"
30
31// Verify if reader is *minio.Object
32func isObject(reader io.Reader) (ok bool) {
33 _, ok = reader.(*Object)
34 return
35}
36
37// Verify if reader is a generic ReaderAt
38func isReadAt(reader io.Reader) (ok bool) {
39 var v *os.File
40 v, ok = reader.(*os.File)
41 if ok {
42 // Stdin, Stdout and Stderr all have *os.File type
43 // which happen to also be io.ReaderAt compatible
44 // we need to add special conditions for them to
45 // be ignored by this function.
46 for _, f := range []string{
47 "/dev/stdin",
48 "/dev/stdout",
49 "/dev/stderr",
50 } {
51 if f == v.Name() {
52 ok = false
53 break
54 }
55 }
56 } else {
57 _, ok = reader.(io.ReaderAt)
58 }
59 return
60}
61
62// OptimalPartInfo - calculate the optimal part info for a given
63// object size.
64//
65// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
66// object storage it will have the following parameters as constants.
67//
68// maxPartsCount - 10000
69// minPartSize - 16MiB
70// maxMultipartPutObjectSize - 5TiB
71func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) {
72 // object size is '-1' set it to 5TiB.
73 var unknownSize bool
74 if objectSize == -1 {
75 unknownSize = true
76 objectSize = maxMultipartPutObjectSize
77 }
78
79 // object size is larger than supported maximum.
80 if objectSize > maxMultipartPutObjectSize {
81 err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
82 return
83 }
84
85 var partSizeFlt float64
86 if configuredPartSize > 0 {
87 if int64(configuredPartSize) > objectSize {
88 err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "")
89 return
90 }
91
92 if !unknownSize {
93 if objectSize > (int64(configuredPartSize) * maxPartsCount) {
94 err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.")
95 return
96 }
97 }
98
99 if configuredPartSize < absMinPartSize {
100 err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.")
101 return
102 }
103
104 if configuredPartSize > maxPartSize {
105 err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.")
106 return
107 }
108
109 partSizeFlt = float64(configuredPartSize)
110 if unknownSize {
111 // If input has unknown size and part size is configured
112 // keep it to maximum allowed as per 10000 parts.
113 objectSize = int64(configuredPartSize) * maxPartsCount
114 }
115 } else {
116 configuredPartSize = minPartSize
117 // Use floats for part size for all calculations to avoid
118 // overflows during float64 to int64 conversions.
119 partSizeFlt = float64(objectSize / maxPartsCount)
120 partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize)
121 }
122
123 // Total parts count.
124 totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
125 // Part size.
126 partSize = int64(partSizeFlt)
127 // Last part size.
128 lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
129 return totalPartsCount, partSize, lastPartSize, nil
130}
131
132// getUploadID - fetch upload id if already present for an object name
133// or initiate a new request to fetch a new upload id.
134func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
135 // Input validation.
136 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
137 return "", err
138 }
139 if err := s3utils.CheckValidObjectName(objectName); err != nil {
140 return "", err
141 }
142
143 // Initiate multipart upload for an object.
144 initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
145 if err != nil {
146 return "", err
147 }
148 return initMultipartUploadResult.UploadID, nil
149}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
new file mode 100644
index 0000000..0ae9142
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
@@ -0,0 +1,164 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2023 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "encoding/json"
23 "errors"
24 "io"
25 "mime/multipart"
26 "net/http"
27 "strconv"
28 "strings"
29 "time"
30
31 "github.com/minio/minio-go/v7/pkg/encrypt"
32)
33
// PutObjectFanOutEntry is per object entry fan-out metadata
type PutObjectFanOutEntry struct {
	// Key is the destination object name; it is the only mandatory field.
	Key string `json:"key"`
	// UserMetadata carries user-defined metadata for this object.
	UserMetadata map[string]string `json:"metadata,omitempty"`
	// UserTags carries the tag set for this object.
	UserTags map[string]string `json:"tags,omitempty"`
	// Standard per-object content headers, all optional.
	ContentType        string `json:"contentType,omitempty"`
	ContentEncoding    string `json:"contentEncoding,omitempty"`
	ContentDisposition string `json:"contentDisposition,omitempty"`
	ContentLanguage    string `json:"contentLanguage,omitempty"`
	CacheControl       string `json:"cacheControl,omitempty"`
	// Retention and RetainUntilDate configure object-lock retention for
	// this object; they are meant to be used together.
	Retention       RetentionMode `json:"retention,omitempty"`
	RetainUntilDate *time.Time    `json:"retainUntil,omitempty"`
}
47
// PutObjectFanOutRequest this is the request structure sent
// to the server to fan-out the stream to multiple objects.
type PutObjectFanOutRequest struct {
	// Entries lists the destination objects; each entry must carry a Key.
	Entries []PutObjectFanOutEntry
	// Checksum, when set, is attached to the post policy of the upload.
	Checksum Checksum
	// SSE, when set, configures server-side encryption on the post policy.
	SSE encrypt.ServerSide
}
55
// PutObjectFanOutResponse this is the response structure sent
// by the server upon success or failure for each object
// fan-out keys. Additionally, this response carries ETag,
// VersionID and LastModified for each object fan-out.
type PutObjectFanOutResponse struct {
	Key          string     `json:"key"`                    // object name this entry refers to
	ETag         string     `json:"etag,omitempty"`         // ETag of the created object, if reported
	VersionID    string     `json:"versionId,omitempty"`    // version ID of the created object, if reported
	LastModified *time.Time `json:"lastModified,omitempty"` // modification time reported by the server
	Error        string     `json:"error,omitempty"`        // per-key failure message; empty on success
}
67
68// PutObjectFanOut - is a variant of PutObject instead of writing a single object from a single
69// stream multiple objects are written, defined via a list of PutObjectFanOutRequests. Each entry
70// in PutObjectFanOutRequest carries an object keyname and its relevant metadata if any. `Key` is
71// mandatory, rest of the other options in PutObjectFanOutRequest are optional.
72func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData io.Reader, fanOutReq PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) {
73 if len(fanOutReq.Entries) == 0 {
74 return nil, errInvalidArgument("fan out requests cannot be empty")
75 }
76
77 policy := NewPostPolicy()
78 policy.SetBucket(bucket)
79 policy.SetKey(strconv.FormatInt(time.Now().UnixNano(), 16))
80
81 // Expires in 15 minutes.
82 policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))
83
84 // Set encryption headers if any.
85 policy.SetEncryption(fanOutReq.SSE)
86
87 // Set checksum headers if any.
88 policy.SetChecksum(fanOutReq.Checksum)
89
90 url, formData, err := c.PresignedPostPolicy(ctx, policy)
91 if err != nil {
92 return nil, err
93 }
94
95 r, w := io.Pipe()
96
97 req, err := http.NewRequest(http.MethodPost, url.String(), r)
98 if err != nil {
99 w.Close()
100 return nil, err
101 }
102
103 var b strings.Builder
104 enc := json.NewEncoder(&b)
105 for _, req := range fanOutReq.Entries {
106 if req.Key == "" {
107 w.Close()
108 return nil, errors.New("PutObjectFanOutRequest.Key is mandatory and cannot be empty")
109 }
110 if err = enc.Encode(&req); err != nil {
111 w.Close()
112 return nil, err
113 }
114 }
115
116 mwriter := multipart.NewWriter(w)
117 req.Header.Add("Content-Type", mwriter.FormDataContentType())
118
119 go func() {
120 defer w.Close()
121 defer mwriter.Close()
122
123 for k, v := range formData {
124 if err := mwriter.WriteField(k, v); err != nil {
125 return
126 }
127 }
128
129 if err := mwriter.WriteField("x-minio-fanout-list", b.String()); err != nil {
130 return
131 }
132
133 mw, err := mwriter.CreateFormFile("file", "fanout-content")
134 if err != nil {
135 return
136 }
137
138 if _, err = io.Copy(mw, fanOutData); err != nil {
139 return
140 }
141 }()
142
143 resp, err := c.do(req)
144 if err != nil {
145 return nil, err
146 }
147 defer closeResponse(resp)
148
149 if resp.StatusCode != http.StatusOK {
150 return nil, httpRespToErrorResponse(resp, bucket, "fanout-content")
151 }
152
153 dec := json.NewDecoder(resp.Body)
154 fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq.Entries))
155 for dec.More() {
156 var m PutObjectFanOutResponse
157 if err = dec.Decode(&m); err != nil {
158 return nil, err
159 }
160 fanOutResp = append(fanOutResp, m)
161 }
162
163 return fanOutResp, nil
164}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
new file mode 100644
index 0000000..4d29dfc
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
@@ -0,0 +1,64 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "mime"
23 "os"
24 "path/filepath"
25
26 "github.com/minio/minio-go/v7/pkg/s3utils"
27)
28
29// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
30func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) {
31 // Input validation.
32 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
33 return UploadInfo{}, err
34 }
35 if err := s3utils.CheckValidObjectName(objectName); err != nil {
36 return UploadInfo{}, err
37 }
38
39 // Open the referenced file.
40 fileReader, err := os.Open(filePath)
41 // If any error fail quickly here.
42 if err != nil {
43 return UploadInfo{}, err
44 }
45 defer fileReader.Close()
46
47 // Save the file stat.
48 fileStat, err := fileReader.Stat()
49 if err != nil {
50 return UploadInfo{}, err
51 }
52
53 // Save the file size.
54 fileSize := fileStat.Size()
55
56 // Set contentType based on filepath extension if not given or default
57 // value of "application/octet-stream" if the extension has no associated type.
58 if opts.ContentType == "" {
59 if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
60 opts.ContentType = "application/octet-stream"
61 }
62 }
63 return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts)
64}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
new file mode 100644
index 0000000..5f117af
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -0,0 +1,465 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/base64"
24 "encoding/hex"
25 "encoding/xml"
26 "fmt"
27 "hash/crc32"
28 "io"
29 "net/http"
30 "net/url"
31 "sort"
32 "strconv"
33 "strings"
34
35 "github.com/google/uuid"
36 "github.com/minio/minio-go/v7/pkg/encrypt"
37 "github.com/minio/minio-go/v7/pkg/s3utils"
38)
39
40func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
41 opts PutObjectOptions,
42) (info UploadInfo, err error) {
43 info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
44 if err != nil {
45 errResp := ToErrorResponse(err)
46 // Verify if multipart functionality is not available, if not
47 // fall back to single PutObject operation.
48 if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
49 // Verify if size of reader is greater than '5GiB'.
50 if size > maxSinglePutObjectSize {
51 return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
52 }
53 // Fall back to uploading as single PutObject operation.
54 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
55 }
56 }
57 return info, err
58}
59
// putObjectMultipartNoStream uploads an object of unknown total size as a
// multipart upload, buffering one part at a time in memory. Each part is
// hashed (MD5/SHA256 when the signature scheme requires it, CRC32C
// otherwise), uploaded sequentially, and the upload is finally assembled
// via CompleteMultipartUpload. On any failure after initiation the
// multipart upload is aborted server-side.
func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
	// Input validation.
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err = s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Total data read and written to server. should be equal to
	// 'size' at the end of the call.
	var totalUploadedSize int64

	// Complete multipart upload.
	var complMultipartUpload completeMultipartUpload

	// Calculate the optimal parts info for a given size.
	// Size -1 means unknown, so plan for the maximum object size.
	totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
	if err != nil {
		return UploadInfo{}, err
	}

	// Choose hash algorithms to be calculated by hashCopyN,
	// avoid sha256 with non-v4 signature request or
	// HTTPS connection.
	hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
	if len(hashSums) == 0 {
		// No MD5/SHA256 required: request CRC32C checksums instead. The
		// header is only wanted on the initiate request (deleted below).
		if opts.UserMetadata == nil {
			opts.UserMetadata = make(map[string]string, 1)
		}
		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
	}

	// Initiate a new multipart upload.
	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
	if err != nil {
		return UploadInfo{}, err
	}
	// The checksum-algorithm entry was only for the initiate request.
	delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")

	// Abort the multipart upload if anything below fails, so the server
	// does not keep orphaned parts around. Relies on the named `err`.
	defer func() {
		if err != nil {
			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
		}
	}()

	// Part number always starts with '1'.
	partNumber := 1

	// Initialize parts uploaded map.
	partsInfo := make(map[int]ObjectPart)

	// Create a buffer.
	buf := make([]byte, partSize)

	// Create checksums
	// CRC32C is ~50% faster on AMD64 @ 30GB/s
	var crcBytes []byte
	customHeader := make(http.Header)
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	for partNumber <= totalPartsCount {
		length, rErr := readFull(reader, buf)
		// A clean EOF after at least one part means the input ended on a
		// part boundary; stop without uploading an empty part.
		if rErr == io.EOF && partNumber > 1 {
			break
		}

		// ErrUnexpectedEOF / EOF mark a short final part; any other read
		// error is fatal.
		if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
			return UploadInfo{}, rErr
		}

		// Calculates hash sums while copying partSize bytes into cw.
		for k, v := range hashAlgos {
			v.Write(buf[:length])
			hashSums[k] = v.Sum(nil)
			v.Close()
		}

		// Update progress reader appropriately to the latest offset
		// as we read from the source.
		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)

		// Checksums..
		var (
			md5Base64 string
			sha256Hex string
		)

		if hashSums["md5"] != nil {
			md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
		}
		if hashSums["sha256"] != nil {
			sha256Hex = hex.EncodeToString(hashSums["sha256"])
		}
		if len(hashSums) == 0 {
			// Compute the per-part CRC32C; collect all part checksums so a
			// checksum-of-checksums can be attached on completion.
			crc.Reset()
			crc.Write(buf[:length])
			cSum := crc.Sum(nil)
			customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
			crcBytes = append(crcBytes, cSum...)
		}

		p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
		// Proceed to upload the part.
		objPart, uerr := c.uploadPart(ctx, p)
		if uerr != nil {
			return UploadInfo{}, uerr
		}

		// Save successfully uploaded part metadata.
		partsInfo[partNumber] = objPart

		// Save successfully uploaded size.
		totalUploadedSize += int64(length)

		// Increment part number.
		partNumber++

		// For unknown size, Read EOF we break away.
		// We do not have to upload till totalPartsCount.
		if rErr == io.EOF {
			break
		}
	}

	// Loop over total uploaded parts to save them in
	// Parts array before completing the multipart request.
	for i := 1; i < partNumber; i++ {
		part, ok := partsInfo[i]
		if !ok {
			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
		}
		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
			ETag:           part.ETag,
			PartNumber:     part.PartNumber,
			ChecksumCRC32:  part.ChecksumCRC32,
			ChecksumCRC32C: part.ChecksumCRC32C,
			ChecksumSHA1:   part.ChecksumSHA1,
			ChecksumSHA256: part.ChecksumSHA256,
		})
	}

	// Sort all completed parts.
	sort.Sort(completedParts(complMultipartUpload.Parts))
	// Only the encryption setting carries over to the complete call.
	opts = PutObjectOptions{
		ServerSideEncryption: opts.ServerSideEncryption,
	}
	if len(crcBytes) > 0 {
		// Add hash of hashes.
		crc.Reset()
		crc.Write(crcBytes)
		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
	}
	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
	if err != nil {
		return UploadInfo{}, err
	}

	uploadInfo.Size = totalUploadedSize
	return uploadInfo, nil
}
220
221// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
222func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
223 // Input validation.
224 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
225 return initiateMultipartUploadResult{}, err
226 }
227 if err := s3utils.CheckValidObjectName(objectName); err != nil {
228 return initiateMultipartUploadResult{}, err
229 }
230
231 // Initialize url queries.
232 urlValues := make(url.Values)
233 urlValues.Set("uploads", "")
234
235 if opts.Internal.SourceVersionID != "" {
236 if opts.Internal.SourceVersionID != nullVersionID {
237 if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
238 return initiateMultipartUploadResult{}, errInvalidArgument(err.Error())
239 }
240 }
241 urlValues.Set("versionId", opts.Internal.SourceVersionID)
242 }
243
244 // Set ContentType header.
245 customHeader := opts.Header()
246
247 reqMetadata := requestMetadata{
248 bucketName: bucketName,
249 objectName: objectName,
250 queryValues: urlValues,
251 customHeader: customHeader,
252 }
253
254 // Execute POST on an objectName to initiate multipart upload.
255 resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
256 defer closeResponse(resp)
257 if err != nil {
258 return initiateMultipartUploadResult{}, err
259 }
260 if resp != nil {
261 if resp.StatusCode != http.StatusOK {
262 return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
263 }
264 }
265 // Decode xml for new multipart upload.
266 initiateMultipartUploadResult := initiateMultipartUploadResult{}
267 err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
268 if err != nil {
269 return initiateMultipartUploadResult, err
270 }
271 return initiateMultipartUploadResult, nil
272}
273
// uploadPartParams bundles every input uploadPart needs for a single part
// of a multipart upload.
type uploadPartParams struct {
	bucketName   string             // destination bucket
	objectName   string             // destination object key
	uploadID     string             // multipart upload this part belongs to (must be non-empty)
	reader       io.Reader          // part payload
	partNumber   int                // 1-based part index (must be > 0)
	md5Base64    string             // optional pre-computed MD5 of the payload, base64-encoded
	sha256Hex    string             // optional pre-computed SHA256 of the payload, hex-encoded
	size         int64              // exact payload length in bytes
	sse          encrypt.ServerSide // server-side encryption; only SSE-C headers are sent per part
	streamSha256 bool               // compute a streaming sha256 for the request
	customHeader http.Header        // extra request headers (e.g. per-part checksums)
	trailer      http.Header        // trailing headers forwarded with the request
}
288
289// uploadPart - Uploads a part in a multipart upload.
290func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) {
291 // Input validation.
292 if err := s3utils.CheckValidBucketName(p.bucketName); err != nil {
293 return ObjectPart{}, err
294 }
295 if err := s3utils.CheckValidObjectName(p.objectName); err != nil {
296 return ObjectPart{}, err
297 }
298 if p.size > maxPartSize {
299 return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName)
300 }
301 if p.size <= -1 {
302 return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName)
303 }
304 if p.partNumber <= 0 {
305 return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.")
306 }
307 if p.uploadID == "" {
308 return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.")
309 }
310
311 // Get resources properly escaped and lined up before using them in http request.
312 urlValues := make(url.Values)
313 // Set part number.
314 urlValues.Set("partNumber", strconv.Itoa(p.partNumber))
315 // Set upload id.
316 urlValues.Set("uploadId", p.uploadID)
317
318 // Set encryption headers, if any.
319 if p.customHeader == nil {
320 p.customHeader = make(http.Header)
321 }
322 // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
323 // Server-side encryption is supported by the S3 Multipart Upload actions.
324 // Unless you are using a customer-provided encryption key, you don't need
325 // to specify the encryption parameters in each UploadPart request.
326 if p.sse != nil && p.sse.Type() == encrypt.SSEC {
327 p.sse.Marshal(p.customHeader)
328 }
329
330 reqMetadata := requestMetadata{
331 bucketName: p.bucketName,
332 objectName: p.objectName,
333 queryValues: urlValues,
334 customHeader: p.customHeader,
335 contentBody: p.reader,
336 contentLength: p.size,
337 contentMD5Base64: p.md5Base64,
338 contentSHA256Hex: p.sha256Hex,
339 streamSha256: p.streamSha256,
340 trailer: p.trailer,
341 }
342
343 // Execute PUT on each part.
344 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
345 defer closeResponse(resp)
346 if err != nil {
347 return ObjectPart{}, err
348 }
349 if resp != nil {
350 if resp.StatusCode != http.StatusOK {
351 return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName)
352 }
353 }
354 // Once successfully uploaded, return completed part.
355 h := resp.Header
356 objPart := ObjectPart{
357 ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
358 ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
359 ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
360 ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
361 }
362 objPart.Size = p.size
363 objPart.PartNumber = p.partNumber
364 // Trim off the odd double quotes from ETag in the beginning and end.
365 objPart.ETag = trimEtag(h.Get("ETag"))
366 return objPart, nil
367}
368
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
//
// Note that the server may answer 200 OK and still place an error document in
// the response body, so the body is buffered and decoded twice: first as a
// success result, then — if that yields an empty Bucket — as an ErrorResponse.
func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
	complete completeMultipartUpload, opts PutObjectOptions,
) (UploadInfo, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Initialize url queries.
	urlValues := make(url.Values)
	urlValues.Set("uploadId", uploadID)
	// Marshal complete multipart body.
	completeMultipartUploadBytes, err := xml.Marshal(complete)
	if err != nil {
		return UploadInfo{}, err
	}

	headers := opts.Header()
	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
		headers.Del(encrypt.SseKmsKeyID)          // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
		headers.Del(encrypt.SseGenericHeader)     // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
		headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload
	}

	// Instantiate all the complete multipart buffer.
	completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
	reqMetadata := requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		queryValues:      urlValues,
		contentBody:      completeMultipartUploadBuffer,
		contentLength:    int64(len(completeMultipartUploadBytes)),
		contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
		customHeader:     headers,
	}

	// Execute POST to complete multipart upload for an objectName.
	resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return UploadInfo{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}

	// Read resp.Body into a []bytes to parse for Error response inside the body
	var b []byte
	b, err = io.ReadAll(resp.Body)
	if err != nil {
		return UploadInfo{}, err
	}
	// Decode completed multipart upload response on success.
	completeMultipartUploadResult := completeMultipartUploadResult{}
	err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
	if err != nil {
		// xml parsing failure due to presence an ill-formed xml fragment
		return UploadInfo{}, err
	} else if completeMultipartUploadResult.Bucket == "" {
		// xml's Decode method ignores well-formed xml that don't apply to the type of value supplied.
		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
		// of the members.

		// Decode completed multipart upload response on failure
		completeMultipartUploadErr := ErrorResponse{}
		err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
		if err != nil {
			// xml parsing failure due to presence an ill-formed xml fragment
			return UploadInfo{}, err
		}
		return UploadInfo{}, completeMultipartUploadErr
	}

	// extract lifecycle expiry date and rule ID
	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))

	return UploadInfo{
		Bucket:           completeMultipartUploadResult.Bucket,
		Key:              completeMultipartUploadResult.Key,
		ETag:             trimEtag(completeMultipartUploadResult.ETag),
		VersionID:        resp.Header.Get(amzVersionID),
		Location:         completeMultipartUploadResult.Location,
		Expiration:       expTime,
		ExpirationRuleID: ruleID,

		ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
		ChecksumSHA1:   completeMultipartUploadResult.ChecksumSHA1,
		ChecksumCRC32:  completeMultipartUploadResult.ChecksumCRC32,
		ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
	}, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
new file mode 100644
index 0000000..9182d4e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -0,0 +1,809 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/base64"
24 "fmt"
25 "hash/crc32"
26 "io"
27 "net/http"
28 "net/url"
29 "sort"
30 "strings"
31 "sync"
32
33 "github.com/google/uuid"
34 "github.com/minio/minio-go/v7/pkg/s3utils"
35)
36
37// putObjectMultipartStream - upload a large object using
38// multipart upload and streaming signature for signing payload.
39// Comprehensive put object operation involving multipart uploads.
40//
41// Following code handles these types of readers.
42//
43// - *minio.Object
44// - Any reader which has a method 'ReadAt()'
45func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
46 reader io.Reader, size int64, opts PutObjectOptions,
47) (info UploadInfo, err error) {
48 if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
49 info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
50 } else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
51 // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
52 info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
53 } else {
54 info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
55 }
56 if err != nil {
57 errResp := ToErrorResponse(err)
58 // Verify if multipart functionality is not available, if not
59 // fall back to single PutObject operation.
60 if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
61 // Verify if size of reader is greater than '5GiB'.
62 if size > maxSinglePutObjectSize {
63 return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
64 }
65 // Fall back to uploading as single PutObject operation.
66 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
67 }
68 }
69 return info, err
70}
71
// uploadedPartRes - the response received from a part upload.
// It is sent back from an upload worker goroutine to the collector loop.
type uploadedPartRes struct {
	Error   error // Any error encountered while uploading the part.
	PartNum int   // Number of the part uploaded.
	Size    int64 // Size of the part uploaded.
	Part    ObjectPart // Metadata (ETag, checksums, ...) of the uploaded part.
}
79
// uploadPartReq - a request handed to an upload worker for a single part.
type uploadPartReq struct {
	PartNum int        // Number of the part to upload.
	Part    ObjectPart // Filled in with the part's metadata once uploaded.
}
84
// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB.
// Supports all readers which implements io.ReaderAt interface
// (ReadAt method).
//
// NOTE: This function is meant to be used for all readers which
// implement io.ReaderAt which allows us for resuming multipart
// uploads but reading at an offset, which would avoid re-read the
// data which was already uploaded. Internally this function uses
// temporary files for staging all the data, these temporary files are
// cleaned automatically when the caller i.e http client closes the
// stream after uploading all the contents successfully.
func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
	reader io.ReaderAt, size int64, opts PutObjectOptions,
) (info UploadInfo, err error) {
	// Input validation.
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err = s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Calculate the optimal parts info for a given size.
	totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
	if err != nil {
		return UploadInfo{}, err
	}

	// When the transport supports trailing headers, announce the CRC32C
	// checksum algorithm to the server while initiating the upload.
	withChecksum := c.trailingHeaderSupport
	if withChecksum {
		if opts.UserMetadata == nil {
			opts.UserMetadata = make(map[string]string, 1)
		}
		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
	}
	// Initiate a new multipart upload.
	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
	if err != nil {
		return UploadInfo{}, err
	}
	// The algorithm hint is only needed for the initiate request.
	delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")

	// Aborts the multipart upload in progress, if the
	// function returns any error, since we do not resume
	// we should purge the parts which have been uploaded
	// to relinquish storage space.
	defer func() {
		if err != nil {
			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
		}
	}()

	// Total data read and written to server. should be equal to 'size' at the end of the call.
	var totalUploadedSize int64

	// Complete multipart upload.
	var complMultipartUpload completeMultipartUpload

	// Declare a channel that sends the next part number to be uploaded.
	uploadPartsCh := make(chan uploadPartReq)

	// Declare a channel that sends back the response of a part upload.
	uploadedPartsCh := make(chan uploadedPartRes)

	// Used for readability, lastPartNumber is always totalPartsCount.
	lastPartNumber := totalPartsCount

	partitionCtx, partitionCancel := context.WithCancel(ctx)
	defer partitionCancel()
	// Send each part number to the channel to be processed.
	go func() {
		defer close(uploadPartsCh)

		for p := 1; p <= totalPartsCount; p++ {
			select {
			case <-partitionCtx.Done():
				return
			case uploadPartsCh <- uploadPartReq{PartNum: p}:
			}
		}
	}()

	// Receive each part number from the channel allowing three parallel uploads.
	for w := 1; w <= opts.getNumThreads(); w++ {
		// partSize is passed as a parameter so each worker mutates its own
		// copy when it handles the (shorter) last part.
		go func(partSize int64) {
			for {
				var uploadReq uploadPartReq
				var ok bool
				select {
				case <-ctx.Done():
					return
				case uploadReq, ok = <-uploadPartsCh:
					if !ok {
						return
					}
					// Each worker will draw from the part channel and upload in parallel.
				}

				// If partNumber was not uploaded we calculate the missing
				// part offset and size. For all other part numbers we
				// calculate offset based on multiples of partSize.
				readOffset := int64(uploadReq.PartNum-1) * partSize

				// As a special case if partNumber is lastPartNumber, we
				// calculate the offset based on the last part size.
				// Part numbers are sent in ascending order, so no further
				// parts are received by this worker after the last one.
				if uploadReq.PartNum == lastPartNumber {
					readOffset = size - lastPartSize
					partSize = lastPartSize
				}

				sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
				trailer := make(http.Header, 1)
				if withChecksum {
					crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
					// Placeholder value (CRC of nothing); the wrapper below
					// overwrites it with the real sum once the body is read.
					trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
					sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) {
						trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
					})
				}

				// Proceed to upload the part.
				p := uploadPartParams{
					bucketName:   bucketName,
					objectName:   objectName,
					uploadID:     uploadID,
					reader:       sectionReader,
					partNumber:   uploadReq.PartNum,
					size:         partSize,
					sse:          opts.ServerSideEncryption,
					streamSha256: !opts.DisableContentSha256,
					sha256Hex:    "",
					trailer:      trailer,
				}
				objPart, err := c.uploadPart(ctx, p)
				if err != nil {
					uploadedPartsCh <- uploadedPartRes{
						Error: err,
					}
					// Exit the goroutine.
					return
				}

				// Save successfully uploaded part metadata.
				uploadReq.Part = objPart

				// Send successful part info through the channel.
				uploadedPartsCh <- uploadedPartRes{
					Size:    objPart.Size,
					PartNum: uploadReq.PartNum,
					Part:    uploadReq.Part,
				}
			}
		}(partSize)
	}

	// Gather the responses as they occur and update any
	// progress bar.
	for u := 1; u <= totalPartsCount; u++ {
		select {
		case <-ctx.Done():
			return UploadInfo{}, ctx.Err()
		case uploadRes := <-uploadedPartsCh:
			if uploadRes.Error != nil {
				return UploadInfo{}, uploadRes.Error
			}

			// Update the totalUploadedSize.
			totalUploadedSize += uploadRes.Size
			complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
				ETag:           uploadRes.Part.ETag,
				PartNumber:     uploadRes.Part.PartNumber,
				ChecksumCRC32:  uploadRes.Part.ChecksumCRC32,
				ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C,
				ChecksumSHA1:   uploadRes.Part.ChecksumSHA1,
				ChecksumSHA256: uploadRes.Part.ChecksumSHA256,
			})
		}
	}

	// Verify if we uploaded all the data.
	if totalUploadedSize != size {
		return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
	}

	// Sort all completed parts.
	sort.Sort(completedParts(complMultipartUpload.Parts))

	// Only SSE is carried over to the complete request.
	opts = PutObjectOptions{
		ServerSideEncryption: opts.ServerSideEncryption,
	}
	if withChecksum {
		// Add hash of hashes: the full-object checksum is the CRC32C of
		// the concatenated per-part CRC32C digests.
		crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
		for _, part := range complMultipartUpload.Parts {
			cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C)
			if err == nil {
				crc.Write(cs)
			}
		}
		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
	}

	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
	if err != nil {
		return UploadInfo{}, err
	}

	uploadInfo.Size = totalUploadedSize
	return uploadInfo, nil
}
295
296func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
297 reader io.Reader, size int64, opts PutObjectOptions,
298) (info UploadInfo, err error) {
299 // Input validation.
300 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
301 return UploadInfo{}, err
302 }
303 if err = s3utils.CheckValidObjectName(objectName); err != nil {
304 return UploadInfo{}, err
305 }
306
307 if !opts.SendContentMd5 {
308 if opts.UserMetadata == nil {
309 opts.UserMetadata = make(map[string]string, 1)
310 }
311 opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
312 }
313
314 // Calculate the optimal parts info for a given size.
315 totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
316 if err != nil {
317 return UploadInfo{}, err
318 }
319 // Initiates a new multipart request
320 uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
321 if err != nil {
322 return UploadInfo{}, err
323 }
324 delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
325
326 // Aborts the multipart upload if the function returns
327 // any error, since we do not resume we should purge
328 // the parts which have been uploaded to relinquish
329 // storage space.
330 defer func() {
331 if err != nil {
332 c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
333 }
334 }()
335
336 // Create checksums
337 // CRC32C is ~50% faster on AMD64 @ 30GB/s
338 var crcBytes []byte
339 customHeader := make(http.Header)
340 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
341 md5Hash := c.md5Hasher()
342 defer md5Hash.Close()
343
344 // Total data read and written to server. should be equal to 'size' at the end of the call.
345 var totalUploadedSize int64
346
347 // Initialize parts uploaded map.
348 partsInfo := make(map[int]ObjectPart)
349
350 // Create a buffer.
351 buf := make([]byte, partSize)
352
353 // Avoid declaring variables in the for loop
354 var md5Base64 string
355
356 // Part number always starts with '1'.
357 var partNumber int
358 for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
359
360 // Proceed to upload the part.
361 if partNumber == totalPartsCount {
362 partSize = lastPartSize
363 }
364
365 length, rerr := readFull(reader, buf)
366 if rerr == io.EOF && partNumber > 1 {
367 break
368 }
369
370 if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
371 return UploadInfo{}, rerr
372 }
373
374 // Calculate md5sum.
375 if opts.SendContentMd5 {
376 md5Hash.Reset()
377 md5Hash.Write(buf[:length])
378 md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
379 } else {
380 // Add CRC32C instead.
381 crc.Reset()
382 crc.Write(buf[:length])
383 cSum := crc.Sum(nil)
384 customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
385 crcBytes = append(crcBytes, cSum...)
386 }
387
388 // Update progress reader appropriately to the latest offset
389 // as we read from the source.
390 hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress)
391 p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
392 objPart, uerr := c.uploadPart(ctx, p)
393 if uerr != nil {
394 return UploadInfo{}, uerr
395 }
396
397 // Save successfully uploaded part metadata.
398 partsInfo[partNumber] = objPart
399
400 // Save successfully uploaded size.
401 totalUploadedSize += partSize
402 }
403
404 // Verify if we uploaded all the data.
405 if size > 0 {
406 if totalUploadedSize != size {
407 return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
408 }
409 }
410
411 // Complete multipart upload.
412 var complMultipartUpload completeMultipartUpload
413
414 // Loop over total uploaded parts to save them in
415 // Parts array before completing the multipart request.
416 for i := 1; i < partNumber; i++ {
417 part, ok := partsInfo[i]
418 if !ok {
419 return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
420 }
421 complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
422 ETag: part.ETag,
423 PartNumber: part.PartNumber,
424 ChecksumCRC32: part.ChecksumCRC32,
425 ChecksumCRC32C: part.ChecksumCRC32C,
426 ChecksumSHA1: part.ChecksumSHA1,
427 ChecksumSHA256: part.ChecksumSHA256,
428 })
429 }
430
431 // Sort all completed parts.
432 sort.Sort(completedParts(complMultipartUpload.Parts))
433
434 opts = PutObjectOptions{
435 ServerSideEncryption: opts.ServerSideEncryption,
436 }
437 if len(crcBytes) > 0 {
438 // Add hash of hashes.
439 crc.Reset()
440 crc.Write(crcBytes)
441 opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
442 }
443 uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
444 if err != nil {
445 return UploadInfo{}, err
446 }
447
448 uploadInfo.Size = totalUploadedSize
449 return uploadInfo, nil
450}
451
452// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
453// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
454func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
455 reader io.Reader, opts PutObjectOptions,
456) (info UploadInfo, err error) {
457 // Input validation.
458 if err = s3utils.CheckValidBucketName(bucketName); err != nil {
459 return UploadInfo{}, err
460 }
461
462 if err = s3utils.CheckValidObjectName(objectName); err != nil {
463 return UploadInfo{}, err
464 }
465
466 if !opts.SendContentMd5 {
467 if opts.UserMetadata == nil {
468 opts.UserMetadata = make(map[string]string, 1)
469 }
470 opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
471 }
472
473 // Cancel all when an error occurs.
474 ctx, cancel := context.WithCancel(ctx)
475 defer cancel()
476
477 // Calculate the optimal parts info for a given size.
478 totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
479 if err != nil {
480 return UploadInfo{}, err
481 }
482
483 // Initiates a new multipart request
484 uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
485 if err != nil {
486 return UploadInfo{}, err
487 }
488 delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
489
490 // Aborts the multipart upload if the function returns
491 // any error, since we do not resume we should purge
492 // the parts which have been uploaded to relinquish
493 // storage space.
494 defer func() {
495 if err != nil {
496 c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
497 }
498 }()
499
500 // Create checksums
501 // CRC32C is ~50% faster on AMD64 @ 30GB/s
502 var crcBytes []byte
503 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
504
505 // Total data read and written to server. should be equal to 'size' at the end of the call.
506 var totalUploadedSize int64
507
508 // Initialize parts uploaded map.
509 partsInfo := make(map[int]ObjectPart)
510
511 // Create a buffer.
512 nBuffers := int64(opts.NumThreads)
513 bufs := make(chan []byte, nBuffers)
514 all := make([]byte, nBuffers*partSize)
515 for i := int64(0); i < nBuffers; i++ {
516 bufs <- all[i*partSize : i*partSize+partSize]
517 }
518
519 var wg sync.WaitGroup
520 var mu sync.Mutex
521 errCh := make(chan error, opts.NumThreads)
522
523 reader = newHook(reader, opts.Progress)
524
525 // Part number always starts with '1'.
526 var partNumber int
527 for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
528 // Proceed to upload the part.
529 var buf []byte
530 select {
531 case buf = <-bufs:
532 case err = <-errCh:
533 cancel()
534 wg.Wait()
535 return UploadInfo{}, err
536 }
537
538 if int64(len(buf)) != partSize {
539 return UploadInfo{}, fmt.Errorf("read buffer < %d than expected partSize: %d", len(buf), partSize)
540 }
541
542 length, rerr := readFull(reader, buf)
543 if rerr == io.EOF && partNumber > 1 {
544 // Done
545 break
546 }
547
548 if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
549 cancel()
550 wg.Wait()
551 return UploadInfo{}, rerr
552 }
553
554 // Calculate md5sum.
555 customHeader := make(http.Header)
556 if !opts.SendContentMd5 {
557 // Add CRC32C instead.
558 crc.Reset()
559 crc.Write(buf[:length])
560 cSum := crc.Sum(nil)
561 customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
562 crcBytes = append(crcBytes, cSum...)
563 }
564
565 wg.Add(1)
566 go func(partNumber int) {
567 // Avoid declaring variables in the for loop
568 var md5Base64 string
569
570 if opts.SendContentMd5 {
571 md5Hash := c.md5Hasher()
572 md5Hash.Write(buf[:length])
573 md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
574 md5Hash.Close()
575 }
576
577 defer wg.Done()
578 p := uploadPartParams{
579 bucketName: bucketName,
580 objectName: objectName,
581 uploadID: uploadID,
582 reader: bytes.NewReader(buf[:length]),
583 partNumber: partNumber,
584 md5Base64: md5Base64,
585 size: int64(length),
586 sse: opts.ServerSideEncryption,
587 streamSha256: !opts.DisableContentSha256,
588 customHeader: customHeader,
589 }
590 objPart, uerr := c.uploadPart(ctx, p)
591 if uerr != nil {
592 errCh <- uerr
593 return
594 }
595
596 // Save successfully uploaded part metadata.
597 mu.Lock()
598 partsInfo[partNumber] = objPart
599 mu.Unlock()
600
601 // Send buffer back so it can be reused.
602 bufs <- buf
603 }(partNumber)
604
605 // Save successfully uploaded size.
606 totalUploadedSize += int64(length)
607 }
608 wg.Wait()
609
610 // Collect any error
611 select {
612 case err = <-errCh:
613 return UploadInfo{}, err
614 default:
615 }
616
617 // Complete multipart upload.
618 var complMultipartUpload completeMultipartUpload
619
620 // Loop over total uploaded parts to save them in
621 // Parts array before completing the multipart request.
622 for i := 1; i < partNumber; i++ {
623 part, ok := partsInfo[i]
624 if !ok {
625 return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
626 }
627 complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
628 ETag: part.ETag,
629 PartNumber: part.PartNumber,
630 ChecksumCRC32: part.ChecksumCRC32,
631 ChecksumCRC32C: part.ChecksumCRC32C,
632 ChecksumSHA1: part.ChecksumSHA1,
633 ChecksumSHA256: part.ChecksumSHA256,
634 })
635 }
636
637 // Sort all completed parts.
638 sort.Sort(completedParts(complMultipartUpload.Parts))
639
640 opts = PutObjectOptions{}
641 if len(crcBytes) > 0 {
642 // Add hash of hashes.
643 crc.Reset()
644 crc.Write(crcBytes)
645 opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
646 }
647 uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
648 if err != nil {
649 return UploadInfo{}, err
650 }
651
652 uploadInfo.Size = totalUploadedSize
653 return uploadInfo, nil
654}
655
// putObject special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Size -1 is only supported on Google Cloud Storage, we error
	// out in all other situations.
	if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) {
		return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName)
	}

	// MD5 requires buffering the whole payload, which needs a known size.
	if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
		return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
	}

	// When the reader supports ReadAt+Seek (and is not a *minio.Object),
	// wrap it in a SectionReader so the MD5 pre-pass below can rewind it
	// instead of buffering the payload in memory.
	var readSeeker io.Seeker
	if size > 0 {
		if isReadAt(reader) && !isObject(reader) {
			seeker, ok := reader.(io.Seeker)
			if ok {
				offset, err := seeker.Seek(0, io.SeekCurrent)
				if err != nil {
					return UploadInfo{}, errInvalidArgument(err.Error())
				}
				reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
				readSeeker = reader.(io.Seeker)
			}
		}
	}

	var md5Base64 string
	if opts.SendContentMd5 {
		// Calculate md5sum.
		hash := c.md5Hasher()

		if readSeeker != nil {
			// Seekable: hash by streaming, then rewind for the upload.
			if _, err := io.Copy(hash, reader); err != nil {
				return UploadInfo{}, err
			}
			// Seek back to beginning of io.NewSectionReader's offset.
			_, err = readSeeker.Seek(0, io.SeekStart)
			if err != nil {
				return UploadInfo{}, errInvalidArgument(err.Error())
			}
		} else {
			// Not seekable: buffer the full payload in memory and replace
			// 'reader' with a rereadable view of the buffer.
			buf := make([]byte, size)

			length, err := readFull(reader, buf)
			if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
				return UploadInfo{}, err
			}

			hash.Write(buf[:length])
			reader = bytes.NewReader(buf[:length])
		}

		md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
		hash.Close()
	}

	// Update progress reader appropriately to the latest offset as we
	// read from the source.
	progressReader := newHook(reader, opts.Progress)

	// This function does not calculate sha256 and md5sum for payload.
	// Execute put object.
	return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts)
}
731
732// putObjectDo - executes the put object http operation.
733// NOTE: You must have WRITE permissions on a bucket to add an object to it.
734func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) {
735 // Input validation.
736 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
737 return UploadInfo{}, err
738 }
739 if err := s3utils.CheckValidObjectName(objectName); err != nil {
740 return UploadInfo{}, err
741 }
742 // Set headers.
743 customHeader := opts.Header()
744
745 // Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks.
746 addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
747
748 if addCrc {
749 // If user has added checksums, don't add them ourselves.
750 for k := range opts.UserMetadata {
751 if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
752 addCrc = false
753 }
754 }
755 }
756 // Populate request metadata.
757 reqMetadata := requestMetadata{
758 bucketName: bucketName,
759 objectName: objectName,
760 customHeader: customHeader,
761 contentBody: reader,
762 contentLength: size,
763 contentMD5Base64: md5Base64,
764 contentSHA256Hex: sha256Hex,
765 streamSha256: !opts.DisableContentSha256,
766 addCrc: addCrc,
767 }
768 if opts.Internal.SourceVersionID != "" {
769 if opts.Internal.SourceVersionID != nullVersionID {
770 if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
771 return UploadInfo{}, errInvalidArgument(err.Error())
772 }
773 }
774 urlValues := make(url.Values)
775 urlValues.Set("versionId", opts.Internal.SourceVersionID)
776 reqMetadata.queryValues = urlValues
777 }
778
779 // Execute PUT an objectName.
780 resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
781 defer closeResponse(resp)
782 if err != nil {
783 return UploadInfo{}, err
784 }
785 if resp != nil {
786 if resp.StatusCode != http.StatusOK {
787 return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
788 }
789 }
790
791 // extract lifecycle expiry date and rule ID
792 expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
793 h := resp.Header
794 return UploadInfo{
795 Bucket: bucketName,
796 Key: objectName,
797 ETag: trimEtag(h.Get("ETag")),
798 VersionID: h.Get(amzVersionID),
799 Size: size,
800 Expiration: expTime,
801 ExpirationRuleID: ruleID,
802
803 // Checksum values
804 ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
805 ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
806 ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
807 ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
808 }, nil
809}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
new file mode 100644
index 0000000..bbd8924
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -0,0 +1,473 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/base64"
24 "errors"
25 "fmt"
26 "hash/crc32"
27 "io"
28 "net/http"
29 "sort"
30 "time"
31
32 "github.com/minio/minio-go/v7/pkg/encrypt"
33 "github.com/minio/minio-go/v7/pkg/s3utils"
34 "golang.org/x/net/http/httpguts"
35)
36
// ReplicationStatus describes the replication state of an object.
type ReplicationStatus string

// Replication states reported by the server.
const (
	// ReplicationStatusPending indicates replication is pending
	ReplicationStatusPending ReplicationStatus = "PENDING"
	// ReplicationStatusComplete indicates replication completed ok
	ReplicationStatusComplete ReplicationStatus = "COMPLETED"
	// ReplicationStatusFailed indicates replication failed
	ReplicationStatusFailed ReplicationStatus = "FAILED"
	// ReplicationStatusReplica indicates object is a replica of a source
	ReplicationStatusReplica ReplicationStatus = "REPLICA"
)

// Empty reports whether no replication status has been recorded.
func (r ReplicationStatus) Empty() bool {
	return len(r) == 0
}
55
// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition
// implementation on MinIO server
type AdvancedPutOptions struct {
	// SourceVersionID of the object being replicated. When set it is
	// validated as a UUID (unless it equals the null version ID) and sent
	// as the "versionId" query parameter on the PUT.
	SourceVersionID string
	// SourceETag of the object on the source cluster.
	SourceETag string
	// ReplicationStatus to stamp on the destination object.
	ReplicationStatus ReplicationStatus
	// SourceMTime is the source object's modification time, forwarded as an
	// RFC3339Nano-formatted header.
	SourceMTime time.Time
	// ReplicationRequest marks the upload as an internal replication request.
	ReplicationRequest bool
	// RetentionTimestamp of the last retention-metadata change on the source.
	RetentionTimestamp time.Time
	// TaggingTimestamp of the last tag change on the source.
	TaggingTimestamp time.Time
	// LegalholdTimestamp of the last legal-hold change on the source.
	LegalholdTimestamp time.Time
	// ReplicationValidityCheck is forwarded as the MinIO replication-check
	// header — presumably a permissions-only validation; confirm with the
	// server implementation.
	ReplicationValidityCheck bool
}
69
// PutObjectOptions represents options specified by user for PutObject call
type PutObjectOptions struct {
	// UserMetadata entries are sent as x-amz-meta-* headers unless the key
	// is already a recognized amz/standard/storage-class header (see Header).
	UserMetadata map[string]string
	// UserTags are URL-encoded into the x-amz-tagging header.
	UserTags map[string]string
	// Progress, when non-nil, is fed the bytes read from the source so the
	// caller can track upload progress.
	Progress           io.Reader
	ContentType        string
	ContentEncoding    string
	ContentDisposition string
	ContentLanguage    string
	CacheControl       string
	Expires            time.Time
	Mode               RetentionMode
	RetainUntilDate    time.Time
	ServerSideEncryption encrypt.ServerSide
	// NumThreads bounds parallel part uploads; 0 falls back to the package
	// default worker count (totalWorkers).
	NumThreads              uint
	StorageClass            string
	WebsiteRedirectLocation string
	// PartSize is the multipart part size in bytes; 0 selects the library
	// minimum part size (minPartSize).
	PartSize uint64
	LegalHold LegalHoldStatus
	// SendContentMd5 computes and sends Content-MD5 per part; when false,
	// CRC32C checksums are used instead (see putObjectMultipartStreamNoLength).
	SendContentMd5 bool
	// DisableContentSha256 disables streaming SHA256 signing of the payload.
	DisableContentSha256 bool
	// DisableMultipart forces a single PUT regardless of object size.
	DisableMultipart bool

	// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
	// fill them serially and upload them in parallel.
	// This can be used for faster uploads on non-seekable or slow-to-seek input.
	ConcurrentStreamParts bool
	Internal              AdvancedPutOptions

	// customHeaders holds extra raw headers (e.g. If-Match) copied verbatim
	// into the request; populated via SetMatchETag / SetMatchETagExcept.
	customHeaders http.Header
}
101
102// SetMatchETag if etag matches while PUT MinIO returns an error
103// this is a MinIO specific extension to support optimistic locking
104// semantics.
105func (opts *PutObjectOptions) SetMatchETag(etag string) {
106 if opts.customHeaders == nil {
107 opts.customHeaders = http.Header{}
108 }
109 opts.customHeaders.Set("If-Match", "\""+etag+"\"")
110}
111
112// SetMatchETagExcept if etag does not match while PUT MinIO returns an
113// error this is a MinIO specific extension to support optimistic locking
114// semantics.
115func (opts *PutObjectOptions) SetMatchETagExcept(etag string) {
116 if opts.customHeaders == nil {
117 opts.customHeaders = http.Header{}
118 }
119 opts.customHeaders.Set("If-None-Match", "\""+etag+"\"")
120}
121
122// getNumThreads - gets the number of threads to be used in the multipart
123// put object operation
124func (opts PutObjectOptions) getNumThreads() (numThreads int) {
125 if opts.NumThreads > 0 {
126 numThreads = int(opts.NumThreads)
127 } else {
128 numThreads = totalWorkers
129 }
130 return
131}
132
133// Header - constructs the headers from metadata entered by user in
134// PutObjectOptions struct
135func (opts PutObjectOptions) Header() (header http.Header) {
136 header = make(http.Header)
137
138 contentType := opts.ContentType
139 if contentType == "" {
140 contentType = "application/octet-stream"
141 }
142 header.Set("Content-Type", contentType)
143
144 if opts.ContentEncoding != "" {
145 header.Set("Content-Encoding", opts.ContentEncoding)
146 }
147 if opts.ContentDisposition != "" {
148 header.Set("Content-Disposition", opts.ContentDisposition)
149 }
150 if opts.ContentLanguage != "" {
151 header.Set("Content-Language", opts.ContentLanguage)
152 }
153 if opts.CacheControl != "" {
154 header.Set("Cache-Control", opts.CacheControl)
155 }
156
157 if !opts.Expires.IsZero() {
158 header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat))
159 }
160
161 if opts.Mode != "" {
162 header.Set(amzLockMode, opts.Mode.String())
163 }
164
165 if !opts.RetainUntilDate.IsZero() {
166 header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339))
167 }
168
169 if opts.LegalHold != "" {
170 header.Set(amzLegalHoldHeader, opts.LegalHold.String())
171 }
172
173 if opts.ServerSideEncryption != nil {
174 opts.ServerSideEncryption.Marshal(header)
175 }
176
177 if opts.StorageClass != "" {
178 header.Set(amzStorageClass, opts.StorageClass)
179 }
180
181 if opts.WebsiteRedirectLocation != "" {
182 header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation)
183 }
184
185 if !opts.Internal.ReplicationStatus.Empty() {
186 header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
187 }
188 if !opts.Internal.SourceMTime.IsZero() {
189 header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano))
190 }
191 if opts.Internal.SourceETag != "" {
192 header.Set(minIOBucketSourceETag, opts.Internal.SourceETag)
193 }
194 if opts.Internal.ReplicationRequest {
195 header.Set(minIOBucketReplicationRequest, "true")
196 }
197 if opts.Internal.ReplicationValidityCheck {
198 header.Set(minIOBucketReplicationCheck, "true")
199 }
200 if !opts.Internal.LegalholdTimestamp.IsZero() {
201 header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
202 }
203 if !opts.Internal.RetentionTimestamp.IsZero() {
204 header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
205 }
206 if !opts.Internal.TaggingTimestamp.IsZero() {
207 header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
208 }
209
210 if len(opts.UserTags) != 0 {
211 header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
212 }
213
214 for k, v := range opts.UserMetadata {
215 if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
216 header.Set(k, v)
217 } else {
218 header.Set("x-amz-meta-"+k, v)
219 }
220 }
221
222 // set any other additional custom headers.
223 for k, v := range opts.customHeaders {
224 header[k] = v
225 }
226
227 return
228}
229
230// validate() checks if the UserMetadata map has standard headers or and raises an error if so.
231func (opts PutObjectOptions) validate() (err error) {
232 for k, v := range opts.UserMetadata {
233 if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) {
234 return errInvalidArgument(k + " unsupported user defined metadata name")
235 }
236 if !httpguts.ValidHeaderFieldValue(v) {
237 return errInvalidArgument(v + " unsupported user defined metadata value")
238 }
239 }
240 if opts.Mode != "" && !opts.Mode.IsValid() {
241 return errInvalidArgument(opts.Mode.String() + " unsupported retention mode")
242 }
243 if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
244 return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
245 }
246 return nil
247}
248
// completedParts is a collection of parts sortable by their part numbers.
// used for sorting the uploaded parts before completing the multipart request.
type completedParts []CompletePart

// Len implements sort.Interface.
func (a completedParts) Len() int { return len(a) }

// Swap implements sort.Interface.
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less implements sort.Interface: parts are ordered by ascending part number.
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
256
257// PutObject creates an object in a bucket.
258//
259// You must have WRITE permissions on a bucket to create an object.
260//
261// - For size smaller than 16MiB PutObject automatically does a
262// single atomic PUT operation.
263//
264// - For size larger than 16MiB PutObject automatically does a
265// multipart upload operation.
266//
267// - For size input as -1 PutObject does a multipart Put operation
268// until input stream reaches EOF. Maximum object size that can
269// be uploaded through this operation will be 5TiB.
270//
271// WARNING: Passing down '-1' will use memory and these cannot
272// be reused for best outcomes for PutObject(), pass the size always.
273//
274// NOTE: Upon errors during upload multipart operation is entirely aborted.
275func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
276 opts PutObjectOptions,
277) (info UploadInfo, err error) {
278 if objectSize < 0 && opts.DisableMultipart {
279 return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
280 }
281
282 err = opts.validate()
283 if err != nil {
284 return UploadInfo{}, err
285 }
286
287 return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
288}
289
290func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
291 // Check for largest object size allowed.
292 if size > int64(maxMultipartPutObjectSize) {
293 return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
294 }
295
296 // NOTE: Streaming signature is not supported by GCS.
297 if s3utils.IsGoogleEndpoint(*c.endpointURL) {
298 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
299 }
300
301 partSize := opts.PartSize
302 if opts.PartSize == 0 {
303 partSize = minPartSize
304 }
305
306 if c.overrideSignerType.IsV2() {
307 if size >= 0 && size < int64(partSize) || opts.DisableMultipart {
308 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
309 }
310 return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
311 }
312
313 if size < 0 {
314 if opts.DisableMultipart {
315 return UploadInfo{}, errors.New("no length provided and multipart disabled")
316 }
317 if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
318 return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
319 }
320 return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
321 }
322
323 if size < int64(partSize) || opts.DisableMultipart {
324 return c.putObject(ctx, bucketName, objectName, reader, size, opts)
325 }
326
327 return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
328}
329
// putObjectMultipartStreamNoLength uploads a stream of unknown length as a
// multipart upload: the reader is consumed in partSize chunks, each chunk is
// uploaded as one part, and the upload is completed — or aborted if any step
// fails.
func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
	// Input validation.
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err = s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Total data read and written to server. should be equal to
	// 'size' at the end of the call.
	var totalUploadedSize int64

	// Complete multipart upload.
	var complMultipartUpload completeMultipartUpload

	// Calculate the optimal parts info for a given size.
	totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
	if err != nil {
		return UploadInfo{}, err
	}

	// Without per-part MD5, advertise CRC32C via user metadata on the upload
	// initiation — NOTE(review): presumably this tells the server which
	// per-part checksum algorithm to expect; confirm against S3 multipart
	// checksum documentation.
	if !opts.SendContentMd5 {
		if opts.UserMetadata == nil {
			opts.UserMetadata = make(map[string]string, 1)
		}
		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
	}

	// Initiate a new multipart upload.
	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
	if err != nil {
		return UploadInfo{}, err
	}
	// The checksum-algorithm entry was only needed while initiating the
	// upload; remove it so it is not treated as regular user metadata.
	delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")

	// Abort the multipart upload on any later error so the server does not
	// accumulate orphaned parts.
	defer func() {
		if err != nil {
			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
		}
	}()

	// Part number always starts with '1'.
	partNumber := 1

	// Initialize parts uploaded map.
	partsInfo := make(map[int]ObjectPart)

	// Create a buffer.
	buf := make([]byte, partSize)

	// Create checksums
	// CRC32C is ~50% faster on AMD64 @ 30GB/s
	var crcBytes []byte
	customHeader := make(http.Header)
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))

	for partNumber <= totalPartsCount {
		length, rerr := readFull(reader, buf)
		// A clean EOF after at least one uploaded part means we are done.
		if rerr == io.EOF && partNumber > 1 {
			break
		}

		// Short reads (ErrUnexpectedEOF) and EOF are expected for the final
		// part; anything else is a genuine read failure.
		if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
			return UploadInfo{}, rerr
		}

		var md5Base64 string
		if opts.SendContentMd5 {
			// Calculate md5sum.
			hash := c.md5Hasher()
			hash.Write(buf[:length])
			md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
			hash.Close()
		} else {
			// Compute a CRC32C per part instead; the raw sums are collected
			// so a checksum-of-checksums can be attached at completion.
			crc.Reset()
			crc.Write(buf[:length])
			cSum := crc.Sum(nil)
			customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
			crcBytes = append(crcBytes, cSum...)
		}

		// Update progress reader appropriately to the latest offset
		// as we read from the source.
		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)

		// Proceed to upload the part.
		p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
		objPart, uerr := c.uploadPart(ctx, p)
		if uerr != nil {
			return UploadInfo{}, uerr
		}

		// Save successfully uploaded part metadata.
		partsInfo[partNumber] = objPart

		// Save successfully uploaded size.
		totalUploadedSize += int64(length)

		// Increment part number.
		partNumber++

		// For unknown size, Read EOF we break away.
		// We do not have to upload till totalPartsCount.
		if rerr == io.EOF {
			break
		}
	}

	// Loop over total uploaded parts to save them in
	// Parts array before completing the multipart request.
	for i := 1; i < partNumber; i++ {
		part, ok := partsInfo[i]
		if !ok {
			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
		}
		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
			ETag:           part.ETag,
			PartNumber:     part.PartNumber,
			ChecksumCRC32:  part.ChecksumCRC32,
			ChecksumCRC32C: part.ChecksumCRC32C,
			ChecksumSHA1:   part.ChecksumSHA1,
			ChecksumSHA256: part.ChecksumSHA256,
		})
	}

	// Sort all completed parts.
	sort.Sort(completedParts(complMultipartUpload.Parts))

	// Reset the options: only the (optional) checksum-of-checksums metadata
	// should accompany the complete-multipart call.
	opts = PutObjectOptions{}
	if len(crcBytes) > 0 {
		// Add hash of hashes.
		crc.Reset()
		crc.Write(crcBytes)
		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
	}
	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
	if err != nil {
		return UploadInfo{}, err
	}

	// Report the number of bytes actually streamed; the server response does
	// not know the original stream length.
	uploadInfo.Size = totalUploadedSize
	return uploadInfo, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
new file mode 100644
index 0000000..eb4da41
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -0,0 +1,246 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2021 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "archive/tar"
22 "bufio"
23 "bytes"
24 "context"
25 "fmt"
26 "io"
27 "net/http"
28 "os"
29 "strings"
30 "sync"
31 "time"
32
33 "github.com/klauspost/compress/s2"
34)
35
// SnowballOptions contains options for PutObjectsSnowball calls.
type SnowballOptions struct {
	// Opts is options applied to all objects.
	Opts PutObjectOptions

	// Processing options:

	// InMemory specifies that all objects should be collected in memory
	// before they are uploaded.
	// If false a temporary file will be created.
	InMemory bool

	// Compress enables content compression (S2 format) before upload.
	// Compression will typically reduce memory and network usage.
	// Compression can safely be enabled with MinIO hosts.
	Compress bool

	// SkipErrs, if enabled, will skip any errors while reading the
	// object content while creating the snowball archive, continuing
	// with the remaining objects.
	SkipErrs bool
}
57
// SnowballObject contains information about a single object to be added to the snowball.
type SnowballObject struct {
	// Key is the destination key, including prefix.
	// A leading slash, if present, is trimmed before archiving.
	Key string

	// Size is the content size of this object.
	// If fewer bytes are readable from Content the upload fails with
	// io.ErrUnexpectedEOF (unless SkipErrs is set).
	Size int64

	// Modtime to apply to the object.
	// If Modtime is the zero value current time will be used.
	ModTime time.Time

	// Content of the object.
	// Exactly 'Size' number of bytes must be provided.
	Content io.Reader

	// VersionID of the object; if empty, a new versionID will be generated
	VersionID string

	// Headers contains more options for this object upload, the same as you
	// would include in a regular PutObject operation, such as user metadata
	// and content-disposition, expires, ..
	Headers http.Header

	// Close will be called when an object has finished processing.
	// Note that if PutObjectsSnowball returns because of an error,
	// objects not consumed from the input will NOT have been closed.
	// Leave as nil for no callback.
	Close func()
}
88
89type nopReadSeekCloser struct {
90 io.ReadSeeker
91}
92
93func (n nopReadSeekCloser) Close() error {
94 return nil
95}
96
// readSeekCloser combines reading, seeking and closing. It is equivalent to
// io.ReadSeekCloser, which is available from go1.16 onwards; kept as a local
// interface for compatibility with older toolchains.
type readSeekCloser interface {
	io.Reader
	io.Seeker
	io.Closer
}
103
// PutObjectsSnowball will put multiple objects with a single put call.
// A (compressed) TAR file will be created which will contain multiple objects.
// The key for each object will be used for the destination in the specified bucket.
// Total size should be < 5TB.
// This function blocks until 'objs' is closed and the content has been uploaded.
func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) {
	err = opts.Opts.validate()
	if err != nil {
		return err
	}
	// The archive is staged either fully in memory or in a temporary file;
	// getTmpReader re-opens the staged data for the final upload.
	var tmpWriter io.Writer
	var getTmpReader func() (rc readSeekCloser, sz int64, err error)
	if opts.InMemory {
		b := bytes.NewBuffer(nil)
		tmpWriter = b
		getTmpReader = func() (readSeekCloser, int64, error) {
			return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
		}
	} else {
		f, err := os.CreateTemp("", "s3-putsnowballobjects-*")
		if err != nil {
			return err
		}
		name := f.Name()
		tmpWriter = f
		// once guards f.Close(): the write handle must close exactly once —
		// either when the reader is created below, or on early return.
		var once sync.Once
		defer once.Do(func() {
			f.Close()
		})
		defer os.Remove(name)
		getTmpReader = func() (readSeekCloser, int64, error) {
			once.Do(func() {
				f.Close()
			})
			f, err := os.Open(name)
			if err != nil {
				return nil, 0, err
			}
			st, err := f.Stat()
			if err != nil {
				return nil, 0, err
			}
			return f, st.Size(), nil
		}
	}
	// flush finalizes whichever buffering/compression layer wraps tmpWriter.
	flush := func() error { return nil }
	if !opts.Compress {
		if !opts.InMemory {
			// Insert buffer for writes.
			buf := bufio.NewWriterSize(tmpWriter, 1<<20)
			flush = buf.Flush
			tmpWriter = buf
		}
	} else {
		s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression())
		flush = s2c.Close
		defer s2c.Close()
		tmpWriter = s2c
	}
	t := tar.NewWriter(tmpWriter)

objectLoop:
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case obj, ok := <-objs:
			if !ok {
				break objectLoop
			}

			closeObj := func() {}
			if obj.Close != nil {
				closeObj = obj.Close
			}

			// Trim accidental slash prefix.
			obj.Key = strings.TrimPrefix(obj.Key, "/")
			header := tar.Header{
				Typeflag: tar.TypeReg,
				Name:     obj.Key,
				Size:     obj.Size,
				ModTime:  obj.ModTime,
				Format:   tar.FormatPAX,
			}
			if header.ModTime.IsZero() {
				header.ModTime = time.Now().UTC()
			}

			// Carry the version ID and per-object headers as PAX records —
			// presumably read back by the server during auto-extraction.
			header.PAXRecords = make(map[string]string)
			if obj.VersionID != "" {
				header.PAXRecords["minio.versionId"] = obj.VersionID
			}
			for k, vals := range obj.Headers {
				header.PAXRecords["minio.metadata."+k] = strings.Join(vals, ",")
			}

			if err := t.WriteHeader(&header); err != nil {
				closeObj()
				return err
			}
			n, err := io.Copy(t, obj.Content)
			if err != nil {
				closeObj()
				if opts.SkipErrs {
					continue
				}
				return err
			}
			// The tar header promised obj.Size bytes; a short object would
			// corrupt the archive.
			if n != obj.Size {
				closeObj()
				if opts.SkipErrs {
					continue
				}
				return io.ErrUnexpectedEOF
			}
			closeObj()
		}
	}
	// Flush tar
	err = t.Flush()
	if err != nil {
		return err
	}
	// Flush compression
	err = flush()
	if err != nil {
		return err
	}
	// Instruct the server to auto-extract the snowball archive on receipt.
	if opts.Opts.UserMetadata == nil {
		opts.Opts.UserMetadata = map[string]string{}
	}
	opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true"
	opts.Opts.DisableMultipart = true
	rc, sz, err := getTmpReader()
	if err != nil {
		return err
	}
	defer rc.Close()
	// A random archive name avoids collisions between concurrent uploads.
	rand := c.random.Uint64()
	_, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts)
	return err
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
new file mode 100644
index 0000000..9c0ac44
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -0,0 +1,548 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "io"
25 "net/http"
26 "net/url"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30)
31
//revive:disable

// BucketOptions is an alias kept for backwards compatibility.
//
// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions.
type BucketOptions = RemoveBucketOptions

//revive:enable
38
// RemoveBucketOptions special headers to purge buckets, only
// useful when endpoint is MinIO
type RemoveBucketOptions struct {
	// ForceDelete, when true, sends the MinIO force-delete header so the
	// bucket is removed together with all objects, versions and delete
	// markers it contains.
	ForceDelete bool
}
44
45// RemoveBucketWithOptions deletes the bucket name.
46//
47// All objects (including all object versions and delete markers)
48// in the bucket will be deleted forcibly if bucket options set
49// ForceDelete to 'true'.
50func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error {
51 // Input validation.
52 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
53 return err
54 }
55
56 // Build headers.
57 headers := make(http.Header)
58 if opts.ForceDelete {
59 headers.Set(minIOForceDelete, "true")
60 }
61
62 // Execute DELETE on bucket.
63 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
64 bucketName: bucketName,
65 contentSHA256Hex: emptySHA256Hex,
66 customHeader: headers,
67 })
68 defer closeResponse(resp)
69 if err != nil {
70 return err
71 }
72 if resp != nil {
73 if resp.StatusCode != http.StatusNoContent {
74 return httpRespToErrorResponse(resp, bucketName, "")
75 }
76 }
77
78 // Remove the location from cache on a successful delete.
79 c.bucketLocCache.Delete(bucketName)
80 return nil
81}
82
83// RemoveBucket deletes the bucket name.
84//
85// All objects (including all object versions and delete markers).
86// in the bucket must be deleted before successfully attempting this request.
87func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
88 // Input validation.
89 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
90 return err
91 }
92 // Execute DELETE on bucket.
93 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
94 bucketName: bucketName,
95 contentSHA256Hex: emptySHA256Hex,
96 })
97 defer closeResponse(resp)
98 if err != nil {
99 return err
100 }
101 if resp != nil {
102 if resp.StatusCode != http.StatusNoContent {
103 return httpRespToErrorResponse(resp, bucketName, "")
104 }
105 }
106
107 // Remove the location from cache on a successful delete.
108 c.bucketLocCache.Delete(bucketName)
109
110 return nil
111}
112
// AdvancedRemoveOptions intended for internal use by replication
type AdvancedRemoveOptions struct {
	// ReplicationDeleteMarker marks the request as replicating a delete marker.
	ReplicationDeleteMarker bool
	// ReplicationStatus is forwarded via the bucket-replication-status header.
	ReplicationStatus ReplicationStatus
	// ReplicationMTime is the source's modification time (sent as RFC3339Nano).
	ReplicationMTime time.Time
	// ReplicationRequest marks this delete as an internal replication request.
	ReplicationRequest bool
	ReplicationValidityCheck bool // check permissions
}
121
// RemoveObjectOptions represents options specified by user for RemoveObject call
type RemoveObjectOptions struct {
	// ForceDelete sends the MinIO-specific force-delete header.
	ForceDelete bool
	// GovernanceBypass sets the bypass-governance-retention header so
	// governance-locked objects can be removed.
	GovernanceBypass bool
	// VersionID selects a specific object version to remove.
	VersionID string
	// Internal carries replication-only options; not for general use.
	Internal AdvancedRemoveOptions
}
129
130// RemoveObject removes an object from a bucket.
131func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
132 // Input validation.
133 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
134 return err
135 }
136 if err := s3utils.CheckValidObjectName(objectName); err != nil {
137 return err
138 }
139
140 res := c.removeObject(ctx, bucketName, objectName, opts)
141 return res.Err
142}
143
144func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult {
145 // Get resources properly escaped and lined up before
146 // using them in http request.
147 urlValues := make(url.Values)
148
149 if opts.VersionID != "" {
150 urlValues.Set("versionId", opts.VersionID)
151 }
152
153 // Build headers.
154 headers := make(http.Header)
155
156 if opts.GovernanceBypass {
157 // Set the bypass goverenance retention header
158 headers.Set(amzBypassGovernance, "true")
159 }
160 if opts.Internal.ReplicationDeleteMarker {
161 headers.Set(minIOBucketReplicationDeleteMarker, "true")
162 }
163 if !opts.Internal.ReplicationMTime.IsZero() {
164 headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano))
165 }
166 if !opts.Internal.ReplicationStatus.Empty() {
167 headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
168 }
169 if opts.Internal.ReplicationRequest {
170 headers.Set(minIOBucketReplicationRequest, "true")
171 }
172 if opts.Internal.ReplicationValidityCheck {
173 headers.Set(minIOBucketReplicationCheck, "true")
174 }
175 if opts.ForceDelete {
176 headers.Set(minIOForceDelete, "true")
177 }
178 // Execute DELETE on objectName.
179 resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
180 bucketName: bucketName,
181 objectName: objectName,
182 contentSHA256Hex: emptySHA256Hex,
183 queryValues: urlValues,
184 customHeader: headers,
185 })
186 defer closeResponse(resp)
187 if err != nil {
188 return RemoveObjectResult{Err: err}
189 }
190 if resp != nil {
191 // if some unexpected error happened and max retry is reached, we want to let client know
192 if resp.StatusCode != http.StatusNoContent {
193 err := httpRespToErrorResponse(resp, bucketName, objectName)
194 return RemoveObjectResult{Err: err}
195 }
196 }
197
198 // DeleteObject always responds with http '204' even for
199 // objects which do not exist. So no need to handle them
200 // specifically.
201 return RemoveObjectResult{
202 ObjectName: objectName,
203 ObjectVersionID: opts.VersionID,
204 DeleteMarker: resp.Header.Get("x-amz-delete-marker") == "true",
205 DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"),
206 }
207}
208
// RemoveObjectError - container of Multi Delete S3 API error
type RemoveObjectError struct {
	ObjectName string // key of the object that could not be deleted
	VersionID  string // version that could not be deleted, if versioned
	Err        error  // reason for the failure
}
215
// RemoveObjectResult - container of Multi Delete S3 API result
type RemoveObjectResult struct {
	ObjectName      string // key of the object
	ObjectVersionID string // only filled with versioned buckets

	DeleteMarker          bool   // true when the delete produced a delete marker
	DeleteMarkerVersionID string // version ID of the produced delete marker

	Err error // non-nil when this object could not be deleted
}
226
227// generateRemoveMultiObjects - generate the XML request for remove multi objects request
228func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
229 delObjects := []deleteObject{}
230 for _, obj := range objects {
231 delObjects = append(delObjects, deleteObject{
232 Key: obj.Key,
233 VersionID: obj.VersionID,
234 })
235 }
236 xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false})
237 return xmlBytes
238}
239
240// processRemoveMultiObjectsResponse - parse the remove multi objects web service
241// and return the success/failure result status for each object
242func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObjectResult) {
243 // Parse multi delete XML response
244 rmResult := &deleteMultiObjectsResult{}
245 err := xmlDecoder(body, rmResult)
246 if err != nil {
247 resultCh <- RemoveObjectResult{ObjectName: "", Err: err}
248 return
249 }
250
251 // Fill deletion that returned success
252 for _, obj := range rmResult.DeletedObjects {
253 resultCh <- RemoveObjectResult{
254 ObjectName: obj.Key,
255 // Only filled with versioned buckets
256 ObjectVersionID: obj.VersionID,
257 DeleteMarker: obj.DeleteMarker,
258 DeleteMarkerVersionID: obj.DeleteMarkerVersionID,
259 }
260 }
261
262 // Fill deletion that returned an error.
263 for _, obj := range rmResult.UnDeletedObjects {
264 // Version does not exist is not an error ignore and continue.
265 switch obj.Code {
266 case "InvalidArgument", "NoSuchVersion":
267 continue
268 }
269 resultCh <- RemoveObjectResult{
270 ObjectName: obj.Key,
271 ObjectVersionID: obj.VersionID,
272 Err: ErrorResponse{
273 Code: obj.Code,
274 Message: obj.Message,
275 },
276 }
277 }
278}
279
// RemoveObjectsOptions represents options specified by user for RemoveObjects call
type RemoveObjectsOptions struct {
	// GovernanceBypass permits removal of objects under governance-mode
	// retention, mirroring RemoveObjectOptions.GovernanceBypass.
	GovernanceBypass bool
}
284
285// RemoveObjects removes multiple objects from a bucket while
286// it is possible to specify objects versions which are received from
287// objectsCh. Remove failures are sent back via error channel.
288func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError {
289 errorCh := make(chan RemoveObjectError, 1)
290
291 // Validate if bucket name is valid.
292 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
293 defer close(errorCh)
294 errorCh <- RemoveObjectError{
295 Err: err,
296 }
297 return errorCh
298 }
299 // Validate objects channel to be properly allocated.
300 if objectsCh == nil {
301 defer close(errorCh)
302 errorCh <- RemoveObjectError{
303 Err: errInvalidArgument("Objects channel cannot be nil"),
304 }
305 return errorCh
306 }
307
308 resultCh := make(chan RemoveObjectResult, 1)
309 go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts)
310 go func() {
311 defer close(errorCh)
312 for res := range resultCh {
313 // Send only errors to the error channel
314 if res.Err == nil {
315 continue
316 }
317 errorCh <- RemoveObjectError{
318 ObjectName: res.ObjectName,
319 VersionID: res.ObjectVersionID,
320 Err: res.Err,
321 }
322 }
323 }()
324
325 return errorCh
326}
327
328// RemoveObjectsWithResult removes multiple objects from a bucket while
329// it is possible to specify objects versions which are received from
330// objectsCh. Remove results, successes and failures are sent back via
331// RemoveObjectResult channel
332func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult {
333 resultCh := make(chan RemoveObjectResult, 1)
334
335 // Validate if bucket name is valid.
336 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
337 defer close(resultCh)
338 resultCh <- RemoveObjectResult{
339 Err: err,
340 }
341 return resultCh
342 }
343 // Validate objects channel to be properly allocated.
344 if objectsCh == nil {
345 defer close(resultCh)
346 resultCh <- RemoveObjectResult{
347 Err: errInvalidArgument("Objects channel cannot be nil"),
348 }
349 return resultCh
350 }
351
352 go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts)
353 return resultCh
354}
355
// Return true if the character is within the allowed characters in an XML 1.0 document
// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets
func validXMLChar(r rune) (ok bool) {
	switch {
	case r == 0x09, r == 0x0A, r == 0x0D:
		// TAB, LF and CR are the only allowed control characters.
		return true
	case r >= 0x20 && r <= 0xD7FF:
		return true
	case r >= 0xE000 && r <= 0xFFFD:
		// Excludes the surrogate range and the non-characters U+FFFE/U+FFFF.
		return true
	case r >= 0x10000 && r <= 0x10FFFF:
		return true
	}
	return false
}

// hasInvalidXMLChar reports whether str contains any rune that may not
// appear in an XML 1.0 document.
func hasInvalidXMLChar(str string) bool {
	for _, r := range str {
		if !validXMLChar(r) {
			return true
		}
	}
	return false
}
375
376// Generate and call MultiDelete S3 requests based on entries received from objectsCh
377func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
378 maxEntries := 1000
379 finish := false
380 urlValues := make(url.Values)
381 urlValues.Set("delete", "")
382
383 // Close result channel when Multi delete finishes.
384 defer close(resultCh)
385
386 // Loop over entries by 1000 and call MultiDelete requests
387 for {
388 if finish {
389 break
390 }
391 count := 0
392 var batch []ObjectInfo
393
394 // Try to gather 1000 entries
395 for object := range objectsCh {
396 if hasInvalidXMLChar(object.Key) {
397 // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document.
398 removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
399 VersionID: object.VersionID,
400 GovernanceBypass: opts.GovernanceBypass,
401 })
402 if err := removeResult.Err; err != nil {
403 // Version does not exist is not an error ignore and continue.
404 switch ToErrorResponse(err).Code {
405 case "InvalidArgument", "NoSuchVersion":
406 continue
407 }
408 resultCh <- removeResult
409 }
410
411 resultCh <- removeResult
412 continue
413 }
414
415 batch = append(batch, object)
416 if count++; count >= maxEntries {
417 break
418 }
419 }
420 if count == 0 {
421 // Multi Objects Delete API doesn't accept empty object list, quit immediately
422 break
423 }
424 if count < maxEntries {
425 // We didn't have 1000 entries, so this is the last batch
426 finish = true
427 }
428
429 // Build headers.
430 headers := make(http.Header)
431 if opts.GovernanceBypass {
432 // Set the bypass goverenance retention header
433 headers.Set(amzBypassGovernance, "true")
434 }
435
436 // Generate remove multi objects XML request
437 removeBytes := generateRemoveMultiObjectsRequest(batch)
438 // Execute GET on bucket to list objects.
439 resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
440 bucketName: bucketName,
441 queryValues: urlValues,
442 contentBody: bytes.NewReader(removeBytes),
443 contentLength: int64(len(removeBytes)),
444 contentMD5Base64: sumMD5Base64(removeBytes),
445 contentSHA256Hex: sum256Hex(removeBytes),
446 customHeader: headers,
447 })
448 if resp != nil {
449 if resp.StatusCode != http.StatusOK {
450 e := httpRespToErrorResponse(resp, bucketName, "")
451 resultCh <- RemoveObjectResult{ObjectName: "", Err: e}
452 }
453 }
454 if err != nil {
455 for _, b := range batch {
456 resultCh <- RemoveObjectResult{
457 ObjectName: b.Key,
458 ObjectVersionID: b.VersionID,
459 Err: err,
460 }
461 }
462 continue
463 }
464
465 // Process multiobjects remove xml response
466 processRemoveMultiObjectsResponse(resp.Body, resultCh)
467
468 closeResponse(resp)
469 }
470}
471
472// RemoveIncompleteUpload aborts an partially uploaded object.
473func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
474 // Input validation.
475 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
476 return err
477 }
478 if err := s3utils.CheckValidObjectName(objectName); err != nil {
479 return err
480 }
481 // Find multipart upload ids of the object to be aborted.
482 uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName)
483 if err != nil {
484 return err
485 }
486
487 for _, uploadID := range uploadIDs {
488 // abort incomplete multipart upload, based on the upload id passed.
489 err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
490 if err != nil {
491 return err
492 }
493 }
494
495 return nil
496}
497
// abortMultipartUpload aborts a multipart upload for the given
// uploadID, all previously uploaded parts are deleted.
func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return err
	}

	// Initialize url queries.
	urlValues := make(url.Values)
	urlValues.Set("uploadId", uploadID)

	// Execute DELETE on multipart upload.
	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		// A successful abort returns 204 No Content; anything else is an error.
		if resp.StatusCode != http.StatusNoContent {
			// Abort has no response body, handle it for any errors.
			var errorResponse ErrorResponse
			switch resp.StatusCode {
			case http.StatusNotFound:
				// 404 means the upload ID was never started or was already
				// aborted/completed. This is needed specifically for abort
				// and it cannot be converged into default case.
				errorResponse = ErrorResponse{
					Code:       "NoSuchUpload",
					Message:    "The specified multipart upload does not exist.",
					BucketName: bucketName,
					Key:        objectName,
					RequestID:  resp.Header.Get("x-amz-request-id"),
					HostID:     resp.Header.Get("x-amz-id-2"),
					Region:     resp.Header.Get("x-amz-bucket-region"),
				}
			default:
				return httpRespToErrorResponse(resp, bucketName, objectName)
			}
			return errorResponse
		}
	}
	return nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go
new file mode 100644
index 0000000..9ec8f4f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-restore.go
@@ -0,0 +1,182 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * (C) 2018-2021 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/xml"
24 "net/http"
25 "net/url"
26
27 "github.com/minio/minio-go/v7/pkg/s3utils"
28 "github.com/minio/minio-go/v7/pkg/tags"
29)
30
// RestoreType represents the restore request type
type RestoreType string

const (
	// RestoreSelect represents the restore SELECT operation
	RestoreSelect = RestoreType("SELECT")
)

// TierType represents a retrieval tier
type TierType string

const (
	// TierStandard is the standard retrieval tier
	TierStandard = TierType("Standard")
	// TierBulk is the bulk retrieval tier
	TierBulk = TierType("Bulk")
	// TierExpedited is the expedited retrieval tier
	TierExpedited = TierType("Expedited")
)
50
// GlacierJobParameters represents the retrieval tier parameter
type GlacierJobParameters struct {
	// Tier selects the retrieval speed (Standard, Bulk or Expedited).
	Tier TierType
}

// Encryption contains the type of server-side encryption used during object retrieval
type Encryption struct {
	EncryptionType string
	KMSContext     string
	// Serialized as "KMSKeyId" (lowercase 'd') to match the S3 XML schema.
	KMSKeyID string `xml:"KMSKeyId"`
}
62
// MetadataEntry represents a metadata information of the restored object.
type MetadataEntry struct {
	Name  string
	Value string
}

// S3 holds properties of the copy of the archived object
type S3 struct {
	AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"`
	BucketName        string
	Prefix            string
	CannedACL         *string        `xml:"CannedACL,omitempty"`
	Encryption        *Encryption    `xml:"Encryption,omitempty"`
	StorageClass      *string        `xml:"StorageClass,omitempty"`
	Tagging           *tags.Tags     `xml:"Tagging,omitempty"`
	UserMetadata      *MetadataEntry `xml:"UserMetadata,omitempty"`
}

// SelectParameters holds the select request parameters
type SelectParameters struct {
	XMLName             xml.Name `xml:"SelectParameters"`
	ExpressionType      QueryExpressionType
	Expression          string
	InputSerialization  SelectObjectInputSerialization
	OutputSerialization SelectObjectOutputSerialization
}

// OutputLocation holds properties of the copy of the archived object
type OutputLocation struct {
	XMLName xml.Name `xml:"OutputLocation"`
	S3      S3       `xml:"S3"`
}

// RestoreRequest holds properties of the restore object request.
// All fields are pointers with omitempty tags so that only the fields the
// caller explicitly sets (via the Set* helpers) appear in the XML payload.
type RestoreRequest struct {
	XMLName              xml.Name              `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"`
	Type                 *RestoreType          `xml:"Type,omitempty"`
	Tier                 *TierType             `xml:"Tier,omitempty"`
	Days                 *int                  `xml:"Days,omitempty"`
	GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"`
	Description          *string               `xml:"Description,omitempty"`
	SelectParameters     *SelectParameters     `xml:"SelectParameters,omitempty"`
	OutputLocation       *OutputLocation       `xml:"OutputLocation,omitempty"`
}
107
// SetDays sets the days parameter of the restore request.
// The value is stored by pointer so an unset field is omitted from the XML.
func (r *RestoreRequest) SetDays(v int) {
	r.Days = &v
}

// SetGlacierJobParameters sets the GlacierJobParameters of the restore request.
func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) {
	r.GlacierJobParameters = &v
}

// SetType sets the type of the restore request.
func (r *RestoreRequest) SetType(v RestoreType) {
	r.Type = &v
}

// SetTier sets the retrieval tier of the restore request.
func (r *RestoreRequest) SetTier(v TierType) {
	r.Tier = &v
}

// SetDescription sets the description of the restore request.
func (r *RestoreRequest) SetDescription(v string) {
	r.Description = &v
}

// SetSelectParameters sets SelectParameters of the restore select request.
func (r *RestoreRequest) SetSelectParameters(v SelectParameters) {
	r.SelectParameters = &v
}

// SetOutputLocation sets the properties of the copy of the archived object.
func (r *RestoreRequest) SetOutputLocation(v OutputLocation) {
	r.OutputLocation = &v
}
142
143// RestoreObject is a implementation of https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API
144func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error {
145 // Input validation.
146 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
147 return err
148 }
149 if err := s3utils.CheckValidObjectName(objectName); err != nil {
150 return err
151 }
152
153 restoreRequestBytes, err := xml.Marshal(req)
154 if err != nil {
155 return err
156 }
157
158 urlValues := make(url.Values)
159 urlValues.Set("restore", "")
160 if versionID != "" {
161 urlValues.Set("versionId", versionID)
162 }
163
164 // Execute POST on bucket/object.
165 resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
166 bucketName: bucketName,
167 objectName: objectName,
168 queryValues: urlValues,
169 contentMD5Base64: sumMD5Base64(restoreRequestBytes),
170 contentSHA256Hex: sum256Hex(restoreRequestBytes),
171 contentBody: bytes.NewReader(restoreRequestBytes),
172 contentLength: int64(len(restoreRequestBytes)),
173 })
174 defer closeResponse(resp)
175 if err != nil {
176 return err
177 }
178 if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK {
179 return httpRespToErrorResponse(resp, bucketName, "")
180 }
181 return nil
182}
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
new file mode 100644
index 0000000..1527b74
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -0,0 +1,390 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "encoding/xml"
22 "errors"
23 "io"
24 "reflect"
25 "time"
26)
27
// listAllMyBucketsResult container for listBuckets response.
type listAllMyBucketsResult struct {
	// Container for one or more buckets.
	Buckets struct {
		Bucket []BucketInfo
	}
	Owner owner
}

// owner container for bucket owner information.
type owner struct {
	DisplayName string
	ID          string
}

// CommonPrefix container for prefix response.
// A common prefix is a group of keys rolled up under a shared prefix
// when a listing request specifies a delimiter.
type CommonPrefix struct {
	Prefix string
}
47
// ListBucketV2Result container for listObjects response version 2.
type ListBucketV2Result struct {
	// A response can contain CommonPrefixes only if you have
	// specified a delimiter.
	CommonPrefixes []CommonPrefix
	// Metadata about each object returned.
	Contents  []ObjectInfo
	Delimiter string

	// Encoding type used to encode object keys in the response.
	EncodingType string

	// A flag that indicates whether or not ListObjects returned all of the results
	// that satisfied the search criteria.
	IsTruncated bool
	MaxKeys     int64
	Name        string

	// Hold the token that will be sent in the next request to fetch the next group of keys
	NextContinuationToken string

	ContinuationToken string
	Prefix            string

	// FetchOwner and StartAfter are currently not used
	FetchOwner string
	StartAfter string
}
76
// Version is an element in the list object versions response
type Version struct {
	ETag         string
	IsLatest     bool
	Key          string
	LastModified time.Time
	Owner        Owner
	Size         int64
	StorageClass string
	VersionID    string `xml:"VersionId"`

	// x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value.
	// Only returned by MinIO servers.
	UserMetadata StringMap `json:"userMetadata,omitempty"`

	// x-amz-tagging values in their k/v values.
	// Only returned by MinIO servers.
	UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`

	// Erasure-coding layout info, only returned by MinIO servers.
	Internal *struct {
		K int // Data blocks
		M int // Parity blocks
	} `xml:"Internal"`

	// isDeleteMarker is set by ListVersionsResult.UnmarshalXML when this
	// entry was decoded from a <DeleteMarker> element rather than <Version>.
	isDeleteMarker bool
}

// ListVersionsResult is an element in the list object versions response
// and has a special Unmarshaler because we need to preserver the order
// of <Version> and <DeleteMarker> in ListVersionsResult.Versions slice
type ListVersionsResult struct {
	Versions []Version

	CommonPrefixes      []CommonPrefix
	Name                string
	Prefix              string
	Delimiter           string
	MaxKeys             int64
	EncodingType        string
	IsTruncated         bool
	KeyMarker           string
	VersionIDMarker     string
	NextKeyMarker       string
	NextVersionIDMarker string
}
122
// UnmarshalXML is a custom unmarshal code for the response of ListObjectVersions, the custom
// code will unmarshal <Version> and <DeleteMarker> tags and save them in Versions field to
// preserve the lexical order of the listing.
func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (err error) {
	for {
		// Read tokens from the XML document in a stream.
		t, err := d.Token()
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}

		se, ok := t.(xml.StartElement)
		if ok {
			tagName := se.Name.Local
			switch tagName {
			case "Name", "Prefix",
				"Delimiter", "EncodingType",
				"KeyMarker", "NextKeyMarker":
				var s string
				if err = d.DecodeElement(&s, &se); err != nil {
					return err
				}
				// These tag names match the struct field names exactly,
				// so the string fields are assigned via reflection.
				v := reflect.ValueOf(l).Elem().FieldByName(tagName)
				if v.IsValid() {
					v.SetString(s)
				}
			case "VersionIdMarker":
				// VersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
				var s string
				if err = d.DecodeElement(&s, &se); err != nil {
					return err
				}
				l.VersionIDMarker = s
			case "NextVersionIdMarker":
				// NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
				var s string
				if err = d.DecodeElement(&s, &se); err != nil {
					return err
				}
				l.NextVersionIDMarker = s
			case "IsTruncated": // bool
				var b bool
				if err = d.DecodeElement(&b, &se); err != nil {
					return err
				}
				l.IsTruncated = b
			case "MaxKeys": // int64
				var i int64
				if err = d.DecodeElement(&i, &se); err != nil {
					return err
				}
				l.MaxKeys = i
			case "CommonPrefixes":
				var cp CommonPrefix
				if err = d.DecodeElement(&cp, &se); err != nil {
					return err
				}
				l.CommonPrefixes = append(l.CommonPrefixes, cp)
			case "DeleteMarker", "Version":
				// Both element kinds are appended to the same slice so the
				// server's lexical ordering of versions is preserved.
				var v Version
				if err = d.DecodeElement(&v, &se); err != nil {
					return err
				}
				if tagName == "DeleteMarker" {
					v.isDeleteMarker = true
				}
				l.Versions = append(l.Versions, v)
			default:
				return errors.New("unrecognized option:" + tagName)
			}

		}
	}
	return nil
}
201
// ListBucketResult container for listObjects response.
type ListBucketResult struct {
	// A response can contain CommonPrefixes only if you have
	// specified a delimiter.
	CommonPrefixes []CommonPrefix
	// Metadata about each object returned.
	Contents  []ObjectInfo
	Delimiter string

	// Encoding type used to encode object keys in the response.
	EncodingType string

	// A flag that indicates whether or not ListObjects returned all of the results
	// that satisfied the search criteria.
	IsTruncated bool
	Marker      string
	MaxKeys     int64
	Name        string

	// When response is truncated (the IsTruncated element value in
	// the response is true), you can use the key name in this field
	// as marker in the subsequent request to get next set of objects.
	// Object storage lists objects in alphabetical order Note: This
	// element is returned only if you have delimiter request
	// parameter specified. If response does not include the NextMaker
	// and it is truncated, you can use the value of the last Key in
	// the response as the marker in the subsequent request to get the
	// next set of object keys.
	NextMarker string
	Prefix     string
}

// ListMultipartUploadsResult container for ListMultipartUploads response
type ListMultipartUploadsResult struct {
	Bucket             string
	KeyMarker          string
	UploadIDMarker     string `xml:"UploadIdMarker"`
	NextKeyMarker      string
	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
	EncodingType       string
	MaxUploads         int64
	IsTruncated        bool
	Uploads            []ObjectMultipartInfo `xml:"Upload"`
	Prefix             string
	Delimiter          string
	// A response can contain CommonPrefixes only if you specify a delimiter.
	CommonPrefixes []CommonPrefix
}
250
// initiator container for who initiated multipart upload.
type initiator struct {
	ID          string
	DisplayName string
}

// copyObjectResult container for copy object response.
type copyObjectResult struct {
	ETag         string
	LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
}

// ObjectPart container for particular part of an object.
type ObjectPart struct {
	// Part number identifies the part.
	PartNumber int

	// Date and time the part was uploaded.
	LastModified time.Time

	// Entity tag returned when the part was uploaded, usually md5sum
	// of the part.
	ETag string

	// Size of the uploaded part data.
	Size int64

	// Checksum values of each part.
	ChecksumCRC32  string
	ChecksumCRC32C string
	ChecksumSHA1   string
	ChecksumSHA256 string
}
284
// ListObjectPartsResult container for ListObjectParts response.
type ListObjectPartsResult struct {
	Bucket   string
	Key      string
	UploadID string `xml:"UploadId"`

	Initiator initiator
	Owner     owner

	StorageClass         string
	PartNumberMarker     int
	NextPartNumberMarker int
	MaxParts             int

	// Indicates whether the returned list of parts is truncated.
	IsTruncated bool
	ObjectParts []ObjectPart `xml:"Part"`

	EncodingType string
}

// initiateMultipartUploadResult container for InitiateMultiPartUpload
// response.
type initiateMultipartUploadResult struct {
	Bucket   string
	Key      string
	UploadID string `xml:"UploadId"`
}
313
// completeMultipartUploadResult container for completed multipart
// upload response.
type completeMultipartUploadResult struct {
	Location string
	Bucket   string
	Key      string
	ETag     string

	// Checksum values, hash of hashes of parts.
	ChecksumCRC32  string
	ChecksumCRC32C string
	ChecksumSHA1   string
	ChecksumSHA256 string
}

// CompletePart sub container lists individual part numbers and their
// md5sum, part of completeMultipartUpload.
type CompletePart struct {
	// Part number identifies the part.
	PartNumber int
	ETag       string

	// Checksum values; omitted from the request XML when empty.
	ChecksumCRC32  string `xml:"ChecksumCRC32,omitempty"`
	ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
	ChecksumSHA1   string `xml:"ChecksumSHA1,omitempty"`
	ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
}

// completeMultipartUpload container for completing multipart upload.
type completeMultipartUpload struct {
	XMLName xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
	Parts   []CompletePart `xml:"Part"`
}
348
// createBucketConfiguration container for bucket configuration.
type createBucketConfiguration struct {
	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
	Location string   `xml:"LocationConstraint"`
}

// deleteObject container for Delete element in MultiObjects Delete XML request
type deleteObject struct {
	Key       string
	VersionID string `xml:"VersionId,omitempty"`
}

// deletedObject container for Deleted element in MultiObjects Delete XML response
type deletedObject struct {
	Key       string
	VersionID string `xml:"VersionId,omitempty"`
	// Delete-marker info; populated only for versioned deletes and
	// forwarded into RemoveObjectResult by processRemoveMultiObjectsResponse.
	DeleteMarker          bool
	DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
}

// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
type nonDeletedObject struct {
	Key       string
	Code      string
	Message   string
	VersionID string `xml:"VersionId"`
}

// deleteMultiObjects container for MultiObjects Delete XML request
type deleteMultiObjects struct {
	XMLName xml.Name `xml:"Delete"`
	Quiet   bool
	Objects []deleteObject `xml:"Object"`
}

// deleteMultiObjectsResult container for MultiObjects Delete XML response
type deleteMultiObjectsResult struct {
	XMLName          xml.Name           `xml:"DeleteResult"`
	DeletedObjects   []deletedObject    `xml:"Deleted"`
	UnDeletedObjects []nonDeletedObject `xml:"Error"`
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go
new file mode 100644
index 0000000..628d967
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-select.go
@@ -0,0 +1,757 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * (C) 2018-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/binary"
24 "encoding/xml"
25 "errors"
26 "fmt"
27 "hash"
28 "hash/crc32"
29 "io"
30 "net/http"
31 "net/url"
32 "strings"
33
34 "github.com/minio/minio-go/v7/pkg/encrypt"
35 "github.com/minio/minio-go/v7/pkg/s3utils"
36)
37
// CSVFileHeaderInfo - is the parameter for whether to utilize headers.
type CSVFileHeaderInfo string

// Constants for file header info. Sent as the <FileHeaderInfo> element of
// the CSV input serialization.
const (
	CSVFileHeaderInfoNone   CSVFileHeaderInfo = "NONE"
	CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
	CSVFileHeaderInfoUse    CSVFileHeaderInfo = "USE"
)

// SelectCompressionType - is the parameter for what type of compression is
// present on the stored object.
type SelectCompressionType string

// Constants for compression types under select API.
const (
	SelectCompressionNONE SelectCompressionType = "NONE"
	SelectCompressionGZIP SelectCompressionType = "GZIP"
	SelectCompressionBZIP SelectCompressionType = "BZIP2"

	// Non-standard compression schemes, supported by MinIO hosts:

	SelectCompressionZSTD   SelectCompressionType = "ZSTD"   // Zstandard compression.
	SelectCompressionLZ4    SelectCompressionType = "LZ4"    // LZ4 Stream
	SelectCompressionS2     SelectCompressionType = "S2"     // S2 Stream
	SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream
)

// CSVQuoteFields - is the parameter for how CSV fields are quoted.
type CSVQuoteFields string

// Constants for csv quote styles.
const (
	CSVQuoteFieldsAlways   CSVQuoteFields = "Always"
	CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
)

// QueryExpressionType - is of what syntax the expression is; this should only
// be SQL.
type QueryExpressionType string

// Constants for expression type.
const (
	QueryExpressionTypeSQL QueryExpressionType = "SQL"
)

// JSONType determines json input serialization type.
type JSONType string

// Constants for JSONTypes.
const (
	JSONDocumentType JSONType = "DOCUMENT"
	JSONLinesType    JSONType = "LINES"
)

// ParquetInputOptions parquet input specific options.
// Parquet has no tunable input parameters; presence of this (empty) struct
// in the input serialization selects the Parquet format.
type ParquetInputOptions struct{}
95
// CSVInputOptions csv input specific options
//
// Each public field is paired with an unexported *Set flag so MarshalXML can
// distinguish "explicitly set to the empty string" from "left at its zero
// value": a field is serialized when it is non-empty OR its flag is true.
// Use the Set* methods to record an explicit assignment.
type CSVInputOptions struct {
	FileHeaderInfo    CSVFileHeaderInfo
	fileHeaderInfoSet bool

	RecordDelimiter    string
	recordDelimiterSet bool

	FieldDelimiter    string
	fieldDelimiterSet bool

	QuoteCharacter    string
	quoteCharacterSet bool

	QuoteEscapeCharacter    string
	quoteEscapeCharacterSet bool

	Comments    string
	commentsSet bool
}

// SetFileHeaderInfo sets the file header info in the CSV input options
func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) {
	c.FileHeaderInfo = val
	c.fileHeaderInfoSet = true
}

// SetRecordDelimiter sets the record delimiter in the CSV input options
func (c *CSVInputOptions) SetRecordDelimiter(val string) {
	c.RecordDelimiter = val
	c.recordDelimiterSet = true
}

// SetFieldDelimiter sets the field delimiter in the CSV input options
func (c *CSVInputOptions) SetFieldDelimiter(val string) {
	c.FieldDelimiter = val
	c.fieldDelimiterSet = true
}

// SetQuoteCharacter sets the quote character in the CSV input options
func (c *CSVInputOptions) SetQuoteCharacter(val string) {
	c.QuoteCharacter = val
	c.quoteCharacterSet = true
}

// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options
func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) {
	c.QuoteEscapeCharacter = val
	c.quoteEscapeCharacterSet = true
}

// SetComments sets the comments character in the CSV input options
func (c *CSVInputOptions) SetComments(val string) {
	c.Comments = val
	c.commentsSet = true
}
152
153// MarshalXML - produces the xml representation of the CSV input options struct
154func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
155 if err := e.EncodeToken(start); err != nil {
156 return err
157 }
158 if c.FileHeaderInfo != "" || c.fileHeaderInfoSet {
159 if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil {
160 return err
161 }
162 }
163
164 if c.RecordDelimiter != "" || c.recordDelimiterSet {
165 if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
166 return err
167 }
168 }
169
170 if c.FieldDelimiter != "" || c.fieldDelimiterSet {
171 if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil {
172 return err
173 }
174 }
175
176 if c.QuoteCharacter != "" || c.quoteCharacterSet {
177 if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil {
178 return err
179 }
180 }
181
182 if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet {
183 if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil {
184 return err
185 }
186 }
187
188 if c.Comments != "" || c.commentsSet {
189 if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil {
190 return err
191 }
192 }
193
194 return e.EncodeToken(xml.EndElement{Name: start.Name})
195}
196
// CSVOutputOptions csv output specific options
//
// As with CSVInputOptions, each public field carries an unexported *Set flag
// so MarshalXML can serialize an explicitly-set empty value; use the Set*
// methods to record explicit assignments.
type CSVOutputOptions struct {
	QuoteFields    CSVQuoteFields
	quoteFieldsSet bool

	RecordDelimiter    string
	recordDelimiterSet bool

	FieldDelimiter    string
	fieldDelimiterSet bool

	QuoteCharacter    string
	quoteCharacterSet bool

	QuoteEscapeCharacter    string
	quoteEscapeCharacterSet bool
}

// SetQuoteFields sets the quote field parameter in the CSV output options
func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) {
	c.QuoteFields = val
	c.quoteFieldsSet = true
}

// SetRecordDelimiter sets the record delimiter character in the CSV output options
func (c *CSVOutputOptions) SetRecordDelimiter(val string) {
	c.RecordDelimiter = val
	c.recordDelimiterSet = true
}

// SetFieldDelimiter sets the field delimiter character in the CSV output options
func (c *CSVOutputOptions) SetFieldDelimiter(val string) {
	c.FieldDelimiter = val
	c.fieldDelimiterSet = true
}

// SetQuoteCharacter sets the quote character in the CSV output options
func (c *CSVOutputOptions) SetQuoteCharacter(val string) {
	c.QuoteCharacter = val
	c.quoteCharacterSet = true
}

// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options
func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) {
	c.QuoteEscapeCharacter = val
	c.quoteEscapeCharacterSet = true
}
244
245// MarshalXML - produces the xml representation of the CSVOutputOptions struct
246func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
247 if err := e.EncodeToken(start); err != nil {
248 return err
249 }
250
251 if c.QuoteFields != "" || c.quoteFieldsSet {
252 if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil {
253 return err
254 }
255 }
256
257 if c.RecordDelimiter != "" || c.recordDelimiterSet {
258 if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
259 return err
260 }
261 }
262
263 if c.FieldDelimiter != "" || c.fieldDelimiterSet {
264 if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil {
265 return err
266 }
267 }
268
269 if c.QuoteCharacter != "" || c.quoteCharacterSet {
270 if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil {
271 return err
272 }
273 }
274
275 if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet {
276 if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil {
277 return err
278 }
279 }
280
281 return e.EncodeToken(xml.EndElement{Name: start.Name})
282}
283
// JSONInputOptions json input specific options
type JSONInputOptions struct {
	Type    JSONType // DOCUMENT or LINES
	typeSet bool     // true once SetType is called; lets MarshalXML emit an explicitly-set empty value
}

// SetType sets the JSON type in the JSON input options
func (j *JSONInputOptions) SetType(typ JSONType) {
	j.Type = typ
	j.typeSet = true
}
295
296// MarshalXML - produces the xml representation of the JSONInputOptions struct
297func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
298 if err := e.EncodeToken(start); err != nil {
299 return err
300 }
301
302 if j.Type != "" || j.typeSet {
303 if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil {
304 return err
305 }
306 }
307
308 return e.EncodeToken(xml.EndElement{Name: start.Name})
309}
310
// JSONOutputOptions - json output specific options
type JSONOutputOptions struct {
	RecordDelimiter    string
	recordDelimiterSet bool // true once SetRecordDelimiter is called; lets MarshalXML emit an explicitly-set empty value
}

// SetRecordDelimiter sets the record delimiter in the JSON output options
func (j *JSONOutputOptions) SetRecordDelimiter(val string) {
	j.RecordDelimiter = val
	j.recordDelimiterSet = true
}
322
323// MarshalXML - produces the xml representation of the JSONOutputOptions struct
324func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
325 if err := e.EncodeToken(start); err != nil {
326 return err
327 }
328
329 if j.RecordDelimiter != "" || j.recordDelimiterSet {
330 if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
331 return err
332 }
333 }
334
335 return e.EncodeToken(xml.EndElement{Name: start.Name})
336}
337
// SelectObjectInputSerialization - input serialization parameters
type SelectObjectInputSerialization struct {
	CompressionType SelectCompressionType `xml:"CompressionType,omitempty"`
	Parquet         *ParquetInputOptions  `xml:"Parquet,omitempty"`
	CSV             *CSVInputOptions      `xml:"CSV,omitempty"`
	JSON            *JSONInputOptions     `xml:"JSON,omitempty"`
}

// SelectObjectOutputSerialization - output serialization parameters.
type SelectObjectOutputSerialization struct {
	CSV  *CSVOutputOptions  `xml:"CSV,omitempty"`
	JSON *JSONOutputOptions `xml:"JSON,omitempty"`
}

// SelectObjectOptions - represents the input select body
type SelectObjectOptions struct {
	XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"`
	// ServerSideEncryption is applied as HTTP headers (see Header), never
	// serialized into the XML request body.
	ServerSideEncryption encrypt.ServerSide `xml:"-"`
	Expression           string
	ExpressionType       QueryExpressionType
	InputSerialization   SelectObjectInputSerialization
	OutputSerialization  SelectObjectOutputSerialization
	RequestProgress      struct {
		Enabled bool
	}
}

// Header returns the http.Header representation of the SelectObject options.
// Only SSE-C encryption contributes headers here; other encryption types are
// left out.
func (o SelectObjectOptions) Header() http.Header {
	headers := make(http.Header)
	if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
		o.ServerSideEncryption.Marshal(headers)
	}
	return headers
}

// SelectObjectType - is the parameter which defines what type of object the
// operation is being performed on.
type SelectObjectType string

// Constants for input data types.
const (
	SelectObjectTypeCSV     SelectObjectType = "CSV"
	SelectObjectTypeJSON    SelectObjectType = "JSON"
	SelectObjectTypeParquet SelectObjectType = "Parquet"
)
384
// preludeInfo is used for keeping track of necessary information from the
// prelude: the total message length and the header-section length, in bytes,
// as read from the first 8 bytes of each event-stream message.
type preludeInfo struct {
	totalLen  uint32
	headerLen uint32
}

// SelectResults is used for the streaming responses from the server.
type SelectResults struct {
	pipeReader *io.PipeReader   // delivers assembled Records payloads to Read
	resp       *http.Response   // underlying SelectObjectContent HTTP response
	stats      *StatsMessage    // populated when a Stats event arrives
	progress   *ProgressMessage // populated when a Progress event arrives
}

// ProgressMessage is a struct for progress xml message.
type ProgressMessage struct {
	XMLName xml.Name `xml:"Progress" json:"-"`
	StatsMessage
}

// StatsMessage is a struct for stat xml message.
type StatsMessage struct {
	XMLName        xml.Name `xml:"Stats" json:"-"`
	BytesScanned   int64
	BytesProcessed int64
	BytesReturned  int64
}

// messageType represents the type of message.
type messageType string

const (
	errorMsg  messageType = "error"
	commonMsg messageType = "event"
)

// eventType represents the type of event.
type eventType string

// list of event-types returned by Select API.
const (
	endEvent      eventType = "End"
	recordsEvent  eventType = "Records"
	progressEvent eventType = "Progress"
	statsEvent    eventType = "Stats"
)

// contentType represents content type of event.
type contentType string

const (
	xmlContent contentType = "text/xml"
)
439
440// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API.
441func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) {
442 // Input validation.
443 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
444 return nil, err
445 }
446 if err := s3utils.CheckValidObjectName(objectName); err != nil {
447 return nil, err
448 }
449
450 selectReqBytes, err := xml.Marshal(opts)
451 if err != nil {
452 return nil, err
453 }
454
455 urlValues := make(url.Values)
456 urlValues.Set("select", "")
457 urlValues.Set("select-type", "2")
458
459 // Execute POST on bucket/object.
460 resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
461 bucketName: bucketName,
462 objectName: objectName,
463 queryValues: urlValues,
464 customHeader: opts.Header(),
465 contentMD5Base64: sumMD5Base64(selectReqBytes),
466 contentSHA256Hex: sum256Hex(selectReqBytes),
467 contentBody: bytes.NewReader(selectReqBytes),
468 contentLength: int64(len(selectReqBytes)),
469 })
470 if err != nil {
471 return nil, err
472 }
473
474 return NewSelectResults(resp, bucketName)
475}
476
477// NewSelectResults creates a Select Result parser that parses the response
478// and returns a Reader that will return parsed and assembled select output.
479func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) {
480 if resp.StatusCode != http.StatusOK {
481 return nil, httpRespToErrorResponse(resp, bucketName, "")
482 }
483
484 pipeReader, pipeWriter := io.Pipe()
485 streamer := &SelectResults{
486 resp: resp,
487 stats: &StatsMessage{},
488 progress: &ProgressMessage{},
489 pipeReader: pipeReader,
490 }
491 streamer.start(pipeWriter)
492 return streamer, nil
493}
494
// Close - closes the underlying response body and the stream reader.
func (s *SelectResults) Close() error {
	// Close the HTTP response after the pipe, so a blocked Read is released first.
	defer closeResponse(s.resp)
	return s.pipeReader.Close()
}

// Read - is a reader compatible implementation for SelectObjectContent records.
// It blocks until the background decoder (started in start) writes record
// payload bytes into the pipe.
func (s *SelectResults) Read(b []byte) (n int, err error) {
	return s.pipeReader.Read(b)
}

// Stats - information about a request's stats when processing is complete.
// The value is filled in as Stats events arrive on the stream.
func (s *SelectResults) Stats() *StatsMessage {
	return s.stats
}

// Progress - information about the progress of a request.
// The value is filled in as Progress events arrive on the stream.
func (s *SelectResults) Progress() *ProgressMessage {
	return s.progress
}
515
// start is the main function that decodes the large byte array into
// several events that are sent through the eventstream.
//
// It launches a goroutine that reads one event-stream message per loop
// iteration: prelude (lengths + CRC), headers, payload, then the trailing
// message CRC. Records payloads are streamed into pipeWriter; Stats and
// Progress payloads are decoded into s.stats / s.progress. The goroutine
// exits — closing the pipe and the HTTP response — on an End event, an
// error message from the server, or any decode/CRC failure.
func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
	go func() {
		for {
			var prelude preludeInfo
			headers := make(http.Header)
			var err error

			// Create CRC code. Everything read through crcReader is folded
			// into crc so the trailing message CRC can be verified.
			crc := crc32.New(crc32.IEEETable)
			crcReader := io.TeeReader(s.resp.Body, crc)

			// Extract the prelude(12 bytes) into a struct to extract relevant information.
			prelude, err = processPrelude(crcReader, crc)
			if err != nil {
				pipeWriter.CloseWithError(err)
				closeResponse(s.resp)
				return
			}

			// Extract the headers(variable bytes) into a struct to extract relevant information
			if prelude.headerLen > 0 {
				if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil {
					pipeWriter.CloseWithError(err)
					closeResponse(s.resp)
					return
				}
			}

			// Get the actual payload length so that the appropriate amount of
			// bytes can be read or parsed.
			payloadLen := prelude.PayloadLen()

			m := messageType(headers.Get("message-type"))

			switch m {
			case errorMsg:
				// Server-side error: surface code and message to the reader.
				pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\""))
				closeResponse(s.resp)
				return
			case commonMsg:
				// Get content-type of the payload.
				c := contentType(headers.Get("content-type"))

				// Get event type of the payload.
				e := eventType(headers.Get("event-type"))

				// Handle all supported events.
				switch e {
				case endEvent:
					// Normal termination of the stream.
					pipeWriter.Close()
					closeResponse(s.resp)
					return
				case recordsEvent:
					// Stream the raw record bytes to the consumer.
					if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil {
						pipeWriter.CloseWithError(err)
						closeResponse(s.resp)
						return
					}
				case progressEvent:
					switch c {
					case xmlContent:
						if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil {
							pipeWriter.CloseWithError(err)
							closeResponse(s.resp)
							return
						}
					default:
						pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent))
						closeResponse(s.resp)
						return
					}
				case statsEvent:
					switch c {
					case xmlContent:
						if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil {
							pipeWriter.CloseWithError(err)
							closeResponse(s.resp)
							return
						}
					default:
						pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent))
						closeResponse(s.resp)
						return
					}
				}
			}

			// Ensures that the full message's CRC is correct and
			// that the message is not corrupted. Note the CRC bytes are read
			// from s.resp.Body directly (not crcReader) so they are not
			// folded into the running checksum.
			if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil {
				pipeWriter.CloseWithError(err)
				closeResponse(s.resp)
				return
			}

		}
	}()
}
616
// PayloadLen is a function that calculates the length of the payload.
// totalLen covers the entire message: 8 bytes of prelude lengths, 4 bytes of
// prelude CRC, the headers, the payload, and the trailing 4-byte message
// CRC — hence 16 bytes plus the header length are subtracted.
func (p preludeInfo) PayloadLen() int64 {
	return int64(p.totalLen - p.headerLen - 16)
}
621
622// processPrelude is the function that reads the 12 bytes of the prelude and
623// ensures the CRC is correct while also extracting relevant information into
624// the struct,
625func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) {
626 var err error
627 pInfo := preludeInfo{}
628
629 // reads total length of the message (first 4 bytes)
630 pInfo.totalLen, err = extractUint32(prelude)
631 if err != nil {
632 return pInfo, err
633 }
634
635 // reads total header length of the message (2nd 4 bytes)
636 pInfo.headerLen, err = extractUint32(prelude)
637 if err != nil {
638 return pInfo, err
639 }
640
641 // checks that the CRC is correct (3rd 4 bytes)
642 preCRC := crc.Sum32()
643 if err := checkCRC(prelude, preCRC); err != nil {
644 return pInfo, err
645 }
646
647 return pInfo, nil
648}
649
650// extracts the relevant information from the Headers.
651func extractHeader(body io.Reader, myHeaders http.Header) error {
652 for {
653 // extracts the first part of the header,
654 headerTypeName, err := extractHeaderType(body)
655 if err != nil {
656 // Since end of file, we have read all of our headers
657 if err == io.EOF {
658 break
659 }
660 return err
661 }
662
663 // reads the 7 present in the header and ignores it.
664 extractUint8(body)
665
666 headerValueName, err := extractHeaderValue(body)
667 if err != nil {
668 return err
669 }
670
671 myHeaders.Set(headerTypeName, headerValueName)
672
673 }
674 return nil
675}
676
// extractHeaderType extracts the first half of the header message, the header type.
func extractHeaderType(body io.Reader) (string, error) {
	// extracts the one-byte length of the header name
	headerNameLen, err := extractUint8(body)
	if err != nil {
		return "", err
	}
	// extracts the string with the appropriate number of bytes
	headerName, err := extractString(body, int(headerNameLen))
	if err != nil {
		return "", err
	}
	// header names on the wire are prefixed with ":" (e.g. ":event-type")
	return strings.TrimPrefix(headerName, ":"), nil
}
691
692// extractsHeaderValue extracts the second half of the header message, the
693// header value
694func extractHeaderValue(body io.Reader) (string, error) {
695 bodyLen, err := extractUint16(body)
696 if err != nil {
697 return "", err
698 }
699 bodyName, err := extractString(body, int(bodyLen))
700 if err != nil {
701 return "", err
702 }
703 return bodyName, nil
704}
705
706// extracts a string from byte array of a particular number of bytes.
707func extractString(source io.Reader, lenBytes int) (string, error) {
708 myVal := make([]byte, lenBytes)
709 _, err := source.Read(myVal)
710 if err != nil {
711 return "", err
712 }
713 return string(myVal), nil
714}
715
716// extractUint32 extracts a 4 byte integer from the byte array.
717func extractUint32(r io.Reader) (uint32, error) {
718 buf := make([]byte, 4)
719 _, err := readFull(r, buf)
720 if err != nil {
721 return 0, err
722 }
723 return binary.BigEndian.Uint32(buf), nil
724}
725
726// extractUint16 extracts a 2 byte integer from the byte array.
727func extractUint16(r io.Reader) (uint16, error) {
728 buf := make([]byte, 2)
729 _, err := readFull(r, buf)
730 if err != nil {
731 return 0, err
732 }
733 return binary.BigEndian.Uint16(buf), nil
734}
735
736// extractUint8 extracts a 1 byte integer from the byte array.
737func extractUint8(r io.Reader) (uint8, error) {
738 buf := make([]byte, 1)
739 _, err := readFull(r, buf)
740 if err != nil {
741 return 0, err
742 }
743 return buf[0], nil
744}
745
746// checkCRC ensures that the CRC matches with the one from the reader.
747func checkCRC(r io.Reader, expect uint32) error {
748 msgCRC, err := extractUint32(r)
749 if err != nil {
750 return err
751 }
752
753 if msgCRC != expect {
754 return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
755 }
756 return nil
757}
diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go
new file mode 100644
index 0000000..b043dc4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-stat.go
@@ -0,0 +1,116 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "net/http"
23
24 "github.com/minio/minio-go/v7/pkg/s3utils"
25)
26
// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to
// control cancellations and timeouts.
//
// Returns (false, nil) when the server reports NoSuchBucket, and
// (false, err) for any other failure.
func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return false, err
	}

	// Execute HEAD on bucketName.
	resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
		bucketName:       bucketName,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		// A NoSuchBucket error code means "does not exist", not a failure
		// of the call itself.
		if ToErrorResponse(err).Code == "NoSuchBucket" {
			return false, nil
		}
		return false, err
	}
	if resp != nil {
		// Convert the response to an error first to detect NoSuchBucket,
		// then treat any other non-200 status as a genuine error.
		resperr := httpRespToErrorResponse(resp, bucketName, "")
		if ToErrorResponse(resperr).Code == "NoSuchBucket" {
			return false, nil
		}
		if resp.StatusCode != http.StatusOK {
			return false, httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	return true, nil
}
58
// StatObject verifies if object exists, you have permission to access it
// and returns information about the object.
func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ObjectInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return ObjectInfo{}, err
	}
	headers := opts.Header()
	// Internal replication hints are forwarded as MinIO-specific headers.
	if opts.Internal.ReplicationDeleteMarker {
		headers.Set(minIOBucketReplicationDeleteMarker, "true")
	}
	if opts.Internal.IsReplicationReadyForDeleteMarker {
		headers.Set(isMinioTgtReplicationReady, "true")
	}

	// Execute HEAD on objectName.
	resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		queryValues:      opts.toQueryValues(),
		contentSHA256Hex: emptySHA256Hex,
		customHeader:     headers,
	})
	defer closeResponse(resp)
	if err != nil {
		return ObjectInfo{}, err
	}

	if resp != nil {
		deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"
		replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true"
		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
			// HEAD on a versioned delete marker returns 405; report it as a
			// MethodNotAllowed error while still returning the version info.
			if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
				errResp := ErrorResponse{
					StatusCode: resp.StatusCode,
					Code:       "MethodNotAllowed",
					Message:    "The specified method is not allowed against this resource.",
					BucketName: bucketName,
					Key:        objectName,
				}
				return ObjectInfo{
					VersionID:      resp.Header.Get(amzVersionID),
					IsDeleteMarker: deleteMarker,
				}, errResp
			}
			return ObjectInfo{
				VersionID:        resp.Header.Get(amzVersionID),
				IsDeleteMarker:   deleteMarker,
				ReplicationReady: replicationReady, // whether delete marker can be replicated
			}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}

	// Parse the response headers into an ObjectInfo.
	return ToObjectInfo(bucketName, objectName, resp.Header)
}
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
new file mode 100644
index 0000000..f8a9b34
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -0,0 +1,995 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2023 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "bytes"
22 "context"
23 "encoding/base64"
24 "errors"
25 "fmt"
26 "hash/crc32"
27 "io"
28 "math/rand"
29 "net"
30 "net/http"
31 "net/http/cookiejar"
32 "net/http/httptrace"
33 "net/http/httputil"
34 "net/url"
35 "os"
36 "runtime"
37 "strings"
38 "sync"
39 "sync/atomic"
40 "time"
41
42 md5simd "github.com/minio/md5-simd"
43 "github.com/minio/minio-go/v7/pkg/credentials"
44 "github.com/minio/minio-go/v7/pkg/s3utils"
45 "github.com/minio/minio-go/v7/pkg/signer"
46 "golang.org/x/net/publicsuffix"
47)
48
// Client implements Amazon S3 compatible methods.
type Client struct {
	// Standard options.

	// Parsed endpoint url provided by the user.
	endpointURL *url.URL

	// Holds various credential providers.
	credsProvider *credentials.Credentials

	// Custom signerType value overrides all credentials.
	overrideSignerType credentials.SignatureType

	// User supplied application identity; appended to the User-Agent
	// header when both fields are non-empty (see SetAppInfo).
	appInfo struct {
		appName    string
		appVersion string
	}

	// Indicate whether we are using https or not
	secure bool

	// Needs allocation.
	httpClient     *http.Client
	httpTrace      *httptrace.ClientTrace
	bucketLocCache *bucketLocationCache

	// Advanced functionality: HTTP tracing knobs, controlled via
	// TraceOn/TraceOff/TraceErrorsOnlyOn.
	isTraceEnabled  bool
	traceErrorsOnly bool // when set, exchanges with StatusOK are not dumped
	traceOutput     io.Writer

	// S3 specific accelerated endpoint.
	s3AccelerateEndpoint string

	// Region endpoint
	region string

	// Random seed.
	random *rand.Rand

	// lookup indicates type of url lookup supported by server. If not specified,
	// default to Auto.
	lookup BucketLookupType

	// Factory for MD5 hash functions.
	md5Hasher    func() md5simd.Hasher
	sha256Hasher func() md5simd.Hasher

	// Health state, one of unknown/offline/online; always accessed
	// through the sync/atomic package.
	healthStatus int32

	// Whether requests may use trailing headers (v4 signatures only).
	trailingHeaderSupport bool
}
102
// Options for New method
type Options struct {
	Creds        *credentials.Credentials // credential providers used to sign requests
	Secure       bool                     // use https when true
	Transport    http.RoundTripper        // custom transport; nil selects DefaultTransport
	Trace        *httptrace.ClientTrace   // optional client trace attached to every request
	Region       string                   // static region; empty enables bucket-location lookup
	BucketLookup BucketLookupType         // DNS vs path style selection

	// Allows setting a custom region lookup based on URL pattern
	// not all URL patterns are covered by this library so if you
	// have a custom endpoints with many regions you can use this
	// function to perform region lookups appropriately.
	CustomRegionViaURL func(u url.URL) string

	// TrailingHeaders indicates server support of trailing headers.
	// Only supported for v4 signatures.
	TrailingHeaders bool

	// Custom hash routines. Leave nil to use standard.
	CustomMD5    func() md5simd.Hasher
	CustomSHA256 func() md5simd.Hasher
}
126
// Global constants identifying this library; embedded in the User-Agent
// header of every request.
const (
	libraryName    = "minio-go"
	libraryVersion = "v7.0.66"
)
132
// User Agent should always follow the below style.
// Please open an issue to discuss any new changes here.
//
//	MinIO (OS; ARCH) LIB/VER APP/VER
const (
	libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)
141
// BucketLookupType is type of url lookup supported by server.
type BucketLookupType int

// Different types of url lookup supported by the server.
// The zero value is BucketLookupAuto.
const (
	BucketLookupAuto BucketLookupType = iota // virtual host for Amazon/Google endpoints, path style otherwise
	BucketLookupDNS                          // always virtual-host (DNS) style
	BucketLookupPath                         // always path style
)
151
152// New - instantiate minio client with options
153func New(endpoint string, opts *Options) (*Client, error) {
154 if opts == nil {
155 return nil, errors.New("no options provided")
156 }
157 clnt, err := privateNew(endpoint, opts)
158 if err != nil {
159 return nil, err
160 }
161 // If Amazon S3 set to signature v4.
162 if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
163 clnt.overrideSignerType = credentials.SignatureV4
164 }
165
166 return clnt, nil
167}
168
169// EndpointURL returns the URL of the S3 endpoint.
170func (c *Client) EndpointURL() *url.URL {
171 endpoint := *c.endpointURL // copy to prevent callers from modifying internal state
172 return &endpoint
173}
174
175// lockedRandSource provides protected rand source, implements rand.Source interface.
176type lockedRandSource struct {
177 lk sync.Mutex
178 src rand.Source
179}
180
181// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
182func (r *lockedRandSource) Int63() (n int64) {
183 r.lk.Lock()
184 n = r.src.Int63()
185 r.lk.Unlock()
186 return
187}
188
189// Seed uses the provided seed value to initialize the generator to a
190// deterministic state.
191func (r *lockedRandSource) Seed(seed int64) {
192 r.lk.Lock()
193 r.src.Seed(seed)
194 r.lk.Unlock()
195}
196
// privateNew builds a Client from the endpoint and options without applying
// endpoint-specific signature overrides (New layers those on afterwards).
func privateNew(endpoint string, opts *Options) (*Client, error) {
	// construct endpoint.
	endpointURL, err := getEndpointURL(endpoint, opts.Secure)
	if err != nil {
		return nil, err
	}

	// Initialize cookies to preserve server sent cookies if any and replay
	// them upon each request.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		return nil, err
	}

	// instantiate new Client.
	clnt := new(Client)

	// Save the credentials.
	clnt.credsProvider = opts.Creds

	// Remember whether we are using https or not
	clnt.secure = opts.Secure

	// Save endpoint URL, user agent for future uses.
	clnt.endpointURL = endpointURL

	// Fall back to the library default transport when the caller did not
	// supply one.
	transport := opts.Transport
	if transport == nil {
		transport, err = DefaultTransport(opts.Secure)
		if err != nil {
			return nil, err
		}
	}

	clnt.httpTrace = opts.Trace

	// Instantiate http client and bucket location cache.
	// CheckRedirect returns ErrUseLastResponse so redirects are surfaced
	// to the caller instead of being followed automatically.
	clnt.httpClient = &http.Client{
		Jar:       jar,
		Transport: transport,
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}

	// Sets custom region, if region is empty bucket location cache is used automatically.
	if opts.Region == "" {
		if opts.CustomRegionViaURL != nil {
			opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL)
		} else {
			opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
		}
	}
	clnt.region = opts.Region

	// Instantiate bucket location cache.
	clnt.bucketLocCache = newBucketLocationCache()

	// Introduce a new locked random seed.
	clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})

	// Add default md5 hasher.
	clnt.md5Hasher = opts.CustomMD5
	clnt.sha256Hasher = opts.CustomSHA256
	if clnt.md5Hasher == nil {
		clnt.md5Hasher = newMd5Hasher
	}
	if clnt.sha256Hasher == nil {
		clnt.sha256Hasher = newSHA256Hasher
	}

	// NOTE(review): overrideSignerType is still its zero value at this point
	// (New sets it only after privateNew returns), so this relies on the
	// credentials package treating the zero value as v4 — confirm.
	clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4()

	// Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
	// by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
	clnt.lookup = opts.BucketLookup

	// healthcheck is not initialized
	clnt.healthStatus = unknown

	// Return.
	return clnt, nil
}
280
281// SetAppInfo - add application details to user agent.
282func (c *Client) SetAppInfo(appName, appVersion string) {
283 // if app name and version not set, we do not set a new user agent.
284 if appName != "" && appVersion != "" {
285 c.appInfo.appName = appName
286 c.appInfo.appVersion = appVersion
287 }
288}
289
290// TraceOn - enable HTTP tracing.
291func (c *Client) TraceOn(outputStream io.Writer) {
292 // if outputStream is nil then default to os.Stdout.
293 if outputStream == nil {
294 outputStream = os.Stdout
295 }
296 // Sets a new output stream.
297 c.traceOutput = outputStream
298
299 // Enable tracing.
300 c.isTraceEnabled = true
301}
302
303// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced.
304func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) {
305 c.TraceOn(outputStream)
306 c.traceErrorsOnly = true
307}
308
// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call.
// If all tracing needs to be turned off, call TraceOff().
func (c *Client) TraceErrorsOnlyOff() {
	c.traceErrorsOnly = false
}
314
315// TraceOff - disable HTTP tracing.
316func (c *Client) TraceOff() {
317 // Disable tracing.
318 c.isTraceEnabled = false
319 c.traceErrorsOnly = false
320}
321
322// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
323// requests. This feature is only specific to S3 for all other endpoints this
324// function does nothing. To read further details on s3 transfer acceleration
325// please vist -
326// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
327func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
328 if s3utils.IsAmazonEndpoint(*c.endpointURL) {
329 c.s3AccelerateEndpoint = accelerateEndpoint
330 }
331}
332
333// Hash materials provides relevant initialized hash algo writers
334// based on the expected signature type.
335//
336// - For signature v4 request if the connection is insecure compute only sha256.
337// - For signature v4 request if the connection is secure compute only md5.
338// - For anonymous request compute md5.
339func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
340 hashSums = make(map[string][]byte)
341 hashAlgos = make(map[string]md5simd.Hasher)
342 if c.overrideSignerType.IsV4() {
343 if c.secure {
344 hashAlgos["md5"] = c.md5Hasher()
345 } else {
346 if isSha256Requested {
347 hashAlgos["sha256"] = c.sha256Hasher()
348 }
349 }
350 } else {
351 if c.overrideSignerType.IsAnonymous() {
352 hashAlgos["md5"] = c.md5Hasher()
353 }
354 }
355 if isMd5Requested {
356 hashAlgos["md5"] = c.md5Hasher()
357 }
358 return hashAlgos, hashSums
359}
360
// Health states stored atomically in Client.healthStatus.
const (
	unknown = -1 // health check not running
	offline = 0  // endpoint unreachable at last probe
	online  = 1  // endpoint reachable
)
366
367// IsOnline returns true if healthcheck enabled and client is online.
368// If HealthCheck function has not been called this will always return true.
369func (c *Client) IsOnline() bool {
370 return !c.IsOffline()
371}
372
// markOffline flips healthStatus from online to offline. The CAS means the
// unknown state (health check not running) is deliberately left untouched.
func (c *Client) markOffline() {
	atomic.CompareAndSwapInt32(&c.healthStatus, online, offline)
}
377
378// IsOffline returns true if healthcheck enabled and client is offline
379// If HealthCheck function has not been called this will always return false.
380func (c *Client) IsOffline() bool {
381 return atomic.LoadInt32(&c.healthStatus) == offline
382}
383
384// HealthCheck starts a healthcheck to see if endpoint is up.
385// Returns a context cancellation function, to stop the health check,
386// and an error if health check is already started.
387func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
388 if atomic.LoadInt32(&c.healthStatus) != unknown {
389 return nil, fmt.Errorf("health check is running")
390 }
391 if hcDuration < 1*time.Second {
392 return nil, fmt.Errorf("health check duration should be at least 1 second")
393 }
394 probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
395 ctx, cancelFn := context.WithCancel(context.Background())
396 atomic.StoreInt32(&c.healthStatus, offline)
397 {
398 // Change to online, if we can connect.
399 gctx, gcancel := context.WithTimeout(ctx, 3*time.Second)
400 _, err := c.getBucketLocation(gctx, probeBucketName)
401 gcancel()
402 if !IsNetworkOrHostDown(err, false) {
403 switch ToErrorResponse(err).Code {
404 case "NoSuchBucket", "AccessDenied", "":
405 atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
406 }
407 }
408 }
409
410 go func(duration time.Duration) {
411 timer := time.NewTimer(duration)
412 defer timer.Stop()
413 for {
414 select {
415 case <-ctx.Done():
416 atomic.StoreInt32(&c.healthStatus, unknown)
417 return
418 case <-timer.C:
419 // Do health check the first time and ONLY if the connection is marked offline
420 if c.IsOffline() {
421 gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second)
422 _, err := c.getBucketLocation(gctx, probeBucketName)
423 gcancel()
424 if !IsNetworkOrHostDown(err, false) {
425 switch ToErrorResponse(err).Code {
426 case "NoSuchBucket", "AccessDenied", "":
427 atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
428 }
429 }
430 }
431
432 timer.Reset(duration)
433 }
434 }
435 }(hcDuration)
436 return cancelFn, nil
437}
438
// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
	// If set newRequest presigns the URL.
	presignURL bool

	// User supplied.
	bucketName         string
	objectName         string
	queryValues        url.Values
	customHeader       http.Header
	extraPresignHeader http.Header // extra headers folded into the presigned signature (v4 only)
	expires            int64       // presign expiry passed to signer.PreSignV2/V4 (seconds — confirm against signer package)

	// Generated by our internal code.
	bucketLocation   string
	contentBody      io.Reader
	contentLength    int64  // -1 (or below) selects transfer-encoding: chunked
	contentMD5Base64 string // carries base64 encoded md5sum
	contentSHA256Hex string // carries hex encoded sha256sum
	streamSha256     bool   // use streaming v4 signature on insecure connections
	addCrc           bool   // wrap contentBody to publish a CRC32C trailer checksum
	trailer          http.Header // (http.Request).Trailer. Requires v4 signature.
}
462
463// dumpHTTP - dump HTTP request and response.
464func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error {
465 // Starts http dump.
466 _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
467 if err != nil {
468 return err
469 }
470
471 // Filter out Signature field from Authorization header.
472 origAuth := req.Header.Get("Authorization")
473 if origAuth != "" {
474 req.Header.Set("Authorization", redactSignature(origAuth))
475 }
476
477 // Only display request header.
478 reqTrace, err := httputil.DumpRequestOut(req, false)
479 if err != nil {
480 return err
481 }
482
483 // Write request to trace output.
484 _, err = fmt.Fprint(c.traceOutput, string(reqTrace))
485 if err != nil {
486 return err
487 }
488
489 // Only display response header.
490 var respTrace []byte
491
492 // For errors we make sure to dump response body as well.
493 if resp.StatusCode != http.StatusOK &&
494 resp.StatusCode != http.StatusPartialContent &&
495 resp.StatusCode != http.StatusNoContent {
496 respTrace, err = httputil.DumpResponse(resp, true)
497 if err != nil {
498 return err
499 }
500 } else {
501 respTrace, err = httputil.DumpResponse(resp, false)
502 if err != nil {
503 return err
504 }
505 }
506
507 // Write response to trace output.
508 _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
509 if err != nil {
510 return err
511 }
512
513 // Ends the http dump.
514 _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
515 if err != nil {
516 return err
517 }
518
519 // Returns success.
520 return nil
521}
522
523// do - execute http request.
524func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
525 defer func() {
526 if IsNetworkOrHostDown(err, false) {
527 c.markOffline()
528 }
529 }()
530
531 resp, err = c.httpClient.Do(req)
532 if err != nil {
533 // Handle this specifically for now until future Golang versions fix this issue properly.
534 if urlErr, ok := err.(*url.Error); ok {
535 if strings.Contains(urlErr.Err.Error(), "EOF") {
536 return nil, &url.Error{
537 Op: urlErr.Op,
538 URL: urlErr.URL,
539 Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
540 }
541 }
542 }
543 return nil, err
544 }
545
546 // Response cannot be non-nil, report error if thats the case.
547 if resp == nil {
548 msg := "Response is empty. " + reportIssue
549 return nil, errInvalidArgument(msg)
550 }
551
552 // If trace is enabled, dump http request and response,
553 // except when the traceErrorsOnly enabled and the response's status code is ok
554 if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
555 err = c.dumpHTTP(req, resp)
556 if err != nil {
557 return nil, err
558 }
559 }
560
561 return resp, nil
562}
563
// List of success status. Responses with any other status code go through
// error parsing and the retry logic in executeMethod.
var successStatus = []int{
	http.StatusOK,
	http.StatusNoContent,
	http.StatusPartialContent,
}
570
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
	// Fail fast when the health checker has marked the endpoint offline.
	if c.IsOffline() {
		return nil, errors.New(c.endpointURL.String() + " is offline.")
	}

	var retryable bool       // Indicates if request can be retried.
	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
	reqRetry := MaxRetry     // Indicates how many times we can retry the request

	if metadata.contentBody != nil {
		// Check if body is seekable then it is retryable.
		bodySeeker, retryable = metadata.contentBody.(io.Seeker)
		// The standard streams satisfy io.Seeker but cannot actually be
		// rewound, so requests fed from them are never retried.
		switch bodySeeker {
		case os.Stdin, os.Stdout, os.Stderr:
			retryable = false
		}
		// Retry only when reader is seekable
		if !retryable {
			reqRetry = 1
		}

		// Figure out if the body can be closed - if yes
		// we will definitely close it upon the function
		// return.
		bodyCloser, ok := metadata.contentBody.(io.Closer)
		if ok {
			defer bodyCloser.Close()
		}
	}

	// Create cancel context to control 'newRetryTimer' go routine.
	retryCtx, cancel := context.WithCancel(ctx)

	// Indicate to our routine to exit cleanly upon return.
	defer cancel()

	for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
		// Retry executes the following function body if request has an
		// error until maxRetries have been exhausted, retry attempts are
		// performed after waiting for a given period of time in a
		// binomial fashion.
		if retryable {
			// Seek back to beginning for each attempt.
			if _, err = bodySeeker.Seek(0, 0); err != nil {
				// If seek failed, no need to retry.
				return nil, err
			}
		}

		// Wrap the body so a CRC32C checksum of everything read is
		// published via the HTTP trailer once the upload completes.
		if metadata.addCrc {
			if metadata.trailer == nil {
				metadata.trailer = make(http.Header, 1)
			}
			crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
			metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) {
				// Update trailer when done.
				metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
			})
			// Declare the trailer key up-front (with a placeholder value)
			// so it is announced on the request.
			metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
		}
		// Instantiate a new request.
		var req *http.Request
		req, err = c.newRequest(ctx, method, metadata)
		if err != nil {
			errResponse := ToErrorResponse(err)
			if isS3CodeRetryable(errResponse.Code) {
				continue // Retry.
			}

			return nil, err
		}

		// Initiate the request.
		res, err = c.do(req)
		if err != nil {
			if isRequestErrorRetryable(err) {
				// Retry the request
				continue
			}
			return nil, err
		}

		// For any known successful http status, return quickly.
		for _, httpStatus := range successStatus {
			if httpStatus == res.StatusCode {
				return res, nil
			}
		}

		// Read the body to be saved later.
		errBodyBytes, err := io.ReadAll(res.Body)
		// res.Body should be closed
		closeResponse(res)
		if err != nil {
			return nil, err
		}

		// Save the body.
		errBodySeeker := bytes.NewReader(errBodyBytes)
		res.Body = io.NopCloser(errBodySeeker)

		// For errors verify if its retryable otherwise fail quickly.
		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))

		// Save the body back again.
		errBodySeeker.Seek(0, 0) // Seek back to starting point.
		res.Body = io.NopCloser(errBodySeeker)

		// Bucket region if set in error response and the error
		// code dictates invalid region, we can retry the request
		// with the new region.
		//
		// Additionally, we should only retry if bucketLocation and custom
		// region is empty.
		if c.region == "" {
			switch errResponse.Code {
			case "AuthorizationHeaderMalformed":
				fallthrough
			case "InvalidRegion":
				fallthrough
			case "AccessDenied":
				if errResponse.Region == "" {
					// Region is empty we simply return the error.
					return res, err
				}
				// Region is not empty figure out a way to
				// handle this appropriately.
				if metadata.bucketName != "" {
					// Gather Cached location only if bucketName is present.
					if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region {
						c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
						continue // Retry.
					}
				} else {
					// This is for ListBuckets() fallback.
					if errResponse.Region != metadata.bucketLocation {
						// Retry if the error response has a different region
						// than the request we just made.
						metadata.bucketLocation = errResponse.Region
						continue // Retry
					}
				}
			}
		}

		// Verify if error response code is retryable.
		if isS3CodeRetryable(errResponse.Code) {
			continue // Retry.
		}

		// Verify if http status code is retryable.
		if isHTTPStatusRetryable(res.StatusCode) {
			continue // Retry.
		}

		// For all other cases break out of the retry loop.
		break
	}

	// Return an error when retry is canceled or deadlined
	if e := retryCtx.Err(); e != nil {
		return nil, e
	}

	return res, err
}
740
// newRequest - instantiate a new HTTP request for a given method.
func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) {
	// If no method is supplied default to 'POST'.
	if method == "" {
		method = http.MethodPost
	}

	location := metadata.bucketLocation
	if location == "" {
		if metadata.bucketName != "" {
			// Gather location only if bucketName is present.
			location, err = c.getBucketLocation(ctx, metadata.bucketName)
			if err != nil {
				return nil, err
			}
		}
		if location == "" {
			location = getDefaultLocation(*c.endpointURL, c.region)
		}
	}

	// Look if target url supports virtual host.
	// We explicitly disallow MakeBucket calls to not use virtual DNS style,
	// since the resolution may fail.
	isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0)
	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket

	// Construct a new target URL.
	targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location,
		isVirtualHost, metadata.queryValues)
	if err != nil {
		return nil, err
	}

	// Attach the caller-supplied client trace, if any.
	if c.httpTrace != nil {
		ctx = httptrace.WithClientTrace(ctx, c.httpTrace)
	}

	// Initialize a new HTTP request for the method.
	req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil)
	if err != nil {
		return nil, err
	}

	// Get credentials from the configured credentials provider.
	value, err := c.credsProvider.Get()
	if err != nil {
		return nil, err
	}

	var (
		signerType      = value.SignerType
		accessKeyID     = value.AccessKeyID
		secretAccessKey = value.SecretAccessKey
		sessionToken    = value.SessionToken
	)

	// Custom signer set then override the behavior.
	if c.overrideSignerType != credentials.SignatureDefault {
		signerType = c.overrideSignerType
	}

	// If signerType returned by credentials helper is anonymous,
	// then do not sign regardless of signerType override.
	if value.SignerType == credentials.SignatureAnonymous {
		signerType = credentials.SignatureAnonymous
	}

	// Generate presign url if needed, return right here.
	if metadata.expires != 0 && metadata.presignURL {
		if signerType.IsAnonymous() {
			return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
		}
		if metadata.extraPresignHeader != nil {
			if signerType.IsV2() {
				return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.")
			}
			// Extra headers become part of the signed request.
			for k, v := range metadata.extraPresignHeader {
				req.Header.Set(k, v[0])
			}
		}
		if signerType.IsV2() {
			// Presign URL with signature v2.
			req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
		} else if signerType.IsV4() {
			// Presign URL with signature v4.
			req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
		}
		return req, nil
	}

	// Set 'User-Agent' header for the request.
	c.setUserAgent(req)

	// Set all headers.
	for k, v := range metadata.customHeader {
		req.Header.Set(k, v[0])
	}

	// Go net/http notoriously closes the request body.
	// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
	// This can cause underlying *os.File seekers to fail, avoid that
	// by making sure to wrap the closer as a nop.
	if metadata.contentLength == 0 {
		req.Body = nil
	} else {
		req.Body = io.NopCloser(metadata.contentBody)
	}

	// Set incoming content-length.
	req.ContentLength = metadata.contentLength
	if req.ContentLength <= -1 {
		// For unknown content length, we upload using transfer-encoding: chunked.
		req.TransferEncoding = []string{"chunked"}
	}

	// set md5Sum for content protection.
	if len(metadata.contentMD5Base64) > 0 {
		req.Header.Set("Content-Md5", metadata.contentMD5Base64)
	}

	// For anonymous requests just return.
	if signerType.IsAnonymous() {
		return req, nil
	}

	switch {
	case signerType.IsV2():
		// Add signature version '2' authorization header.
		req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
	case metadata.streamSha256 && !c.secure:
		if len(metadata.trailer) > 0 {
			req.Trailer = metadata.trailer
		}
		// Streaming signature is used by default for a PUT object request.
		// Additionally, we also look if the initialized client is secure,
		// if yes then we don't need to perform streaming signature.
		req = signer.StreamingSignV4(req, accessKeyID,
			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher())
	default:
		// Set sha256 sum for signature calculation only with signature version '4'.
		shaHeader := unsignedPayload
		if metadata.contentSHA256Hex != "" {
			shaHeader = metadata.contentSHA256Hex
			if len(metadata.trailer) > 0 {
				// Sanity check, we should not end up here if upstream is sane.
				return nil, errors.New("internal error: contentSHA256Hex with trailer not supported")
			}
		} else if len(metadata.trailer) > 0 {
			shaHeader = unsignedPayloadTrailer
		}
		req.Header.Set("X-Amz-Content-Sha256", shaHeader)

		// Add signature version '4' authorization header.
		req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer)
	}

	// Return request.
	return req, nil
}
901
902// set User agent.
903func (c *Client) setUserAgent(req *http.Request) {
904 req.Header.Set("User-Agent", libraryUserAgent)
905 if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
906 req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
907 }
908}
909
// makeTargetURL make a new target url.
func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
	host := c.endpointURL.Host
	// For Amazon S3 endpoint, try to fetch location based endpoint.
	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
		if c.s3AccelerateEndpoint != "" && bucketName != "" {
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			// Disable transfer acceleration for non-compliant bucket names.
			if strings.Contains(bucketName, ".") {
				return nil, errTransferAccelerationBucket(bucketName)
			}
			// If transfer acceleration is requested set new host.
			// For more details about enabling transfer acceleration read here.
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			host = c.s3AccelerateEndpoint
		} else {
			// Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint
			if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) {
				// Fetch new host based on the bucket location.
				host = getS3Endpoint(bucketLocation)
			}
		}
	}

	// Save scheme.
	scheme := c.endpointURL.Scheme

	// Strip port 80 and 443 so we won't send these ports in Host header.
	// The reason is that browsers and curl automatically remove :80 and :443
	// with the generated presigned urls, then a signature mismatch error.
	if h, p, err := net.SplitHostPort(host); err == nil {
		if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
			host = h
			// SplitHostPort removes the brackets from IPv6 literals; restore
			// them so the resulting URL stays parseable.
			if ip := net.ParseIP(h); ip != nil && ip.To4() == nil {
				host = "[" + h + "]"
			}
		}
	}

	urlStr := scheme + "://" + host + "/"

	// Make URL only if bucketName is available, otherwise use the
	// endpoint URL.
	if bucketName != "" {
		// If endpoint supports virtual host style use that always.
		// Currently only S3 and Google Cloud Storage would support
		// virtual host style.
		if isVirtualHostStyle {
			urlStr = scheme + "://" + bucketName + "." + host + "/"
			if objectName != "" {
				urlStr += s3utils.EncodePath(objectName)
			}
		} else {
			// If not fall back to using path style.
			urlStr = urlStr + bucketName + "/"
			if objectName != "" {
				urlStr += s3utils.EncodePath(objectName)
			}
		}
	}

	// If there are any query values, add them to the end.
	if len(queryValues) > 0 {
		urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
	}

	return url.Parse(urlStr)
}
978
979// returns true if virtual hosted style requests are to be used.
980func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
981 if bucketName == "" {
982 return false
983 }
984
985 if c.lookup == BucketLookupDNS {
986 return true
987 }
988 if c.lookup == BucketLookupPath {
989 return false
990 }
991
992 // default to virtual only for Amazon/Google storage. In all other cases use
993 // path style requests
994 return s3utils.IsVirtualHostSupported(url, bucketName)
995}
diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
new file mode 100644
index 0000000..b1d3b38
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
@@ -0,0 +1,256 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "net"
23 "net/http"
24 "net/url"
25 "path"
26 "sync"
27
28 "github.com/minio/minio-go/v7/pkg/credentials"
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30 "github.com/minio/minio-go/v7/pkg/signer"
31)
32
33// bucketLocationCache - Provides simple mechanism to hold bucket
34// locations in memory.
35type bucketLocationCache struct {
36 // mutex is used for handling the concurrent
37 // read/write requests for cache.
38 sync.RWMutex
39
40 // items holds the cached bucket locations.
41 items map[string]string
42}
43
44// newBucketLocationCache - Provides a new bucket location cache to be
45// used internally with the client object.
46func newBucketLocationCache() *bucketLocationCache {
47 return &bucketLocationCache{
48 items: make(map[string]string),
49 }
50}
51
52// Get - Returns a value of a given key if it exists.
53func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
54 r.RLock()
55 defer r.RUnlock()
56 location, ok = r.items[bucketName]
57 return
58}
59
60// Set - Will persist a value into cache.
61func (r *bucketLocationCache) Set(bucketName, location string) {
62 r.Lock()
63 defer r.Unlock()
64 r.items[bucketName] = location
65}
66
67// Delete - Deletes a bucket name from cache.
68func (r *bucketLocationCache) Delete(bucketName string) {
69 r.Lock()
70 defer r.Unlock()
71 delete(r.items, bucketName)
72}
73
74// GetBucketLocation - get location for the bucket name from location cache, if not
75// fetch freshly by making a new request.
76func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) {
77 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
78 return "", err
79 }
80 return c.getBucketLocation(ctx, bucketName)
81}
82
83// getBucketLocation - Get location for the bucketName from location map cache, if not
84// fetch freshly by making a new request.
85func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) {
86 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
87 return "", err
88 }
89
90 // Region set then no need to fetch bucket location.
91 if c.region != "" {
92 return c.region, nil
93 }
94
95 if location, ok := c.bucketLocCache.Get(bucketName); ok {
96 return location, nil
97 }
98
99 // Initialize a new request.
100 req, err := c.getBucketLocationRequest(ctx, bucketName)
101 if err != nil {
102 return "", err
103 }
104
105 // Initiate the request.
106 resp, err := c.do(req)
107 defer closeResponse(resp)
108 if err != nil {
109 return "", err
110 }
111 location, err := processBucketLocationResponse(resp, bucketName)
112 if err != nil {
113 return "", err
114 }
115 c.bucketLocCache.Set(bucketName, location)
116 return location, nil
117}
118
119// processes the getBucketLocation http response from the server.
120func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
121 if resp != nil {
122 if resp.StatusCode != http.StatusOK {
123 err = httpRespToErrorResponse(resp, bucketName, "")
124 errResp := ToErrorResponse(err)
125 // For access denied error, it could be an anonymous
126 // request. Move forward and let the top level callers
127 // succeed if possible based on their policy.
128 switch errResp.Code {
129 case "NotImplemented":
130 switch errResp.Server {
131 case "AmazonSnowball":
132 return "snowball", nil
133 case "cloudflare":
134 return "us-east-1", nil
135 }
136 case "AuthorizationHeaderMalformed":
137 fallthrough
138 case "InvalidRegion":
139 fallthrough
140 case "AccessDenied":
141 if errResp.Region == "" {
142 return "us-east-1", nil
143 }
144 return errResp.Region, nil
145 }
146 return "", err
147 }
148 }
149
150 // Extract location.
151 var locationConstraint string
152 err = xmlDecoder(resp.Body, &locationConstraint)
153 if err != nil {
154 return "", err
155 }
156
157 location := locationConstraint
158 // Location is empty will be 'us-east-1'.
159 if location == "" {
160 location = "us-east-1"
161 }
162
163 // Location can be 'EU' convert it to meaningful 'eu-west-1'.
164 if location == "EU" {
165 location = "eu-west-1"
166 }
167
168 // Save the location into cache.
169
170 // Return.
171 return location, nil
172}
173
174// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
175func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) {
176 // Set location query.
177 urlValues := make(url.Values)
178 urlValues.Set("location", "")
179
180 // Set get bucket location always as path style.
181 targetURL := *c.endpointURL
182
183 // as it works in makeTargetURL method from api.go file
184 if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
185 if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
186 targetURL.Host = h
187 if ip := net.ParseIP(h); ip != nil && ip.To16() != nil {
188 targetURL.Host = "[" + h + "]"
189 }
190 }
191 }
192
193 isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName)
194
195 var urlStr string
196
197 if isVirtualStyle {
198 urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location"
199 } else {
200 targetURL.Path = path.Join(bucketName, "") + "/"
201 targetURL.RawQuery = urlValues.Encode()
202 urlStr = targetURL.String()
203 }
204
205 // Get a new HTTP request for the method.
206 req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil)
207 if err != nil {
208 return nil, err
209 }
210
211 // Set UserAgent for the request.
212 c.setUserAgent(req)
213
214 // Get credentials from the configured credentials provider.
215 value, err := c.credsProvider.Get()
216 if err != nil {
217 return nil, err
218 }
219
220 var (
221 signerType = value.SignerType
222 accessKeyID = value.AccessKeyID
223 secretAccessKey = value.SecretAccessKey
224 sessionToken = value.SessionToken
225 )
226
227 // Custom signer set then override the behavior.
228 if c.overrideSignerType != credentials.SignatureDefault {
229 signerType = c.overrideSignerType
230 }
231
232 // If signerType returned by credentials helper is anonymous,
233 // then do not sign regardless of signerType override.
234 if value.SignerType == credentials.SignatureAnonymous {
235 signerType = credentials.SignatureAnonymous
236 }
237
238 if signerType.IsAnonymous() {
239 return req, nil
240 }
241
242 if signerType.IsV2() {
243 req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualStyle)
244 return req, nil
245 }
246
247 // Set sha256 sum for signature calculation only with signature version '4'.
248 contentSha256 := emptySHA256Hex
249 if c.secure {
250 contentSha256 = unsignedPayload
251 }
252
253 req.Header.Set("X-Amz-Content-Sha256", contentSha256)
254 req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
255 return req, nil
256}
diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go
new file mode 100644
index 0000000..a1f6f43
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/checksum.go
@@ -0,0 +1,210 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2023 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "crypto/sha1"
22 "crypto/sha256"
23 "encoding/base64"
24 "hash"
25 "hash/crc32"
26 "io"
27 "math/bits"
28)
29
30// ChecksumType contains information about the checksum type.
31type ChecksumType uint32
32
33const (
34
35 // ChecksumSHA256 indicates a SHA256 checksum.
36 ChecksumSHA256 ChecksumType = 1 << iota
37 // ChecksumSHA1 indicates a SHA-1 checksum.
38 ChecksumSHA1
39 // ChecksumCRC32 indicates a CRC32 checksum with IEEE table.
40 ChecksumCRC32
41 // ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
42 ChecksumCRC32C
43
44 // Keep after all valid checksums
45 checksumLast
46
47 // checksumMask is a mask for valid checksum types.
48 checksumMask = checksumLast - 1
49
50 // ChecksumNone indicates no checksum.
51 ChecksumNone ChecksumType = 0
52
53 amzChecksumAlgo = "x-amz-checksum-algorithm"
54 amzChecksumCRC32 = "x-amz-checksum-crc32"
55 amzChecksumCRC32C = "x-amz-checksum-crc32c"
56 amzChecksumSHA1 = "x-amz-checksum-sha1"
57 amzChecksumSHA256 = "x-amz-checksum-sha256"
58)
59
60// Is returns if c is all of t.
61func (c ChecksumType) Is(t ChecksumType) bool {
62 return c&t == t
63}
64
65// Key returns the header key.
66// returns empty string if invalid or none.
67func (c ChecksumType) Key() string {
68 switch c & checksumMask {
69 case ChecksumCRC32:
70 return amzChecksumCRC32
71 case ChecksumCRC32C:
72 return amzChecksumCRC32C
73 case ChecksumSHA1:
74 return amzChecksumSHA1
75 case ChecksumSHA256:
76 return amzChecksumSHA256
77 }
78 return ""
79}
80
81// RawByteLen returns the size of the un-encoded checksum.
82func (c ChecksumType) RawByteLen() int {
83 switch c & checksumMask {
84 case ChecksumCRC32, ChecksumCRC32C:
85 return 4
86 case ChecksumSHA1:
87 return sha1.Size
88 case ChecksumSHA256:
89 return sha256.Size
90 }
91 return 0
92}
93
94// Hasher returns a hasher corresponding to the checksum type.
95// Returns nil if no checksum.
96func (c ChecksumType) Hasher() hash.Hash {
97 switch c & checksumMask {
98 case ChecksumCRC32:
99 return crc32.NewIEEE()
100 case ChecksumCRC32C:
101 return crc32.New(crc32.MakeTable(crc32.Castagnoli))
102 case ChecksumSHA1:
103 return sha1.New()
104 case ChecksumSHA256:
105 return sha256.New()
106 }
107 return nil
108}
109
110// IsSet returns whether the type is valid and known.
111func (c ChecksumType) IsSet() bool {
112 return bits.OnesCount32(uint32(c)) == 1
113}
114
115// String returns the type as a string.
116// CRC32, CRC32C, SHA1, and SHA256 for valid values.
117// Empty string for unset and "<invalid>" if not valid.
118func (c ChecksumType) String() string {
119 switch c & checksumMask {
120 case ChecksumCRC32:
121 return "CRC32"
122 case ChecksumCRC32C:
123 return "CRC32C"
124 case ChecksumSHA1:
125 return "SHA1"
126 case ChecksumSHA256:
127 return "SHA256"
128 case ChecksumNone:
129 return ""
130 }
131 return "<invalid>"
132}
133
134// ChecksumReader reads all of r and returns a checksum of type c.
135// Returns any error that may have occurred while reading.
136func (c ChecksumType) ChecksumReader(r io.Reader) (Checksum, error) {
137 h := c.Hasher()
138 if h == nil {
139 return Checksum{}, nil
140 }
141 _, err := io.Copy(h, r)
142 if err != nil {
143 return Checksum{}, err
144 }
145 return NewChecksum(c, h.Sum(nil)), nil
146}
147
148// ChecksumBytes returns a checksum of the content b with type c.
149func (c ChecksumType) ChecksumBytes(b []byte) Checksum {
150 h := c.Hasher()
151 if h == nil {
152 return Checksum{}
153 }
154 n, err := h.Write(b)
155 if err != nil || n != len(b) {
156 // Shouldn't happen with these checksummers.
157 return Checksum{}
158 }
159 return NewChecksum(c, h.Sum(nil))
160}
161
162// Checksum is a type and encoded value.
163type Checksum struct {
164 Type ChecksumType
165 r []byte
166}
167
168// NewChecksum sets the checksum to the value of b,
169// which is the raw hash output.
170// If the length of c does not match t.RawByteLen,
171// a checksum with ChecksumNone is returned.
172func NewChecksum(t ChecksumType, b []byte) Checksum {
173 if t.IsSet() && len(b) == t.RawByteLen() {
174 return Checksum{Type: t, r: b}
175 }
176 return Checksum{}
177}
178
179// NewChecksumString sets the checksum to the value of s,
180// which is the base 64 encoded raw hash output.
181// If the length of c does not match t.RawByteLen, it is not added.
182func NewChecksumString(t ChecksumType, s string) Checksum {
183 b, _ := base64.StdEncoding.DecodeString(s)
184 if t.IsSet() && len(b) == t.RawByteLen() {
185 return Checksum{Type: t, r: b}
186 }
187 return Checksum{}
188}
189
190// IsSet returns whether the checksum is valid and known.
191func (c Checksum) IsSet() bool {
192 return c.Type.IsSet() && len(c.r) == c.Type.RawByteLen()
193}
194
195// Encoded returns the encoded value.
196// Returns the empty string if not set or valid.
197func (c Checksum) Encoded() string {
198 if !c.IsSet() {
199 return ""
200 }
201 return base64.StdEncoding.EncodeToString(c.r)
202}
203
204// Raw returns the raw checksum value if set.
205func (c Checksum) Raw() []byte {
206 if !c.IsSet() {
207 return nil
208 }
209 return c.r
210}
diff --git a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md
new file mode 100644
index 0000000..cb232c3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md
@@ -0,0 +1,80 @@
1# Contributor Covenant Code of Conduct
2
3## Our Pledge
4
5In the interest of fostering an open and welcoming environment, we as
6contributors and maintainers pledge to making participation in our project and
7our community a harassment-free experience for everyone, regardless of age, body
8size, disability, ethnicity, gender identity and expression, level of experience,
9nationality, personal appearance, race, religion, or sexual identity and
10orientation.
11
12## Our Standards
13
14Examples of behavior that contributes to creating a positive environment
15include:
16
17* Using welcoming and inclusive language
18* Being respectful of differing viewpoints and experiences
19* Gracefully accepting constructive criticism
20* Focusing on what is best for the community
21* Showing empathy towards other community members
22
23Examples of unacceptable behavior by participants include:
24
25* The use of sexualized language or imagery and unwelcome sexual attention or
26 advances
27* Trolling, insulting/derogatory comments, and personal or political attacks
28* Public or private harassment
29* Publishing others' private information, such as a physical or electronic
30 address, without explicit permission
31* Other conduct which could reasonably be considered inappropriate in a
32 professional setting
33
34## Our Responsibilities
35
36Project maintainers are responsible for clarifying the standards of acceptable
37behavior and are expected to take appropriate and fair corrective action in
38response to any instances of unacceptable behavior, in compliance with the
39licensing terms applying to the Project developments.
40
41Project maintainers have the right and responsibility to remove, edit, or
42reject comments, commits, code, wiki edits, issues, and other contributions
43that are not aligned to this Code of Conduct, or to ban temporarily or
44permanently any contributor for other behaviors that they deem inappropriate,
45threatening, offensive, or harmful. However, these actions shall respect the
46licensing terms of the Project Developments that will always supersede such
47Code of Conduct.
48
49## Scope
50
51This Code of Conduct applies both within project spaces and in public spaces
52when an individual is representing the project or its community. Examples of
53representing a project or community include using an official project e-mail
54address, posting via an official social media account, or acting as an appointed
55representative at an online or offline event. Representation of a project may be
56further defined and clarified by project maintainers.
57
58## Enforcement
59
60Instances of abusive, harassing, or otherwise unacceptable behavior may be
61reported by contacting the project team at [email protected]. The project team
62will review and investigate all complaints, and will respond in a way that it deems
63appropriate to the circumstances. The project team is obligated to maintain
64confidentiality with regard to the reporter of an incident.
65Further details of specific enforcement policies may be posted separately.
66
67Project maintainers who do not follow or enforce the Code of Conduct in good
68faith may face temporary or permanent repercussions as determined by other
69members of the project's leadership.
70
71## Attribution
72
73This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
74available at [http://contributor-covenant.org/version/1/4][version]
75
76This version includes a clarification to ensure that the code of conduct is in
77compliance with the free software licensing terms of the project.
78
79[homepage]: http://contributor-covenant.org
80[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go
new file mode 100644
index 0000000..401d2a7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/constants.go
@@ -0,0 +1,110 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20// Multipart upload defaults.
21
22// absMinPartSize - absolute minimum part size (5 MiB) below which
23// a part in a multipart upload may not be uploaded.
24const absMinPartSize = 1024 * 1024 * 5
25
26// minPartSize - minimum part size 16MiB per object after which
27// putObject behaves internally as multipart.
28const minPartSize = 1024 * 1024 * 16
29
30// maxPartsCount - maximum number of parts for a single multipart session.
31const maxPartsCount = 10000
32
33// maxPartSize - maximum part size 5GiB for a single multipart upload
34// operation.
35const maxPartSize = 1024 * 1024 * 1024 * 5
36
37// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
38// operation.
39const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
40
41// maxMultipartPutObjectSize - maximum size 5TiB of object for
42// Multipart operation.
43const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
44
45// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
46// we don't want to sign the request payload
47const unsignedPayload = "UNSIGNED-PAYLOAD"
48
49// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when
50// we don't want to sign the request payload, but have a trailer.
51const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
52
53// Total number of parallel workers used for multipart operation.
54const totalWorkers = 4
55
56// Signature related constants.
57const (
58 signV4Algorithm = "AWS4-HMAC-SHA256"
59 iso8601DateFormat = "20060102T150405Z"
60)
61
62const (
63 // Storage class header.
64 amzStorageClass = "X-Amz-Storage-Class"
65
66 // Website redirect location header
67 amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
68
69 // Object Tagging headers
70 amzTaggingHeader = "X-Amz-Tagging"
71 amzTaggingHeaderDirective = "X-Amz-Tagging-Directive"
72
73 amzVersionID = "X-Amz-Version-Id"
74 amzTaggingCount = "X-Amz-Tagging-Count"
75 amzExpiration = "X-Amz-Expiration"
76 amzRestore = "X-Amz-Restore"
77 amzReplicationStatus = "X-Amz-Replication-Status"
78 amzDeleteMarker = "X-Amz-Delete-Marker"
79
80 // Object legal hold header
81 amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold"
82
83 // Object retention header
84 amzLockMode = "X-Amz-Object-Lock-Mode"
85 amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date"
86 amzBypassGovernance = "X-Amz-Bypass-Governance-Retention"
87
88 // Replication status
89 amzBucketReplicationStatus = "X-Amz-Replication-Status"
90 // Minio specific Replication/lifecycle transition extension
91 minIOBucketSourceMTime = "X-Minio-Source-Mtime"
92
93 minIOBucketSourceETag = "X-Minio-Source-Etag"
94 minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
95 minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
96 minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request"
97 minIOBucketReplicationCheck = "X-Minio-Source-Replication-Check"
98
99 // Header indicates last tag update time on source
100 minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp"
101 // Header indicates last retention update time on source
102 minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp"
103 // Header indicates last legalhold update time on source
104 minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp"
105 minIOForceDelete = "x-minio-force-delete"
106 // Header indicates delete marker replication request can be sent by source now.
107 minioTgtReplicationReady = "X-Minio-Replication-Ready"
108 // Header asks if delete marker replication request can be sent by source now.
109 isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready"
110)
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go
new file mode 100644
index 0000000..132ea70
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/core.go
@@ -0,0 +1,150 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "io"
23 "net/http"
24
25 "github.com/minio/minio-go/v7/pkg/encrypt"
26)
27
28// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
29type Core struct {
30 *Client
31}
32
33// NewCore - Returns new initialized a Core client, this CoreClient should be
34// only used under special conditions such as need to access lower primitives
35// and being able to use them to write your own wrappers.
36func NewCore(endpoint string, opts *Options) (*Core, error) {
37 var s3Client Core
38 client, err := New(endpoint, opts)
39 if err != nil {
40 return nil, err
41 }
42 s3Client.Client = client
43 return &s3Client, nil
44}
45
46// ListObjects - List all the objects at a prefix, optionally with marker and delimiter
47// you can further filter the results.
48func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
49 return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil)
50}
51
52// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
53// continuationToken instead of marker to support iteration over the results.
54func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) {
55 return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil)
56}
57
58// CopyObject - copies an object from source object to destination object on server side.
59func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {
60 return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts)
61}
62
63// CopyObjectPart - creates a part in a multipart upload by copying (a
64// part of) an existing object.
65func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
66 partID int, startOffset, length int64, metadata map[string]string,
67) (p CompletePart, err error) {
68 return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
69 partID, startOffset, length, metadata)
70}
71
72// PutObject - Upload object. Uploads using single PUT call.
73func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) {
74 hookReader := newHook(data, opts.Progress)
75 return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts)
76}
77
78// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
79func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
80 result, err := c.initiateMultipartUpload(ctx, bucket, object, opts)
81 return result.UploadID, err
82}
83
84// ListMultipartUploads - List incomplete uploads.
85func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
86 return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
87}
88
89// PutObjectPartOptions contains options for PutObjectPart API
90type PutObjectPartOptions struct {
91 Md5Base64, Sha256Hex string
92 SSE encrypt.ServerSide
93 CustomHeader, Trailer http.Header
94}
95
96// PutObjectPart - Upload an object part.
97func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int,
98 data io.Reader, size int64, opts PutObjectPartOptions,
99) (ObjectPart, error) {
100 p := uploadPartParams{
101 bucketName: bucket,
102 objectName: object,
103 uploadID: uploadID,
104 reader: data,
105 partNumber: partID,
106 md5Base64: opts.Md5Base64,
107 sha256Hex: opts.Sha256Hex,
108 size: size,
109 sse: opts.SSE,
110 streamSha256: true,
111 customHeader: opts.CustomHeader,
112 trailer: opts.Trailer,
113 }
114 return c.uploadPart(ctx, p)
115}
116
117// ListObjectParts - List uploaded parts of an incomplete upload.x
118func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListObjectPartsResult, err error) {
119 return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
120}
121
122// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
123func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (UploadInfo, error) {
124 res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
125 Parts: parts,
126 }, opts)
127 return res, err
128}
129
130// AbortMultipartUpload - Abort an incomplete upload.
131func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
132 return c.abortMultipartUpload(ctx, bucket, object, uploadID)
133}
134
135// GetBucketPolicy - fetches bucket access policy for a given bucket.
136func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) {
137 return c.getBucketPolicy(ctx, bucket)
138}
139
140// PutBucketPolicy - applies a new bucket access policy for a given bucket.
141func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error {
142 return c.putBucketPolicy(ctx, bucket, bucketPolicy)
143}
144
145// GetObject is a lower level API implemented to support reading
146// partial objects and also downloading objects with special conditions
147// matching etag, modtime etc.
148func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
149 return c.getObject(ctx, bucketName, objectName, opts)
150}
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
new file mode 100644
index 0000000..f951cd0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -0,0 +1,13004 @@
1//go:build mint
2// +build mint
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2015-2020 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package main
22
23import (
24 "archive/zip"
25 "bytes"
26 "context"
27 "crypto/sha1"
28 "encoding/base64"
29 "errors"
30 "fmt"
31 "hash"
32 "hash/crc32"
33 "io"
34 "math/rand"
35 "mime/multipart"
36 "net/http"
37 "net/url"
38 "os"
39 "path"
40 "path/filepath"
41 "reflect"
42 "runtime"
43 "sort"
44 "strconv"
45 "strings"
46 "sync"
47 "time"
48
49 "github.com/dustin/go-humanize"
50 jsoniter "github.com/json-iterator/go"
51 "github.com/minio/sha256-simd"
52 log "github.com/sirupsen/logrus"
53
54 "github.com/minio/minio-go/v7"
55 "github.com/minio/minio-go/v7/pkg/credentials"
56 "github.com/minio/minio-go/v7/pkg/encrypt"
57 "github.com/minio/minio-go/v7/pkg/notification"
58 "github.com/minio/minio-go/v7/pkg/tags"
59)
60
// letterBytes is the alphabet randString draws from when generating
// bucket/object names. NOTE(review): it holds the 26 lowercase letters
// plus the digits 0-6 and 9 (7 and 8 are absent) — presumably
// intentional, but worth confirming.
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)
67
// Names of the environment variables that configure the target server,
// credentials, and transport/KMS switches for the functional test run.
const (
	serverEndpoint = "SERVER_ENDPOINT"
	accessKey      = "ACCESS_KEY"
	secretKey      = "SECRET_KEY"
	enableHTTPS    = "ENABLE_HTTPS"
	enableKMS      = "ENABLE_KMS"
)
75
76type mintJSONFormatter struct{}
77
78func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
79 data := make(log.Fields, len(entry.Data))
80 for k, v := range entry.Data {
81 switch v := v.(type) {
82 case error:
83 // Otherwise errors are ignored by `encoding/json`
84 // https://github.com/sirupsen/logrus/issues/137
85 data[k] = v.Error()
86 default:
87 data[k] = v
88 }
89 }
90 json := jsoniter.ConfigCompatibleWithStandardLibrary
91 serialized, err := json.Marshal(data)
92 if err != nil {
93 return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
94 }
95 return append(serialized, '\n'), nil
96}
97
98var readFull = func(r io.Reader, buf []byte) (n int, err error) {
99 // ReadFull reads exactly len(buf) bytes from r into buf.
100 // It returns the number of bytes copied and an error if
101 // fewer bytes were read. The error is EOF only if no bytes
102 // were read. If an EOF happens after reading some but not
103 // all the bytes, ReadFull returns ErrUnexpectedEOF.
104 // On return, n == len(buf) if and only if err == nil.
105 // If r returns an error having read at least len(buf) bytes,
106 // the error is dropped.
107 for n < len(buf) && err == nil {
108 var nn int
109 nn, err = r.Read(buf[n:])
110 // Some spurious io.Reader's return
111 // io.ErrUnexpectedEOF when nn == 0
112 // this behavior is undocumented
113 // so we are on purpose not using io.ReadFull
114 // implementation because this can lead
115 // to custom handling, to avoid that
116 // we simply modify the original io.ReadFull
117 // implementation to avoid this issue.
118 // io.ErrUnexpectedEOF with nn == 0 really
119 // means that io.EOF
120 if err == io.ErrUnexpectedEOF && nn == 0 {
121 err = io.EOF
122 }
123 n += nn
124 }
125 if n >= len(buf) {
126 err = nil
127 } else if n > 0 && err == io.EOF {
128 err = io.ErrUnexpectedEOF
129 }
130 return
131}
132
133func cleanEmptyEntries(fields log.Fields) log.Fields {
134 cleanFields := log.Fields{}
135 for k, v := range fields {
136 if v != "" {
137 cleanFields[k] = v
138 }
139 }
140 return cleanFields
141}
142
143// log successful test runs
144func successLogger(testName, function string, args map[string]interface{}, startTime time.Time) *log.Entry {
145 // calculate the test case duration
146 duration := time.Since(startTime)
147 // log with the fields as per mint
148 fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"}
149 return log.WithFields(cleanEmptyEntries(fields))
150}
151
152// As few of the features are not available in Gateway(s) currently, Check if err value is NotImplemented,
153// and log as NA in that case and continue execution. Otherwise log as failure and return
154func logError(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) {
155 // If server returns NotImplemented we assume it is gateway mode and hence log it as info and move on to next tests
156 // Special case for ComposeObject API as it is implemented on client side and adds specific error details like `Error in upload-part-copy` in
157 // addition to NotImplemented error returned from server
158 if isErrNotImplemented(err) {
159 ignoredLog(testName, function, args, startTime, message).Info()
160 } else if isRunOnFail() {
161 failureLog(testName, function, args, startTime, alert, message, err).Error()
162 } else {
163 failureLog(testName, function, args, startTime, alert, message, err).Fatal()
164 }
165}
166
167// log failed test runs
168func failureLog(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) *log.Entry {
169 // calculate the test case duration
170 duration := time.Since(startTime)
171 var fields log.Fields
172 // log with the fields as per mint
173 if err != nil {
174 fields = log.Fields{
175 "name": "minio-go: " + testName, "function": function, "args": args,
176 "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err,
177 }
178 } else {
179 fields = log.Fields{
180 "name": "minio-go: " + testName, "function": function, "args": args,
181 "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message,
182 }
183 }
184 return log.WithFields(cleanEmptyEntries(fields))
185}
186
187// log not applicable test runs
188func ignoredLog(testName, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry {
189 // calculate the test case duration
190 duration := time.Since(startTime)
191 // log with the fields as per mint
192 fields := log.Fields{
193 "name": "minio-go: " + testName, "function": function, "args": args,
194 "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented",
195 }
196 return log.WithFields(cleanEmptyEntries(fields))
197}
198
235
236func cleanupVersionedBucket(bucketName string, c *minio.Client) error {
237 doneCh := make(chan struct{})
238 defer close(doneCh)
239 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) {
240 if obj.Err != nil {
241 return obj.Err
242 }
243 if obj.Key != "" {
244 err := c.RemoveObject(context.Background(), bucketName, obj.Key,
245 minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true})
246 if err != nil {
247 return err
248 }
249 }
250 }
251 for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) {
252 if objPartInfo.Err != nil {
253 return objPartInfo.Err
254 }
255 if objPartInfo.Key != "" {
256 err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key)
257 if err != nil {
258 return err
259 }
260 }
261 }
262 // objects are already deleted, clear the buckets now
263 err := c.RemoveBucket(context.Background(), bucketName)
264 if err != nil {
265 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) {
266 log.Println("found", obj.Key, obj.VersionID)
267 }
268 return err
269 }
270 return err
271}
272
273func isErrNotImplemented(err error) bool {
274 return minio.ToErrorResponse(err).Code == "NotImplemented"
275}
276
277func isRunOnFail() bool {
278 return os.Getenv("RUN_ON_FAIL") == "1"
279}
280
// init points the suite at the public https://play.min.io demo server
// (with its published demo credentials) whenever SERVER_ENDPOINT is not
// provided in the environment.
func init() {
	// If server endpoint is not set, all tests default to
	// using https://play.min.io
	if os.Getenv(serverEndpoint) == "" {
		os.Setenv(serverEndpoint, "play.min.io")
		os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F")
		os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG")
		os.Setenv(enableHTTPS, "1")
	}
}
291
292var mintDataDir = os.Getenv("MINT_DATA_DIR")
293
294func getMintDataDirFilePath(filename string) (fp string) {
295 if mintDataDir == "" {
296 return
297 }
298 return filepath.Join(mintDataDir, filename)
299}
300
301func newRandomReader(seed, size int64) io.Reader {
302 return io.LimitReader(rand.New(rand.NewSource(seed)), size)
303}
304
305func mustCrcReader(r io.Reader) uint32 {
306 crc := crc32.NewIEEE()
307 _, err := io.Copy(crc, r)
308 if err != nil {
309 panic(err)
310 }
311 return crc.Sum32()
312}
313
314func crcMatches(r io.Reader, want uint32) error {
315 crc := crc32.NewIEEE()
316 _, err := io.Copy(crc, r)
317 if err != nil {
318 panic(err)
319 }
320 got := crc.Sum32()
321 if got != want {
322 return fmt.Errorf("crc mismatch, want %x, got %x", want, got)
323 }
324 return nil
325}
326
327func crcMatchesName(r io.Reader, name string) error {
328 want := dataFileCRC32[name]
329 crc := crc32.NewIEEE()
330 _, err := io.Copy(crc, r)
331 if err != nil {
332 panic(err)
333 }
334 got := crc.Sum32()
335 if got != want {
336 return fmt.Errorf("crc mismatch, want %x, got %x", want, got)
337 }
338 return nil
339}
340
341// read data from file if it exists or optionally create a buffer of particular size
342func getDataReader(fileName string) io.ReadCloser {
343 if mintDataDir == "" {
344 size := int64(dataFileMap[fileName])
345 if _, ok := dataFileCRC32[fileName]; !ok {
346 dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size))
347 }
348 return io.NopCloser(newRandomReader(size, size))
349 }
350 reader, _ := os.Open(getMintDataDirFilePath(fileName))
351 if _, ok := dataFileCRC32[fileName]; !ok {
352 dataFileCRC32[fileName] = mustCrcReader(reader)
353 reader.Close()
354 reader, _ = os.Open(getMintDataDirFilePath(fileName))
355 }
356 return reader
357}
358
359// randString generates random names and prepends them with a known prefix.
360func randString(n int, src rand.Source, prefix string) string {
361 b := make([]byte, n)
362 // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
363 for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
364 if remain == 0 {
365 cache, remain = src.Int63(), letterIdxMax
366 }
367 if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
368 b[i] = letterBytes[idx]
369 i--
370 }
371 cache >>= letterIdxBits
372 remain--
373 }
374 return prefix + string(b[0:30-len(prefix)])
375}
376
// dataFileMap maps each well-known mint data file name to its size in
// bytes; when MINT_DATA_DIR is unset, getDataReader synthesizes random
// data of exactly this size instead of reading a file.
var dataFileMap = map[string]int{
	"datafile-0-b":     0,
	"datafile-1-b":     1,
	"datafile-1-kB":    1 * humanize.KiByte,
	"datafile-10-kB":   10 * humanize.KiByte,
	"datafile-33-kB":   33 * humanize.KiByte,
	"datafile-100-kB":  100 * humanize.KiByte,
	"datafile-1.03-MB": 1056 * humanize.KiByte,
	"datafile-1-MB":    1 * humanize.MiByte,
	"datafile-5-MB":    5 * humanize.MiByte,
	"datafile-6-MB":    6 * humanize.MiByte,
	"datafile-11-MB":   11 * humanize.MiByte,
	"datafile-65-MB":   65 * humanize.MiByte,
	"datafile-129-MB":  129 * humanize.MiByte,
}

// dataFileCRC32 caches the IEEE CRC32 of each data file by name;
// populated lazily by getDataReader and consulted by crcMatchesName.
var dataFileCRC32 = map[string]uint32{}
394
395func isFullMode() bool {
396 return os.Getenv("MINT_MODE") == "full"
397}
398
// getFuncName returns the name of the function that called it (stack
// frame 2: skipping getFuncNameLoc and getFuncName itself), with the
// "main." prefix stripped. Used by tests to self-report their names.
func getFuncName() string {
	return getFuncNameLoc(2)
}
402
// getFuncNameLoc returns the name of the function `caller` stack frames
// above this call, with the "main." package prefix stripped.
func getFuncNameLoc(caller int) string {
	pc, _, _, _ := runtime.Caller(caller)
	name := runtime.FuncForPC(pc).Name()
	return strings.TrimPrefix(name, "main.")
}
407
// Tests bucket re-create errors.
// Creates a bucket in eu-central-1, attempts to create the same bucket
// again, and verifies the server rejects it with BucketAlreadyExists or
// BucketAlreadyOwnedByYou.
func testMakeBucketError() {
	region := "eu-central-1"

	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "MakeBucket(bucketName, region)"
	// initialize logging params
	args := map[string]interface{}{
		"bucketName": "",
		"region":     region,
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket in 'eu-central-1'.
	if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket Failed", err)
		return
	}
	defer cleanupBucket(bucketName, c)

	// Re-creating the same bucket must fail; a nil error here is itself
	// a test failure.
	if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil {
		logError(testName, function, args, startTime, "", "Bucket already exists", err)
		return
	}
	// Verify valid error response from server.
	if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
		minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
		logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
466
// testMetadataSizeLimit verifies that PutObject rejects requests whose
// user metadata exceeds 2 KB or whose headers exceed 8 KB — both
// uploads below are expected to fail.
func testMetadataSizeLimit() {
	startTime := time.Now()
	testName := getFuncName()
	function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
	args := map[string]interface{}{
		"bucketName":        "",
		"objectName":        "",
		"opts.UserMetadata": "",
	}
	rand.Seed(startTime.Unix())

	// Instantiate new minio client object.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
		return
	}
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "Make bucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	// Size limits (in bytes) this test probes.
	const HeaderSizeLimit = 8 * 1024
	const UserMetadataLimit = 2 * 1024

	// Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail
	metadata := make(map[string]string)
	metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test")))
	args["metadata"] = fmt.Sprint(metadata)

	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
	if err == nil {
		logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil)
		return
	}

	// Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail
	metadata = make(map[string]string)
	metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test")))
	args["metadata"] = fmt.Sprint(metadata)
	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
	if err == nil {
		logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
530
// Tests various bucket supported formats.
// Creates (and cleans up) a bucket in eu-central-1, then a bucket with
// a '.' in its name in us-west-2 to exercise path-style addressing.
func testMakeBucketRegions() {
	region := "eu-central-1"
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "MakeBucket(bucketName, region)"
	// initialize logging params
	args := map[string]interface{}{
		"bucketName": "",
		"region":     region,
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket in 'eu-central-1'.
	if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
		return
	}

	// Make a new bucket with '.' in its name, in 'us-west-2'. This
	// request is internally staged into a path style instead of
	// virtual host style.
	region = "us-west-2"
	args["region"] = region
	if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
		return
	}
	successLogger(testName, function, args, startTime).Info()
}
597
// Test PutObject using a large data to trigger multipart readat
// Uploads the 129 MB data file, then reads it back and verifies size,
// content type, and CRC, plus double-Close error behavior.
func testPutObjectReadAt() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "PutObject(bucketName, objectName, reader, opts)"
	args := map[string]interface{}{
		"bucketName": "",
		"objectName": "",
		"opts":       "objectContentType",
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "Make bucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	bufSize := dataFileMap["datafile-129-MB"]
	reader := getDataReader("datafile-129-MB")
	defer reader.Close()

	// Save the data
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	// Object content type
	objectContentType := "binary/octet-stream"
	args["objectContentType"] = objectContentType

	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	// Read the data back
	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "Get Object failed", err)
		return
	}

	st, err := r.Stat()
	if err != nil {
		logError(testName, function, args, startTime, "", "Stat Object failed", err)
		return
	}
	if st.Size != int64(bufSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
		return
	}
	// Either the requested content type or application/octet-stream is
	// accepted here.
	if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" {
		logError(testName, function, args, startTime, "", "Content types don't match", err)
		return
	}
	if err := crcMatchesName(r, "datafile-129-MB"); err != nil {
		logError(testName, function, args, startTime, "", "data CRC check failed", err)
		return
	}
	if err := r.Close(); err != nil {
		logError(testName, function, args, startTime, "", "Object Close failed", err)
		return
	}
	// A second Close on the same object must report an error.
	if err := r.Close(); err == nil {
		logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
696
// testListObjectVersions uploads two versions of one object to a
// versioned bucket, deletes the object (creating a delete marker), and
// verifies the versioned listing reports exactly 2 versions and 1
// delete marker with consistent metadata.
func testListObjectVersions() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "ListObjectVersions(bucketName, prefix, recursive)"
	args := map[string]interface{}{
		"bucketName": "",
		"prefix":     "",
		"recursive":  "",
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket with object locking enabled (required before
	// versioning can be turned on).
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
	if err != nil {
		logError(testName, function, args, startTime, "", "Make bucket failed", err)
		return
	}

	err = c.EnableVersioning(context.Background(), bucketName)
	if err != nil {
		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
		return
	}

	// Save the data
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	// First version: 10 kB.
	bufSize := dataFileMap["datafile-10-kB"]
	reader := getDataReader("datafile-10-kB")

	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}
	reader.Close()

	// Second version: 1 byte.
	bufSize = dataFileMap["datafile-1-b"]
	reader = getDataReader("datafile-1-b")
	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}
	reader.Close()

	// Unversioned delete: adds a delete marker rather than removing data.
	err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "Unexpected object deletion", err)
		return
	}

	var deleteMarkers, versions int

	objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
	for info := range objectsInfo {
		if info.Err != nil {
			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
			return
		}
		if info.Key != objectName {
			logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil)
			return
		}
		if info.VersionID == "" {
			logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil)
			return
		}
		if info.IsDeleteMarker {
			deleteMarkers++
			// The delete marker was created last, so it must be latest.
			if !info.IsLatest {
				logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil)
				return
			}
		} else {
			versions++
		}
	}

	if deleteMarkers != 1 {
		logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil)
		return
	}

	if versions != 2 {
		logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
		return
	}

	// Delete all objects and their versions as long as the bucket itself
	if err = cleanupVersionedBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
819
// testStatObjectWithVersioning uploads two versions of one object to a
// versioned bucket, then verifies that StatObject with an explicit
// VersionID returns metadata (version id, ETag, Last-Modified, size)
// matching the versioned listing for each version.
func testStatObjectWithVersioning() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "StatObject"
	args := map[string]interface{}{}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket with object locking enabled (required before
	// versioning can be turned on).
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
	if err != nil {
		logError(testName, function, args, startTime, "", "Make bucket failed", err)
		return
	}

	err = c.EnableVersioning(context.Background(), bucketName)
	if err != nil {
		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
		return
	}

	// Save the data
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	// First version: 10 kB.
	bufSize := dataFileMap["datafile-10-kB"]
	reader := getDataReader("datafile-10-kB")

	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}
	reader.Close()

	// Second version: 1 byte.
	bufSize = dataFileMap["datafile-1-b"]
	reader = getDataReader("datafile-1-b")
	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}
	reader.Close()

	// Collect both versions from the versioned listing.
	objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})

	var results []minio.ObjectInfo
	for info := range objectsInfo {
		if info.Err != nil {
			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
			return
		}
		results = append(results, info)
	}

	if len(results) != 2 {
		logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
		return
	}

	// HEAD each version and compare its metadata with the listing.
	for i := 0; i < len(results); i++ {
		opts := minio.StatObjectOptions{VersionID: results[i].VersionID}
		statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts)
		if err != nil {
			logError(testName, function, args, startTime, "", "error during HEAD object", err)
			return
		}
		if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID {
			logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err)
			return
		}
		if statInfo.ETag != results[i].ETag {
			logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
			return
		}
		// Compare at second granularity to ignore sub-second differences.
		if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
			logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
			return
		}
		if statInfo.Size != results[i].Size {
			logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
			return
		}
	}

	// Delete all objects and their versions as long as the bucket itself
	if err = cleanupVersionedBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
936
937func testGetObjectWithVersioning() {
938 // initialize logging params
939 startTime := time.Now()
940 testName := getFuncName()
941 function := "GetObject()"
942 args := map[string]interface{}{}
943
944 // Seed random based on current time.
945 rand.Seed(time.Now().Unix())
946
947 // Instantiate new minio client object.
948 c, err := minio.New(os.Getenv(serverEndpoint),
949 &minio.Options{
950 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
951 Secure: mustParseBool(os.Getenv(enableHTTPS)),
952 })
953 if err != nil {
954 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
955 return
956 }
957
958 // Enable tracing, write to stderr.
959 // c.TraceOn(os.Stderr)
960
961 // Set user agent.
962 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
963
964 // Generate a new random bucket name.
965 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
966 args["bucketName"] = bucketName
967
968 // Make a new bucket.
969 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
970 if err != nil {
971 logError(testName, function, args, startTime, "", "Make bucket failed", err)
972 return
973 }
974
975 err = c.EnableVersioning(context.Background(), bucketName)
976 if err != nil {
977 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
978 return
979 }
980
981 // Save the data
982 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
983 args["objectName"] = objectName
984
985 // Save the contents of datafiles to check with GetObject() reader output later
986 var buffers [][]byte
987 testFiles := []string{"datafile-1-b", "datafile-10-kB"}
988
989 for _, testFile := range testFiles {
990 r := getDataReader(testFile)
991 buf, err := io.ReadAll(r)
992 if err != nil {
993 logError(testName, function, args, startTime, "", "unexpected failure", err)
994 return
995 }
996 r.Close()
997 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
998 if err != nil {
999 logError(testName, function, args, startTime, "", "PutObject failed", err)
1000 return
1001 }
1002 buffers = append(buffers, buf)
1003 }
1004
1005 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1006
1007 var results []minio.ObjectInfo
1008 for info := range objectsInfo {
1009 if info.Err != nil {
1010 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
1011 return
1012 }
1013 results = append(results, info)
1014 }
1015
1016 if len(results) != 2 {
1017 logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
1018 return
1019 }
1020
1021 sort.SliceStable(results, func(i, j int) bool {
1022 return results[i].Size < results[j].Size
1023 })
1024
1025 sort.SliceStable(buffers, func(i, j int) bool {
1026 return len(buffers[i]) < len(buffers[j])
1027 })
1028
1029 for i := 0; i < len(results); i++ {
1030 opts := minio.GetObjectOptions{VersionID: results[i].VersionID}
1031 reader, err := c.GetObject(context.Background(), bucketName, objectName, opts)
1032 if err != nil {
1033 logError(testName, function, args, startTime, "", "error during GET object", err)
1034 return
1035 }
1036 statInfo, err := reader.Stat()
1037 if err != nil {
1038 logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err)
1039 return
1040 }
1041 if statInfo.ETag != results[i].ETag {
1042 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
1043 return
1044 }
1045 if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
1046 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
1047 return
1048 }
1049 if statInfo.Size != results[i].Size {
1050 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
1051 return
1052 }
1053
1054 tmpBuffer := bytes.NewBuffer([]byte{})
1055 _, err = io.Copy(tmpBuffer, reader)
1056 if err != nil {
1057 logError(testName, function, args, startTime, "", "unexpected io.Copy()", err)
1058 return
1059 }
1060
1061 if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) {
1062 logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err)
1063 return
1064 }
1065 }
1066
1067 // Delete all objects and their versions as long as the bucket itself
1068 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1069 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1070 return
1071 }
1072
1073 successLogger(testName, function, args, startTime).Info()
1074}
1075
1076func testPutObjectWithVersioning() {
1077 // initialize logging params
1078 startTime := time.Now()
1079 testName := getFuncName()
1080 function := "GetObject()"
1081 args := map[string]interface{}{}
1082
1083 // Seed random based on current time.
1084 rand.Seed(time.Now().Unix())
1085
1086 // Instantiate new minio client object.
1087 c, err := minio.New(os.Getenv(serverEndpoint),
1088 &minio.Options{
1089 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1090 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1091 })
1092 if err != nil {
1093 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1094 return
1095 }
1096
1097 // Enable tracing, write to stderr.
1098 // c.TraceOn(os.Stderr)
1099
1100 // Set user agent.
1101 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1102
1103 // Generate a new random bucket name.
1104 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1105 args["bucketName"] = bucketName
1106
1107 // Make a new bucket.
1108 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1109 if err != nil {
1110 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1111 return
1112 }
1113
1114 err = c.EnableVersioning(context.Background(), bucketName)
1115 if err != nil {
1116 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1117 return
1118 }
1119
1120 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1121 args["objectName"] = objectName
1122
1123 const n = 10
1124 // Read input...
1125
1126 // Save the data concurrently.
1127 var wg sync.WaitGroup
1128 wg.Add(n)
1129 buffers := make([][]byte, n)
1130 var errs [n]error
1131 for i := 0; i < n; i++ {
1132 r := newRandomReader(int64((1<<20)*i+i), int64(i))
1133 buf, err := io.ReadAll(r)
1134 if err != nil {
1135 logError(testName, function, args, startTime, "", "unexpected failure", err)
1136 return
1137 }
1138 buffers[i] = buf
1139
1140 go func(i int) {
1141 defer wg.Done()
1142 _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20})
1143 }(i)
1144 }
1145 wg.Wait()
1146 for _, err := range errs {
1147 if err != nil {
1148 logError(testName, function, args, startTime, "", "PutObject failed", err)
1149 return
1150 }
1151 }
1152
1153 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1154 var results []minio.ObjectInfo
1155 for info := range objectsInfo {
1156 if info.Err != nil {
1157 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
1158 return
1159 }
1160 results = append(results, info)
1161 }
1162
1163 if len(results) != n {
1164 logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
1165 return
1166 }
1167
1168 sort.Slice(results, func(i, j int) bool {
1169 return results[i].Size < results[j].Size
1170 })
1171
1172 sort.Slice(buffers, func(i, j int) bool {
1173 return len(buffers[i]) < len(buffers[j])
1174 })
1175
1176 for i := 0; i < len(results); i++ {
1177 opts := minio.GetObjectOptions{VersionID: results[i].VersionID}
1178 reader, err := c.GetObject(context.Background(), bucketName, objectName, opts)
1179 if err != nil {
1180 logError(testName, function, args, startTime, "", "error during GET object", err)
1181 return
1182 }
1183 statInfo, err := reader.Stat()
1184 if err != nil {
1185 logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err)
1186 return
1187 }
1188 if statInfo.ETag != results[i].ETag {
1189 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
1190 return
1191 }
1192 if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
1193 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
1194 return
1195 }
1196 if statInfo.Size != results[i].Size {
1197 logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
1198 return
1199 }
1200
1201 tmpBuffer := bytes.NewBuffer([]byte{})
1202 _, err = io.Copy(tmpBuffer, reader)
1203 if err != nil {
1204 logError(testName, function, args, startTime, "", "unexpected io.Copy()", err)
1205 return
1206 }
1207
1208 if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) {
1209 logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err)
1210 return
1211 }
1212 }
1213
1214 // Delete all objects and their versions as long as the bucket itself
1215 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1216 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1217 return
1218 }
1219
1220 successLogger(testName, function, args, startTime).Info()
1221}
1222
1223func testCopyObjectWithVersioning() {
1224 // initialize logging params
1225 startTime := time.Now()
1226 testName := getFuncName()
1227 function := "CopyObject()"
1228 args := map[string]interface{}{}
1229
1230 // Seed random based on current time.
1231 rand.Seed(time.Now().Unix())
1232
1233 // Instantiate new minio client object.
1234 c, err := minio.New(os.Getenv(serverEndpoint),
1235 &minio.Options{
1236 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1237 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1238 })
1239 if err != nil {
1240 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1241 return
1242 }
1243
1244 // Enable tracing, write to stderr.
1245 // c.TraceOn(os.Stderr)
1246
1247 // Set user agent.
1248 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1249
1250 // Generate a new random bucket name.
1251 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1252 args["bucketName"] = bucketName
1253
1254 // Make a new bucket.
1255 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1256 if err != nil {
1257 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1258 return
1259 }
1260
1261 err = c.EnableVersioning(context.Background(), bucketName)
1262 if err != nil {
1263 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1264 return
1265 }
1266
1267 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1268 args["objectName"] = objectName
1269
1270 testFiles := []string{"datafile-1-b", "datafile-10-kB"}
1271 for _, testFile := range testFiles {
1272 r := getDataReader(testFile)
1273 buf, err := io.ReadAll(r)
1274 if err != nil {
1275 logError(testName, function, args, startTime, "", "unexpected failure", err)
1276 return
1277 }
1278 r.Close()
1279 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
1280 if err != nil {
1281 logError(testName, function, args, startTime, "", "PutObject failed", err)
1282 return
1283 }
1284 }
1285
1286 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1287 var infos []minio.ObjectInfo
1288 for info := range objectsInfo {
1289 if info.Err != nil {
1290 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
1291 return
1292 }
1293 infos = append(infos, info)
1294 }
1295
1296 sort.Slice(infos, func(i, j int) bool {
1297 return infos[i].Size < infos[j].Size
1298 })
1299
1300 reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID})
1301 if err != nil {
1302 logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err)
1303 return
1304 }
1305
1306 oldestContent, err := io.ReadAll(reader)
1307 if err != nil {
1308 logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
1309 return
1310 }
1311
1312 // Copy Source
1313 srcOpts := minio.CopySrcOptions{
1314 Bucket: bucketName,
1315 Object: objectName,
1316 VersionID: infos[0].VersionID,
1317 }
1318 args["src"] = srcOpts
1319
1320 dstOpts := minio.CopyDestOptions{
1321 Bucket: bucketName,
1322 Object: objectName + "-copy",
1323 }
1324 args["dst"] = dstOpts
1325
1326 // Perform the Copy
1327 if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
1328 logError(testName, function, args, startTime, "", "CopyObject failed", err)
1329 return
1330 }
1331
1332 // Destination object
1333 readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{})
1334 if err != nil {
1335 logError(testName, function, args, startTime, "", "GetObject failed", err)
1336 return
1337 }
1338 defer readerCopy.Close()
1339
1340 newestContent, err := io.ReadAll(readerCopy)
1341 if err != nil {
1342 logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
1343 return
1344 }
1345
1346 if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) {
1347 logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
1348 return
1349 }
1350
1351 // Delete all objects and their versions as long as the bucket itself
1352 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1353 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1354 return
1355 }
1356
1357 successLogger(testName, function, args, startTime).Info()
1358}
1359
1360func testConcurrentCopyObjectWithVersioning() {
1361 // initialize logging params
1362 startTime := time.Now()
1363 testName := getFuncName()
1364 function := "CopyObject()"
1365 args := map[string]interface{}{}
1366
1367 // Seed random based on current time.
1368 rand.Seed(time.Now().Unix())
1369
1370 // Instantiate new minio client object.
1371 c, err := minio.New(os.Getenv(serverEndpoint),
1372 &minio.Options{
1373 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1374 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1375 })
1376 if err != nil {
1377 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1378 return
1379 }
1380
1381 // Enable tracing, write to stderr.
1382 // c.TraceOn(os.Stderr)
1383
1384 // Set user agent.
1385 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1386
1387 // Generate a new random bucket name.
1388 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1389 args["bucketName"] = bucketName
1390
1391 // Make a new bucket.
1392 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1393 if err != nil {
1394 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1395 return
1396 }
1397
1398 err = c.EnableVersioning(context.Background(), bucketName)
1399 if err != nil {
1400 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1401 return
1402 }
1403
1404 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1405 args["objectName"] = objectName
1406
1407 testFiles := []string{"datafile-10-kB"}
1408 for _, testFile := range testFiles {
1409 r := getDataReader(testFile)
1410 buf, err := io.ReadAll(r)
1411 if err != nil {
1412 logError(testName, function, args, startTime, "", "unexpected failure", err)
1413 return
1414 }
1415 r.Close()
1416 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
1417 if err != nil {
1418 logError(testName, function, args, startTime, "", "PutObject failed", err)
1419 return
1420 }
1421 }
1422
1423 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1424 var infos []minio.ObjectInfo
1425 for info := range objectsInfo {
1426 if info.Err != nil {
1427 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
1428 return
1429 }
1430 infos = append(infos, info)
1431 }
1432
1433 sort.Slice(infos, func(i, j int) bool {
1434 return infos[i].Size < infos[j].Size
1435 })
1436
1437 reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID})
1438 if err != nil {
1439 logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err)
1440 return
1441 }
1442
1443 oldestContent, err := io.ReadAll(reader)
1444 if err != nil {
1445 logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
1446 return
1447 }
1448
1449 // Copy Source
1450 srcOpts := minio.CopySrcOptions{
1451 Bucket: bucketName,
1452 Object: objectName,
1453 VersionID: infos[0].VersionID,
1454 }
1455 args["src"] = srcOpts
1456
1457 dstOpts := minio.CopyDestOptions{
1458 Bucket: bucketName,
1459 Object: objectName + "-copy",
1460 }
1461 args["dst"] = dstOpts
1462
1463 // Perform the Copy concurrently
1464 const n = 10
1465 var wg sync.WaitGroup
1466 wg.Add(n)
1467 var errs [n]error
1468 for i := 0; i < n; i++ {
1469 go func(i int) {
1470 defer wg.Done()
1471 _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts)
1472 }(i)
1473 }
1474 wg.Wait()
1475 for _, err := range errs {
1476 if err != nil {
1477 logError(testName, function, args, startTime, "", "CopyObject failed", err)
1478 return
1479 }
1480 }
1481
1482 objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object})
1483 infos = []minio.ObjectInfo{}
1484 for info := range objectsInfo {
1485 // Destination object
1486 readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID})
1487 if err != nil {
1488 logError(testName, function, args, startTime, "", "GetObject failed", err)
1489 return
1490 }
1491 defer readerCopy.Close()
1492
1493 newestContent, err := io.ReadAll(readerCopy)
1494 if err != nil {
1495 logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
1496 return
1497 }
1498
1499 if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) {
1500 logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
1501 return
1502 }
1503 infos = append(infos, info)
1504 }
1505
1506 if len(infos) != n {
1507 logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
1508 return
1509 }
1510
1511 // Delete all objects and their versions as long as the bucket itself
1512 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1513 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1514 return
1515 }
1516
1517 successLogger(testName, function, args, startTime).Info()
1518}
1519
1520func testComposeObjectWithVersioning() {
1521 // initialize logging params
1522 startTime := time.Now()
1523 testName := getFuncName()
1524 function := "ComposeObject()"
1525 args := map[string]interface{}{}
1526
1527 // Seed random based on current time.
1528 rand.Seed(time.Now().Unix())
1529
1530 // Instantiate new minio client object.
1531 c, err := minio.New(os.Getenv(serverEndpoint),
1532 &minio.Options{
1533 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1534 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1535 })
1536 if err != nil {
1537 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1538 return
1539 }
1540
1541 // Enable tracing, write to stderr.
1542 // c.TraceOn(os.Stderr)
1543
1544 // Set user agent.
1545 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1546
1547 // Generate a new random bucket name.
1548 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1549 args["bucketName"] = bucketName
1550
1551 // Make a new bucket.
1552 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1553 if err != nil {
1554 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1555 return
1556 }
1557
1558 err = c.EnableVersioning(context.Background(), bucketName)
1559 if err != nil {
1560 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1561 return
1562 }
1563
1564 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1565 args["objectName"] = objectName
1566
1567 // var testFiles = []string{"datafile-5-MB", "datafile-10-kB"}
1568 testFiles := []string{"datafile-5-MB", "datafile-10-kB"}
1569 var testFilesBytes [][]byte
1570
1571 for _, testFile := range testFiles {
1572 r := getDataReader(testFile)
1573 buf, err := io.ReadAll(r)
1574 if err != nil {
1575 logError(testName, function, args, startTime, "", "unexpected failure", err)
1576 return
1577 }
1578 r.Close()
1579 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
1580 if err != nil {
1581 logError(testName, function, args, startTime, "", "PutObject failed", err)
1582 return
1583 }
1584 testFilesBytes = append(testFilesBytes, buf)
1585 }
1586
1587 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1588
1589 var results []minio.ObjectInfo
1590 for info := range objectsInfo {
1591 if info.Err != nil {
1592 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
1593 return
1594 }
1595 results = append(results, info)
1596 }
1597
1598 sort.SliceStable(results, func(i, j int) bool {
1599 return results[i].Size > results[j].Size
1600 })
1601
1602 // Source objects to concatenate. We also specify decryption
1603 // key for each
1604 src1 := minio.CopySrcOptions{
1605 Bucket: bucketName,
1606 Object: objectName,
1607 VersionID: results[0].VersionID,
1608 }
1609
1610 src2 := minio.CopySrcOptions{
1611 Bucket: bucketName,
1612 Object: objectName,
1613 VersionID: results[1].VersionID,
1614 }
1615
1616 dst := minio.CopyDestOptions{
1617 Bucket: bucketName,
1618 Object: objectName + "-copy",
1619 }
1620
1621 _, err = c.ComposeObject(context.Background(), dst, src1, src2)
1622 if err != nil {
1623 logError(testName, function, args, startTime, "", "ComposeObject failed", err)
1624 return
1625 }
1626
1627 // Destination object
1628 readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{})
1629 if err != nil {
1630 logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err)
1631 return
1632 }
1633 defer readerCopy.Close()
1634
1635 copyContentBytes, err := io.ReadAll(readerCopy)
1636 if err != nil {
1637 logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err)
1638 return
1639 }
1640
1641 var expectedContent []byte
1642 for _, fileBytes := range testFilesBytes {
1643 expectedContent = append(expectedContent, fileBytes...)
1644 }
1645
1646 if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) {
1647 logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
1648 return
1649 }
1650
1651 // Delete all objects and their versions as long as the bucket itself
1652 if err = cleanupVersionedBucket(bucketName, c); err != nil {
1653 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1654 return
1655 }
1656
1657 successLogger(testName, function, args, startTime).Info()
1658}
1659
1660func testRemoveObjectWithVersioning() {
1661 // initialize logging params
1662 startTime := time.Now()
1663 testName := getFuncName()
1664 function := "DeleteObject()"
1665 args := map[string]interface{}{}
1666
1667 // Seed random based on current time.
1668 rand.Seed(time.Now().Unix())
1669
1670 // Instantiate new minio client object.
1671 c, err := minio.New(os.Getenv(serverEndpoint),
1672 &minio.Options{
1673 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1674 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1675 })
1676 if err != nil {
1677 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1678 return
1679 }
1680
1681 // Enable tracing, write to stderr.
1682 // c.TraceOn(os.Stderr)
1683
1684 // Set user agent.
1685 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1686
1687 // Generate a new random bucket name.
1688 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1689 args["bucketName"] = bucketName
1690
1691 // Make a new bucket.
1692 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1693 if err != nil {
1694 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1695 return
1696 }
1697
1698 err = c.EnableVersioning(context.Background(), bucketName)
1699 if err != nil {
1700 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1701 return
1702 }
1703
1704 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1705 args["objectName"] = objectName
1706
1707 _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
1708 if err != nil {
1709 logError(testName, function, args, startTime, "", "PutObject failed", err)
1710 return
1711 }
1712
1713 objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1714 var version minio.ObjectInfo
1715 for info := range objectsInfo {
1716 if info.Err != nil {
1717 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
1718 return
1719 }
1720 version = info
1721 break
1722 }
1723
1724 err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID})
1725 if err != nil {
1726 logError(testName, function, args, startTime, "", "DeleteObject failed", err)
1727 return
1728 }
1729
1730 objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1731 for range objectsInfo {
1732 logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err)
1733 return
1734 }
1735 // test delete marker version id is non-null
1736 _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
1737 if err != nil {
1738 logError(testName, function, args, startTime, "", "PutObject failed", err)
1739 return
1740 }
1741 // create delete marker
1742 err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
1743 if err != nil {
1744 logError(testName, function, args, startTime, "", "DeleteObject failed", err)
1745 return
1746 }
1747 objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1748 idx := 0
1749 for info := range objectsInfo {
1750 if info.Err != nil {
1751 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
1752 return
1753 }
1754 if idx == 0 {
1755 if !info.IsDeleteMarker {
1756 logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err)
1757 return
1758 }
1759 if info.VersionID == "" {
1760 logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err)
1761 return
1762 }
1763 }
1764 idx++
1765 }
1766
1767 defer cleanupBucket(bucketName, c)
1768
1769 successLogger(testName, function, args, startTime).Info()
1770}
1771
1772func testRemoveObjectsWithVersioning() {
1773 // initialize logging params
1774 startTime := time.Now()
1775 testName := getFuncName()
1776 function := "DeleteObjects()"
1777 args := map[string]interface{}{}
1778
1779 // Seed random based on current time.
1780 rand.Seed(time.Now().Unix())
1781
1782 // Instantiate new minio client object.
1783 c, err := minio.New(os.Getenv(serverEndpoint),
1784 &minio.Options{
1785 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1786 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1787 })
1788 if err != nil {
1789 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1790 return
1791 }
1792
1793 // Enable tracing, write to stderr.
1794 // c.TraceOn(os.Stderr)
1795
1796 // Set user agent.
1797 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1798
1799 // Generate a new random bucket name.
1800 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1801 args["bucketName"] = bucketName
1802
1803 // Make a new bucket.
1804 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1805 if err != nil {
1806 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1807 return
1808 }
1809
1810 err = c.EnableVersioning(context.Background(), bucketName)
1811 if err != nil {
1812 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1813 return
1814 }
1815
1816 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1817 args["objectName"] = objectName
1818
1819 _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
1820 if err != nil {
1821 logError(testName, function, args, startTime, "", "PutObject failed", err)
1822 return
1823 }
1824
1825 objectsVersions := make(chan minio.ObjectInfo)
1826 go func() {
1827 objectsVersionsInfo := c.ListObjects(context.Background(), bucketName,
1828 minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1829 for info := range objectsVersionsInfo {
1830 if info.Err != nil {
1831 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
1832 return
1833 }
1834 objectsVersions <- info
1835 }
1836 close(objectsVersions)
1837 }()
1838
1839 removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{})
1840 if err != nil {
1841 logError(testName, function, args, startTime, "", "DeleteObjects call failed", err)
1842 return
1843 }
1844
1845 for e := range removeErrors {
1846 if e.Err != nil {
1847 logError(testName, function, args, startTime, "", "Single delete operation failed", err)
1848 return
1849 }
1850 }
1851
1852 objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1853 for range objectsVersionsInfo {
1854 logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err)
1855 return
1856 }
1857
1858 err = c.RemoveBucket(context.Background(), bucketName)
1859 if err != nil {
1860 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
1861 return
1862 }
1863
1864 successLogger(testName, function, args, startTime).Info()
1865}
1866
1867func testObjectTaggingWithVersioning() {
1868 // initialize logging params
1869 startTime := time.Now()
1870 testName := getFuncName()
1871 function := "{Get,Set,Remove}ObjectTagging()"
1872 args := map[string]interface{}{}
1873
1874 // Seed random based on current time.
1875 rand.Seed(time.Now().Unix())
1876
1877 // Instantiate new minio client object.
1878 c, err := minio.New(os.Getenv(serverEndpoint),
1879 &minio.Options{
1880 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
1881 Secure: mustParseBool(os.Getenv(enableHTTPS)),
1882 })
1883 if err != nil {
1884 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
1885 return
1886 }
1887
1888 // Enable tracing, write to stderr.
1889 // c.TraceOn(os.Stderr)
1890
1891 // Set user agent.
1892 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
1893
1894 // Generate a new random bucket name.
1895 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
1896 args["bucketName"] = bucketName
1897
1898 // Make a new bucket.
1899 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
1900 if err != nil {
1901 logError(testName, function, args, startTime, "", "Make bucket failed", err)
1902 return
1903 }
1904
1905 err = c.EnableVersioning(context.Background(), bucketName)
1906 if err != nil {
1907 logError(testName, function, args, startTime, "", "Enable versioning failed", err)
1908 return
1909 }
1910
1911 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
1912 args["objectName"] = objectName
1913
1914 for _, file := range []string{"datafile-1-b", "datafile-10-kB"} {
1915 _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{})
1916 if err != nil {
1917 logError(testName, function, args, startTime, "", "PutObject failed", err)
1918 return
1919 }
1920 }
1921
1922 versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
1923
1924 var versions []minio.ObjectInfo
1925 for info := range versionsInfo {
1926 if info.Err != nil {
1927 logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
1928 return
1929 }
1930 versions = append(versions, info)
1931 }
1932
1933 sort.SliceStable(versions, func(i, j int) bool {
1934 return versions[i].Size < versions[j].Size
1935 })
1936
1937 tagsV1 := map[string]string{"key1": "val1"}
1938 t1, err := tags.MapToObjectTags(tagsV1)
1939 if err != nil {
1940 logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err)
1941 return
1942 }
1943
1944 err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID})
1945 if err != nil {
1946 logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err)
1947 return
1948 }
1949
1950 tagsV2 := map[string]string{"key2": "val2"}
1951 t2, err := tags.MapToObjectTags(tagsV2)
1952 if err != nil {
1953 logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err)
1954 return
1955 }
1956
1957 err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID})
1958 if err != nil {
1959 logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err)
1960 return
1961 }
1962
1963 tagsEqual := func(tags1, tags2 map[string]string) bool {
1964 for k1, v1 := range tags1 {
1965 v2, found := tags2[k1]
1966 if found {
1967 if v1 != v2 {
1968 return false
1969 }
1970 }
1971 }
1972 return true
1973 }
1974
1975 gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
1976 if err != nil {
1977 logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
1978 return
1979 }
1980
1981 if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) {
1982 logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err)
1983 return
1984 }
1985
1986 gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{})
1987 if err != nil {
1988 logError(testName, function, args, startTime, "", "GetObjectTaggingContext failed", err)
1989 return
1990 }
1991
1992 if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) {
1993 logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err)
1994 return
1995 }
1996
1997 err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID})
1998 if err != nil {
1999 logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err)
2000 return
2001 }
2002
2003 emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName,
2004 minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
2005 if err != nil {
2006 logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
2007 return
2008 }
2009
2010 if len(emptyTags.ToMap()) != 0 {
2011 logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err)
2012 return
2013 }
2014
2015 // Delete all objects and their versions as long as the bucket itself
2016 if err = cleanupVersionedBucket(bucketName, c); err != nil {
2017 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
2018 return
2019 }
2020
2021 successLogger(testName, function, args, startTime).Info()
2022}
2023
2024// Test PutObject with custom checksums.
2025func testPutObjectWithChecksums() {
2026 // initialize logging params
2027 startTime := time.Now()
2028 testName := getFuncName()
2029 function := "PutObject(bucketName, objectName, reader,size, opts)"
2030 args := map[string]interface{}{
2031 "bucketName": "",
2032 "objectName": "",
2033 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2034 }
2035
2036 if !isFullMode() {
2037 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2038 return
2039 }
2040
2041 // Seed random based on current time.
2042 rand.Seed(time.Now().Unix())
2043
2044 // Instantiate new minio client object.
2045 c, err := minio.New(os.Getenv(serverEndpoint),
2046 &minio.Options{
2047 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2048 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2049 })
2050 if err != nil {
2051 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2052 return
2053 }
2054
2055 // Enable tracing, write to stderr.
2056 // c.TraceOn(os.Stderr)
2057
2058 // Set user agent.
2059 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2060
2061 // Generate a new random bucket name.
2062 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2063 args["bucketName"] = bucketName
2064
2065 // Make a new bucket.
2066 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2067 if err != nil {
2068 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2069 return
2070 }
2071
2072 defer cleanupBucket(bucketName, c)
2073 tests := []struct {
2074 header string
2075 hasher hash.Hash
2076
2077 // Checksum values
2078 ChecksumCRC32 string
2079 ChecksumCRC32C string
2080 ChecksumSHA1 string
2081 ChecksumSHA256 string
2082 }{
2083 {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()},
2084 {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
2085 {header: "x-amz-checksum-sha1", hasher: sha1.New()},
2086 {header: "x-amz-checksum-sha256", hasher: sha256.New()},
2087 }
2088
2089 for i, test := range tests {
2090 bufSize := dataFileMap["datafile-10-kB"]
2091
2092 // Save the data
2093 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2094 args["objectName"] = objectName
2095
2096 cmpChecksum := func(got, want string) {
2097 if want != got {
2098 logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
2099 return
2100 }
2101 }
2102
2103 meta := map[string]string{}
2104 reader := getDataReader("datafile-10-kB")
2105 b, err := io.ReadAll(reader)
2106 if err != nil {
2107 logError(testName, function, args, startTime, "", "Read failed", err)
2108 return
2109 }
2110 h := test.hasher
2111 h.Reset()
2112 // Wrong CRC.
2113 meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
2114 args["metadata"] = meta
2115 args["range"] = "false"
2116
2117 resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2118 DisableMultipart: true,
2119 UserMetadata: meta,
2120 })
2121 if err == nil {
2122 if i == 0 && resp.ChecksumCRC32 == "" {
2123 ignoredLog(testName, function, args, startTime, "Checksums does not appear to be supported by backend").Info()
2124 return
2125 }
2126 logError(testName, function, args, startTime, "", "PutObject failed", err)
2127 return
2128 }
2129
2130 // Set correct CRC.
2131 h.Write(b)
2132 meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
2133 reader.Close()
2134
2135 resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2136 DisableMultipart: true,
2137 DisableContentSha256: true,
2138 UserMetadata: meta,
2139 })
2140 if err != nil {
2141 logError(testName, function, args, startTime, "", "PutObject failed", err)
2142 return
2143 }
2144 cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2145 cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2146 cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2147 cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2148
2149 // Read the data back
2150 gopts := minio.GetObjectOptions{Checksum: true}
2151
2152 r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
2153 if err != nil {
2154 logError(testName, function, args, startTime, "", "GetObject failed", err)
2155 return
2156 }
2157
2158 st, err := r.Stat()
2159 if err != nil {
2160 logError(testName, function, args, startTime, "", "Stat failed", err)
2161 return
2162 }
2163 cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2164 cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2165 cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2166 cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2167
2168 if st.Size != int64(bufSize) {
2169 logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
2170 return
2171 }
2172
2173 if err := r.Close(); err != nil {
2174 logError(testName, function, args, startTime, "", "Object Close failed", err)
2175 return
2176 }
2177 if err := r.Close(); err == nil {
2178 logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
2179 return
2180 }
2181
2182 args["range"] = "true"
2183 err = gopts.SetRange(100, 1000)
2184 if err != nil {
2185 logError(testName, function, args, startTime, "", "SetRange failed", err)
2186 return
2187 }
2188 r, err = c.GetObject(context.Background(), bucketName, objectName, gopts)
2189 if err != nil {
2190 logError(testName, function, args, startTime, "", "GetObject failed", err)
2191 return
2192 }
2193
2194 b, err = io.ReadAll(r)
2195 if err != nil {
2196 logError(testName, function, args, startTime, "", "Read failed", err)
2197 return
2198 }
2199 st, err = r.Stat()
2200 if err != nil {
2201 logError(testName, function, args, startTime, "", "Stat failed", err)
2202 return
2203 }
2204
2205 // Range requests should return empty checksums...
2206 cmpChecksum(st.ChecksumSHA256, "")
2207 cmpChecksum(st.ChecksumSHA1, "")
2208 cmpChecksum(st.ChecksumCRC32, "")
2209 cmpChecksum(st.ChecksumCRC32C, "")
2210
2211 delete(args, "range")
2212 delete(args, "metadata")
2213 }
2214
2215 successLogger(testName, function, args, startTime).Info()
2216}
2217
2218// Test PutObject with custom checksums.
2219func testPutMultipartObjectWithChecksums() {
2220 // initialize logging params
2221 startTime := time.Now()
2222 testName := getFuncName()
2223 function := "PutObject(bucketName, objectName, reader,size, opts)"
2224 args := map[string]interface{}{
2225 "bucketName": "",
2226 "objectName": "",
2227 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2228 }
2229
2230 if !isFullMode() {
2231 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2232 return
2233 }
2234
2235 // Seed random based on current time.
2236 rand.Seed(time.Now().Unix())
2237
2238 // Instantiate new minio client object.
2239 c, err := minio.New(os.Getenv(serverEndpoint),
2240 &minio.Options{
2241 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2242 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2243 })
2244 if err != nil {
2245 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2246 return
2247 }
2248
2249 // Enable tracing, write to stderr.
2250 // c.TraceOn(os.Stderr)
2251
2252 // Set user agent.
2253 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2254
2255 // Generate a new random bucket name.
2256 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2257 args["bucketName"] = bucketName
2258
2259 // Make a new bucket.
2260 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2261 if err != nil {
2262 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2263 return
2264 }
2265
2266 hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string {
2267 r := bytes.NewReader(b)
2268 tmp := make([]byte, partSize)
2269 parts := 0
2270 var all []byte
2271 for {
2272 n, err := io.ReadFull(r, tmp)
2273 if err != nil && err != io.ErrUnexpectedEOF {
2274 logError(testName, function, args, startTime, "", "Calc crc failed", err)
2275 }
2276 if n == 0 {
2277 break
2278 }
2279 parts++
2280 hasher.Reset()
2281 hasher.Write(tmp[:n])
2282 all = append(all, hasher.Sum(nil)...)
2283 if err != nil {
2284 break
2285 }
2286 }
2287 hasher.Reset()
2288 hasher.Write(all)
2289 return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts)
2290 }
2291 defer cleanupBucket(bucketName, c)
2292 tests := []struct {
2293 header string
2294 hasher hash.Hash
2295
2296 // Checksum values
2297 ChecksumCRC32 string
2298 ChecksumCRC32C string
2299 ChecksumSHA1 string
2300 ChecksumSHA256 string
2301 }{
2302 // Currently there is no way to override the checksum type.
2303 {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "OpEx0Q==-13"},
2304 }
2305
2306 for _, test := range tests {
2307 bufSize := dataFileMap["datafile-129-MB"]
2308
2309 // Save the data
2310 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2311 args["objectName"] = objectName
2312
2313 cmpChecksum := func(got, want string) {
2314 if want != got {
2315 // logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
2316 fmt.Printf("want %s, got %s\n", want, got)
2317 return
2318 }
2319 }
2320
2321 const partSize = 10 << 20
2322 reader := getDataReader("datafile-129-MB")
2323 b, err := io.ReadAll(reader)
2324 if err != nil {
2325 logError(testName, function, args, startTime, "", "Read failed", err)
2326 return
2327 }
2328 reader.Close()
2329 h := test.hasher
2330 h.Reset()
2331 test.ChecksumCRC32C = hashMultiPart(b, partSize, test.hasher)
2332
2333 // Set correct CRC.
2334
2335 resp, err := c.PutObject(context.Background(), bucketName, objectName, io.NopCloser(bytes.NewReader(b)), int64(bufSize), minio.PutObjectOptions{
2336 DisableContentSha256: true,
2337 DisableMultipart: false,
2338 UserMetadata: nil,
2339 PartSize: partSize,
2340 })
2341 if err != nil {
2342 logError(testName, function, args, startTime, "", "PutObject failed", err)
2343 return
2344 }
2345 cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
2346 cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
2347 cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
2348 cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
2349
2350 // Read the data back
2351 gopts := minio.GetObjectOptions{Checksum: true}
2352 gopts.PartNumber = 2
2353
2354 // We cannot use StatObject, since it ignores partnumber.
2355 r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
2356 if err != nil {
2357 logError(testName, function, args, startTime, "", "GetObject failed", err)
2358 return
2359 }
2360 io.Copy(io.Discard, r)
2361 st, err := r.Stat()
2362 if err != nil {
2363 logError(testName, function, args, startTime, "", "Stat failed", err)
2364 return
2365 }
2366
2367 // Test part 2 checksum...
2368 h.Reset()
2369 h.Write(b[partSize : 2*partSize])
2370 got := base64.StdEncoding.EncodeToString(h.Sum(nil))
2371 if test.ChecksumSHA256 != "" {
2372 cmpChecksum(st.ChecksumSHA256, got)
2373 }
2374 if test.ChecksumSHA1 != "" {
2375 cmpChecksum(st.ChecksumSHA1, got)
2376 }
2377 if test.ChecksumCRC32 != "" {
2378 cmpChecksum(st.ChecksumCRC32, got)
2379 }
2380 if test.ChecksumCRC32C != "" {
2381 cmpChecksum(st.ChecksumCRC32C, got)
2382 }
2383
2384 delete(args, "metadata")
2385 }
2386
2387 successLogger(testName, function, args, startTime).Info()
2388}
2389
2390// Test PutObject with trailing checksums.
2391func testTrailingChecksums() {
2392 // initialize logging params
2393 startTime := time.Now()
2394 testName := getFuncName()
2395 function := "PutObject(bucketName, objectName, reader,size, opts)"
2396 args := map[string]interface{}{
2397 "bucketName": "",
2398 "objectName": "",
2399 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2400 }
2401
2402 if !isFullMode() {
2403 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2404 return
2405 }
2406
2407 // Instantiate new minio client object.
2408 c, err := minio.New(os.Getenv(serverEndpoint),
2409 &minio.Options{
2410 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2411 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2412 TrailingHeaders: true,
2413 })
2414 if err != nil {
2415 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2416 return
2417 }
2418
2419 // Enable tracing, write to stderr.
2420 // c.TraceOn(os.Stderr)
2421
2422 // Set user agent.
2423 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2424
2425 // Generate a new random bucket name.
2426 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2427 args["bucketName"] = bucketName
2428
2429 // Make a new bucket.
2430 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2431 if err != nil {
2432 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2433 return
2434 }
2435
2436 hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string {
2437 r := bytes.NewReader(b)
2438 tmp := make([]byte, partSize)
2439 parts := 0
2440 var all []byte
2441 for {
2442 n, err := io.ReadFull(r, tmp)
2443 if err != nil && err != io.ErrUnexpectedEOF {
2444 logError(testName, function, args, startTime, "", "Calc crc failed", err)
2445 }
2446 if n == 0 {
2447 break
2448 }
2449 parts++
2450 hasher.Reset()
2451 hasher.Write(tmp[:n])
2452 all = append(all, hasher.Sum(nil)...)
2453 if err != nil {
2454 break
2455 }
2456 }
2457 hasher.Reset()
2458 hasher.Write(all)
2459 return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts)
2460 }
2461 defer cleanupBucket(bucketName, c)
2462 tests := []struct {
2463 header string
2464 hasher hash.Hash
2465
2466 // Checksum values
2467 ChecksumCRC32 string
2468 ChecksumCRC32C string
2469 ChecksumSHA1 string
2470 ChecksumSHA256 string
2471 PO minio.PutObjectOptions
2472 }{
2473 // Currently there is no way to override the checksum type.
2474 {
2475 header: "x-amz-checksum-crc32c",
2476 hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
2477 ChecksumCRC32C: "set",
2478 PO: minio.PutObjectOptions{
2479 DisableContentSha256: true,
2480 DisableMultipart: false,
2481 UserMetadata: nil,
2482 PartSize: 5 << 20,
2483 },
2484 },
2485 {
2486 header: "x-amz-checksum-crc32c",
2487 hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
2488 ChecksumCRC32C: "set",
2489 PO: minio.PutObjectOptions{
2490 DisableContentSha256: true,
2491 DisableMultipart: false,
2492 UserMetadata: nil,
2493 PartSize: 6_645_654, // Rather arbitrary size
2494 },
2495 },
2496 {
2497 header: "x-amz-checksum-crc32c",
2498 hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
2499 ChecksumCRC32C: "set",
2500 PO: minio.PutObjectOptions{
2501 DisableContentSha256: false,
2502 DisableMultipart: false,
2503 UserMetadata: nil,
2504 PartSize: 5 << 20,
2505 },
2506 },
2507 {
2508 header: "x-amz-checksum-crc32c",
2509 hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
2510 ChecksumCRC32C: "set",
2511 PO: minio.PutObjectOptions{
2512 DisableContentSha256: false,
2513 DisableMultipart: false,
2514 UserMetadata: nil,
2515 PartSize: 6_645_654, // Rather arbitrary size
2516 },
2517 },
2518 }
2519
2520 for _, test := range tests {
2521 bufSize := dataFileMap["datafile-11-MB"]
2522
2523 // Save the data
2524 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2525 args["objectName"] = objectName
2526
2527 cmpChecksum := func(got, want string) {
2528 if want != got {
2529 logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %q, got %q", want, got))
2530 return
2531 }
2532 }
2533
2534 reader := getDataReader("datafile-11-MB")
2535 b, err := io.ReadAll(reader)
2536 if err != nil {
2537 logError(testName, function, args, startTime, "", "Read failed", err)
2538 return
2539 }
2540 reader.Close()
2541 h := test.hasher
2542 h.Reset()
2543 test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher)
2544
2545 // Set correct CRC.
2546 // c.TraceOn(os.Stderr)
2547 resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO)
2548 if err != nil {
2549 logError(testName, function, args, startTime, "", "PutObject failed", err)
2550 return
2551 }
2552 // c.TraceOff()
2553 cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
2554 cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
2555 cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
2556 cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
2557
2558 // Read the data back
2559 gopts := minio.GetObjectOptions{Checksum: true}
2560 gopts.PartNumber = 2
2561
2562 // We cannot use StatObject, since it ignores partnumber.
2563 r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
2564 if err != nil {
2565 logError(testName, function, args, startTime, "", "GetObject failed", err)
2566 return
2567 }
2568 io.Copy(io.Discard, r)
2569 st, err := r.Stat()
2570 if err != nil {
2571 logError(testName, function, args, startTime, "", "Stat failed", err)
2572 return
2573 }
2574
2575 // Test part 2 checksum...
2576 h.Reset()
2577 p2 := b[test.PO.PartSize:]
2578 if len(p2) > int(test.PO.PartSize) {
2579 p2 = p2[:test.PO.PartSize]
2580 }
2581 h.Write(p2)
2582 got := base64.StdEncoding.EncodeToString(h.Sum(nil))
2583 if test.ChecksumSHA256 != "" {
2584 cmpChecksum(st.ChecksumSHA256, got)
2585 }
2586 if test.ChecksumSHA1 != "" {
2587 cmpChecksum(st.ChecksumSHA1, got)
2588 }
2589 if test.ChecksumCRC32 != "" {
2590 cmpChecksum(st.ChecksumCRC32, got)
2591 }
2592 if test.ChecksumCRC32C != "" {
2593 cmpChecksum(st.ChecksumCRC32C, got)
2594 }
2595
2596 delete(args, "metadata")
2597 }
2598}
2599
2600// Test PutObject with custom checksums.
2601func testPutObjectWithAutomaticChecksums() {
2602 // initialize logging params
2603 startTime := time.Now()
2604 testName := getFuncName()
2605 function := "PutObject(bucketName, objectName, reader,size, opts)"
2606 args := map[string]interface{}{
2607 "bucketName": "",
2608 "objectName": "",
2609 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2610 }
2611
2612 if !isFullMode() {
2613 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2614 return
2615 }
2616
2617 // Seed random based on current time.
2618 rand.Seed(time.Now().Unix())
2619
2620 // Instantiate new minio client object.
2621 c, err := minio.New(os.Getenv(serverEndpoint),
2622 &minio.Options{
2623 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2624 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2625 TrailingHeaders: true,
2626 })
2627 if err != nil {
2628 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2629 return
2630 }
2631
2632 // Set user agent.
2633 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2634
2635 // Generate a new random bucket name.
2636 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2637 args["bucketName"] = bucketName
2638
2639 // Make a new bucket.
2640 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2641 if err != nil {
2642 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2643 return
2644 }
2645
2646 defer cleanupBucket(bucketName, c)
2647 tests := []struct {
2648 header string
2649 hasher hash.Hash
2650
2651 // Checksum values
2652 ChecksumCRC32 string
2653 ChecksumCRC32C string
2654 ChecksumSHA1 string
2655 ChecksumSHA256 string
2656 }{
2657 // Built-in will only add crc32c, when no MD5 nor SHA256.
2658 {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
2659 }
2660
2661 // Enable tracing, write to stderr.
2662 // c.TraceOn(os.Stderr)
2663 // defer c.TraceOff()
2664
2665 for i, test := range tests {
2666 bufSize := dataFileMap["datafile-10-kB"]
2667
2668 // Save the data
2669 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2670 args["objectName"] = objectName
2671
2672 cmpChecksum := func(got, want string) {
2673 if want != got {
2674 logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
2675 return
2676 }
2677 }
2678
2679 meta := map[string]string{}
2680 reader := getDataReader("datafile-10-kB")
2681 b, err := io.ReadAll(reader)
2682 if err != nil {
2683 logError(testName, function, args, startTime, "", "Read failed", err)
2684 return
2685 }
2686
2687 h := test.hasher
2688 h.Reset()
2689 h.Write(b)
2690 meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
2691 args["metadata"] = meta
2692
2693 resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2694 DisableMultipart: true,
2695 UserMetadata: nil,
2696 DisableContentSha256: true,
2697 SendContentMd5: false,
2698 })
2699 if err == nil {
2700 if i == 0 && resp.ChecksumCRC32C == "" {
2701 ignoredLog(testName, function, args, startTime, "Checksums does not appear to be supported by backend").Info()
2702 return
2703 }
2704 } else {
2705 logError(testName, function, args, startTime, "", "PutObject failed", err)
2706 return
2707 }
2708 cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2709 cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2710 cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2711 cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2712
2713 // Usually this will be the same as above, since we skip automatic checksum when SHA256 content is sent.
2714 // When/if we add a checksum control to PutObjectOptions this will make more sense.
2715 resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2716 DisableMultipart: true,
2717 UserMetadata: nil,
2718 DisableContentSha256: false,
2719 SendContentMd5: false,
2720 })
2721 if err != nil {
2722 logError(testName, function, args, startTime, "", "PutObject failed", err)
2723 return
2724 }
2725 // The checksum will not be enabled on HTTP, since it uses SHA256 blocks.
2726 if mustParseBool(os.Getenv(enableHTTPS)) {
2727 cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2728 cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2729 cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2730 cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2731 }
2732
2733 // Set SHA256 header manually
2734 sh256 := sha256.Sum256(b)
2735 meta = map[string]string{"x-amz-checksum-sha256": base64.StdEncoding.EncodeToString(sh256[:])}
2736 args["metadata"] = meta
2737 resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
2738 DisableMultipart: true,
2739 UserMetadata: meta,
2740 DisableContentSha256: true,
2741 SendContentMd5: false,
2742 })
2743 if err != nil {
2744 logError(testName, function, args, startTime, "", "PutObject failed", err)
2745 return
2746 }
2747 cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
2748 cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
2749 cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
2750 cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
2751 delete(args, "metadata")
2752 }
2753
2754 successLogger(testName, function, args, startTime).Info()
2755}
2756
2757// Test PutObject using a large data to trigger multipart readat
2758func testPutObjectWithMetadata() {
2759 // initialize logging params
2760 startTime := time.Now()
2761 testName := getFuncName()
2762 function := "PutObject(bucketName, objectName, reader,size, opts)"
2763 args := map[string]interface{}{
2764 "bucketName": "",
2765 "objectName": "",
2766 "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
2767 }
2768
2769 if !isFullMode() {
2770 ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
2771 return
2772 }
2773
2774 // Seed random based on current time.
2775 rand.Seed(time.Now().Unix())
2776
2777 // Instantiate new minio client object.
2778 c, err := minio.New(os.Getenv(serverEndpoint),
2779 &minio.Options{
2780 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2781 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2782 })
2783 if err != nil {
2784 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2785 return
2786 }
2787
2788 // Enable tracing, write to stderr.
2789 // c.TraceOn(os.Stderr)
2790
2791 // Set user agent.
2792 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2793
2794 // Generate a new random bucket name.
2795 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2796 args["bucketName"] = bucketName
2797
2798 // Make a new bucket.
2799 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2800 if err != nil {
2801 logError(testName, function, args, startTime, "", "Make bucket failed", err)
2802 return
2803 }
2804
2805 defer cleanupBucket(bucketName, c)
2806
2807 bufSize := dataFileMap["datafile-129-MB"]
2808 reader := getDataReader("datafile-129-MB")
2809 defer reader.Close()
2810
2811 // Save the data
2812 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
2813 args["objectName"] = objectName
2814
2815 // Object custom metadata
2816 customContentType := "custom/contenttype"
2817
2818 args["metadata"] = map[string][]string{
2819 "Content-Type": {customContentType},
2820 "X-Amz-Meta-CustomKey": {"extra spaces in value"},
2821 }
2822
2823 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
2824 ContentType: customContentType,
2825 })
2826 if err != nil {
2827 logError(testName, function, args, startTime, "", "PutObject failed", err)
2828 return
2829 }
2830
2831 // Read the data back
2832 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
2833 if err != nil {
2834 logError(testName, function, args, startTime, "", "GetObject failed", err)
2835 return
2836 }
2837
2838 st, err := r.Stat()
2839 if err != nil {
2840 logError(testName, function, args, startTime, "", "Stat failed", err)
2841 return
2842 }
2843 if st.Size != int64(bufSize) {
2844 logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
2845 return
2846 }
2847 if st.ContentType != customContentType && st.ContentType != "application/octet-stream" {
2848 logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err)
2849 return
2850 }
2851 if err := crcMatchesName(r, "datafile-129-MB"); err != nil {
2852 logError(testName, function, args, startTime, "", "data CRC check failed", err)
2853 return
2854 }
2855 if err := r.Close(); err != nil {
2856 logError(testName, function, args, startTime, "", "Object Close failed", err)
2857 return
2858 }
2859 if err := r.Close(); err == nil {
2860 logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
2861 return
2862 }
2863
2864 successLogger(testName, function, args, startTime).Info()
2865}
2866
2867func testPutObjectWithContentLanguage() {
2868 // initialize logging params
2869 objectName := "test-object"
2870 startTime := time.Now()
2871 testName := getFuncName()
2872 function := "PutObject(bucketName, objectName, reader, size, opts)"
2873 args := map[string]interface{}{
2874 "bucketName": "",
2875 "objectName": objectName,
2876 "size": -1,
2877 "opts": "",
2878 }
2879
2880 // Seed random based on current time.
2881 rand.Seed(time.Now().Unix())
2882
2883 // Instantiate new minio client object.
2884 c, err := minio.New(os.Getenv(serverEndpoint),
2885 &minio.Options{
2886 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2887 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2888 })
2889 if err != nil {
2890 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2891 return
2892 }
2893
2894 // Enable tracing, write to stderr.
2895 // c.TraceOn(os.Stderr)
2896
2897 // Set user agent.
2898 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2899
2900 // Generate a new random bucket name.
2901 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2902 args["bucketName"] = bucketName
2903 // Make a new bucket.
2904 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2905 if err != nil {
2906 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
2907 return
2908 }
2909
2910 defer cleanupBucket(bucketName, c)
2911
2912 data := []byte{}
2913 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{
2914 ContentLanguage: "en",
2915 })
2916 if err != nil {
2917 logError(testName, function, args, startTime, "", "PutObject failed", err)
2918 return
2919 }
2920
2921 objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
2922 if err != nil {
2923 logError(testName, function, args, startTime, "", "StatObject failed", err)
2924 return
2925 }
2926
2927 if objInfo.Metadata.Get("Content-Language") != "en" {
2928 logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err)
2929 return
2930 }
2931
2932 successLogger(testName, function, args, startTime).Info()
2933}
2934
2935// Test put object with streaming signature.
2936func testPutObjectStreaming() {
2937 // initialize logging params
2938 objectName := "test-object"
2939 startTime := time.Now()
2940 testName := getFuncName()
2941 function := "PutObject(bucketName, objectName, reader,size,opts)"
2942 args := map[string]interface{}{
2943 "bucketName": "",
2944 "objectName": objectName,
2945 "size": -1,
2946 "opts": "",
2947 }
2948
2949 // Seed random based on current time.
2950 rand.Seed(time.Now().Unix())
2951
2952 // Instantiate new minio client object.
2953 c, err := minio.New(os.Getenv(serverEndpoint),
2954 &minio.Options{
2955 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
2956 Secure: mustParseBool(os.Getenv(enableHTTPS)),
2957 })
2958 if err != nil {
2959 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
2960 return
2961 }
2962
2963 // Enable tracing, write to stderr.
2964 // c.TraceOn(os.Stderr)
2965
2966 // Set user agent.
2967 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
2968
2969 // Generate a new random bucket name.
2970 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
2971 args["bucketName"] = bucketName
2972 // Make a new bucket.
2973 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
2974 if err != nil {
2975 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
2976 return
2977 }
2978
2979 defer cleanupBucket(bucketName, c)
2980
2981 // Upload an object.
2982 sizes := []int64{0, 64*1024 - 1, 64 * 1024}
2983
2984 for _, size := range sizes {
2985 data := newRandomReader(size, size)
2986 ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{})
2987 if err != nil {
2988 logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
2989 return
2990 }
2991
2992 if ui.Size != size {
2993 logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil)
2994 return
2995 }
2996
2997 objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
2998 if err != nil {
2999 logError(testName, function, args, startTime, "", "StatObject failed", err)
3000 return
3001 }
3002 if objInfo.Size != size {
3003 logError(testName, function, args, startTime, "", "Unexpected size", err)
3004 return
3005 }
3006
3007 }
3008
3009 successLogger(testName, function, args, startTime).Info()
3010}
3011
3012// Test get object seeker from the end, using whence set to '2'.
3013func testGetObjectSeekEnd() {
3014 // initialize logging params
3015 startTime := time.Now()
3016 testName := getFuncName()
3017 function := "GetObject(bucketName, objectName)"
3018 args := map[string]interface{}{}
3019
3020 // Seed random based on current time.
3021 rand.Seed(time.Now().Unix())
3022
3023 // Instantiate new minio client object.
3024 c, err := minio.New(os.Getenv(serverEndpoint),
3025 &minio.Options{
3026 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3027 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3028 })
3029 if err != nil {
3030 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3031 return
3032 }
3033
3034 // Enable tracing, write to stderr.
3035 // c.TraceOn(os.Stderr)
3036
3037 // Set user agent.
3038 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3039
3040 // Generate a new random bucket name.
3041 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3042 args["bucketName"] = bucketName
3043
3044 // Make a new bucket.
3045 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3046 if err != nil {
3047 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3048 return
3049 }
3050
3051 defer cleanupBucket(bucketName, c)
3052
3053 // Generate 33K of data.
3054 bufSize := dataFileMap["datafile-33-kB"]
3055 reader := getDataReader("datafile-33-kB")
3056 defer reader.Close()
3057
3058 // Save the data
3059 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
3060 args["objectName"] = objectName
3061
3062 buf, err := io.ReadAll(reader)
3063 if err != nil {
3064 logError(testName, function, args, startTime, "", "ReadAll failed", err)
3065 return
3066 }
3067
3068 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
3069 if err != nil {
3070 logError(testName, function, args, startTime, "", "PutObject failed", err)
3071 return
3072 }
3073
3074 // Read the data back
3075 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
3076 if err != nil {
3077 logError(testName, function, args, startTime, "", "GetObject failed", err)
3078 return
3079 }
3080
3081 st, err := r.Stat()
3082 if err != nil {
3083 logError(testName, function, args, startTime, "", "Stat failed", err)
3084 return
3085 }
3086
3087 if st.Size != int64(bufSize) {
3088 logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
3089 return
3090 }
3091
3092 pos, err := r.Seek(-100, 2)
3093 if err != nil {
3094 logError(testName, function, args, startTime, "", "Object Seek failed", err)
3095 return
3096 }
3097 if pos != st.Size-100 {
3098 logError(testName, function, args, startTime, "", "Incorrect position", err)
3099 return
3100 }
3101 buf2 := make([]byte, 100)
3102 m, err := readFull(r, buf2)
3103 if err != nil {
3104 logError(testName, function, args, startTime, "", "Error reading through readFull", err)
3105 return
3106 }
3107 if m != len(buf2) {
3108 logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err)
3109 return
3110 }
3111 hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
3112 hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
3113 if hexBuf1 != hexBuf2 {
3114 logError(testName, function, args, startTime, "", "Values at same index dont match", err)
3115 return
3116 }
3117 pos, err = r.Seek(-100, 2)
3118 if err != nil {
3119 logError(testName, function, args, startTime, "", "Object Seek failed", err)
3120 return
3121 }
3122 if pos != st.Size-100 {
3123 logError(testName, function, args, startTime, "", "Incorrect position", err)
3124 return
3125 }
3126 if err = r.Close(); err != nil {
3127 logError(testName, function, args, startTime, "", "ObjectClose failed", err)
3128 return
3129 }
3130
3131 successLogger(testName, function, args, startTime).Info()
3132}
3133
3134// Test get object reader to not throw error on being closed twice.
3135func testGetObjectClosedTwice() {
3136 // initialize logging params
3137 startTime := time.Now()
3138 testName := getFuncName()
3139 function := "GetObject(bucketName, objectName)"
3140 args := map[string]interface{}{}
3141
3142 // Seed random based on current time.
3143 rand.Seed(time.Now().Unix())
3144
3145 // Instantiate new minio client object.
3146 c, err := minio.New(os.Getenv(serverEndpoint),
3147 &minio.Options{
3148 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3149 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3150 })
3151 if err != nil {
3152 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3153 return
3154 }
3155
3156 // Enable tracing, write to stderr.
3157 // c.TraceOn(os.Stderr)
3158
3159 // Set user agent.
3160 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3161
3162 // Generate a new random bucket name.
3163 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3164 args["bucketName"] = bucketName
3165
3166 // Make a new bucket.
3167 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3168 if err != nil {
3169 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3170 return
3171 }
3172
3173 defer cleanupBucket(bucketName, c)
3174
3175 // Generate 33K of data.
3176 bufSize := dataFileMap["datafile-33-kB"]
3177 reader := getDataReader("datafile-33-kB")
3178 defer reader.Close()
3179
3180 // Save the data
3181 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
3182 args["objectName"] = objectName
3183
3184 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
3185 if err != nil {
3186 logError(testName, function, args, startTime, "", "PutObject failed", err)
3187 return
3188 }
3189
3190 // Read the data back
3191 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
3192 if err != nil {
3193 logError(testName, function, args, startTime, "", "GetObject failed", err)
3194 return
3195 }
3196
3197 st, err := r.Stat()
3198 if err != nil {
3199 logError(testName, function, args, startTime, "", "Stat failed", err)
3200 return
3201 }
3202 if st.Size != int64(bufSize) {
3203 logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
3204 return
3205 }
3206 if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
3207 logError(testName, function, args, startTime, "", "data CRC check failed", err)
3208 return
3209 }
3210 if err := r.Close(); err != nil {
3211 logError(testName, function, args, startTime, "", "Object Close failed", err)
3212 return
3213 }
3214 if err := r.Close(); err == nil {
3215 logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
3216 return
3217 }
3218
3219 successLogger(testName, function, args, startTime).Info()
3220}
3221
3222// Test RemoveObjects request where context cancels after timeout
3223func testRemoveObjectsContext() {
3224 // Initialize logging params.
3225 startTime := time.Now()
3226 testName := getFuncName()
3227 function := "RemoveObjects(ctx, bucketName, objectsCh)"
3228 args := map[string]interface{}{
3229 "bucketName": "",
3230 }
3231
3232 // Seed random based on current tie.
3233 rand.Seed(time.Now().Unix())
3234
3235 // Instantiate new minio client.
3236 c, err := minio.New(os.Getenv(serverEndpoint),
3237 &minio.Options{
3238 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3239 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3240 })
3241 if err != nil {
3242 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3243 return
3244 }
3245
3246 // Set user agent.
3247 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3248 // Enable tracing, write to stdout.
3249 // c.TraceOn(os.Stderr)
3250
3251 // Generate a new random bucket name.
3252 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3253 args["bucketName"] = bucketName
3254
3255 // Make a new bucket.
3256 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3257 if err != nil {
3258 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3259 return
3260 }
3261
3262 defer cleanupBucket(bucketName, c)
3263
3264 // Generate put data.
3265 r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
3266
3267 // Multi remove of 20 objects.
3268 nrObjects := 20
3269 objectsCh := make(chan minio.ObjectInfo)
3270 go func() {
3271 defer close(objectsCh)
3272 for i := 0; i < nrObjects; i++ {
3273 objectName := "sample" + strconv.Itoa(i) + ".txt"
3274 info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
3275 minio.PutObjectOptions{ContentType: "application/octet-stream"})
3276 if err != nil {
3277 logError(testName, function, args, startTime, "", "PutObject failed", err)
3278 continue
3279 }
3280 objectsCh <- minio.ObjectInfo{
3281 Key: info.Key,
3282 VersionID: info.VersionID,
3283 }
3284 }
3285 }()
3286 // Set context to cancel in 1 nanosecond.
3287 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
3288 args["ctx"] = ctx
3289 defer cancel()
3290
3291 // Call RemoveObjects API with short timeout.
3292 errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{})
3293 // Check for error.
3294 select {
3295 case r := <-errorCh:
3296 if r.Err == nil {
3297 logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err)
3298 return
3299 }
3300 }
3301 // Set context with longer timeout.
3302 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
3303 args["ctx"] = ctx
3304 defer cancel()
3305 // Perform RemoveObjects with the longer timeout. Expect the removals to succeed.
3306 errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{})
3307 select {
3308 case r, more := <-errorCh:
3309 if more || r.Err != nil {
3310 logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
3311 return
3312 }
3313 }
3314
3315 successLogger(testName, function, args, startTime).Info()
3316}
3317
3318// Test removing multiple objects with Remove API
3319func testRemoveMultipleObjects() {
3320 // initialize logging params
3321 startTime := time.Now()
3322 testName := getFuncName()
3323 function := "RemoveObjects(bucketName, objectsCh)"
3324 args := map[string]interface{}{
3325 "bucketName": "",
3326 }
3327
3328 // Seed random based on current time.
3329 rand.Seed(time.Now().Unix())
3330
3331 // Instantiate new minio client object.
3332 c, err := minio.New(os.Getenv(serverEndpoint),
3333 &minio.Options{
3334 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3335 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3336 })
3337 if err != nil {
3338 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3339 return
3340 }
3341
3342 // Set user agent.
3343 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3344
3345 // Enable tracing, write to stdout.
3346 // c.TraceOn(os.Stderr)
3347
3348 // Generate a new random bucket name.
3349 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3350 args["bucketName"] = bucketName
3351
3352 // Make a new bucket.
3353 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3354 if err != nil {
3355 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3356 return
3357 }
3358
3359 defer cleanupBucket(bucketName, c)
3360
3361 r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
3362
3363 // Multi remove of 1100 objects
3364 nrObjects := 200
3365
3366 objectsCh := make(chan minio.ObjectInfo)
3367
3368 go func() {
3369 defer close(objectsCh)
3370 // Upload objects and send them to objectsCh
3371 for i := 0; i < nrObjects; i++ {
3372 objectName := "sample" + strconv.Itoa(i) + ".txt"
3373 info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
3374 minio.PutObjectOptions{ContentType: "application/octet-stream"})
3375 if err != nil {
3376 logError(testName, function, args, startTime, "", "PutObject failed", err)
3377 continue
3378 }
3379 objectsCh <- minio.ObjectInfo{
3380 Key: info.Key,
3381 VersionID: info.VersionID,
3382 }
3383 }
3384 }()
3385
3386 // Call RemoveObjects API
3387 errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
3388
3389 // Check if errorCh doesn't receive any error
3390 select {
3391 case r, more := <-errorCh:
3392 if more {
3393 logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
3394 return
3395 }
3396 }
3397
3398 successLogger(testName, function, args, startTime).Info()
3399}
3400
3401// Test removing multiple objects and check for results
3402func testRemoveMultipleObjectsWithResult() {
3403 // initialize logging params
3404 startTime := time.Now()
3405 testName := getFuncName()
3406 function := "RemoveObjects(bucketName, objectsCh)"
3407 args := map[string]interface{}{
3408 "bucketName": "",
3409 }
3410
3411 // Seed random based on current time.
3412 rand.Seed(time.Now().Unix())
3413
3414 // Instantiate new minio client object.
3415 c, err := minio.New(os.Getenv(serverEndpoint),
3416 &minio.Options{
3417 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3418 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3419 })
3420 if err != nil {
3421 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3422 return
3423 }
3424
3425 // Set user agent.
3426 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3427
3428 // Enable tracing, write to stdout.
3429 // c.TraceOn(os.Stderr)
3430
3431 // Generate a new random bucket name.
3432 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3433 args["bucketName"] = bucketName
3434
3435 // Make a new bucket.
3436 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
3437 if err != nil {
3438 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3439 return
3440 }
3441
3442 defer cleanupVersionedBucket(bucketName, c)
3443
3444 r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
3445
3446 nrObjects := 10
3447 nrLockedObjects := 5
3448
3449 objectsCh := make(chan minio.ObjectInfo)
3450
3451 go func() {
3452 defer close(objectsCh)
3453 // Upload objects and send them to objectsCh
3454 for i := 0; i < nrObjects; i++ {
3455 objectName := "sample" + strconv.Itoa(i) + ".txt"
3456 info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
3457 minio.PutObjectOptions{ContentType: "application/octet-stream"})
3458 if err != nil {
3459 logError(testName, function, args, startTime, "", "PutObject failed", err)
3460 return
3461 }
3462 if i < nrLockedObjects {
3463 // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC)
3464 t := time.Now().Add(5 * time.Minute)
3465 m := minio.RetentionMode(minio.Governance)
3466 opts := minio.PutObjectRetentionOptions{
3467 GovernanceBypass: false,
3468 RetainUntilDate: &t,
3469 Mode: &m,
3470 VersionID: info.VersionID,
3471 }
3472 err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
3473 if err != nil {
3474 logError(testName, function, args, startTime, "", "Error setting retention", err)
3475 return
3476 }
3477 }
3478
3479 objectsCh <- minio.ObjectInfo{
3480 Key: info.Key,
3481 VersionID: info.VersionID,
3482 }
3483 }
3484 }()
3485
3486 // Call RemoveObjects API
3487 resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
3488
3489 var foundNil, foundErr int
3490
3491 for {
3492 // Check if errorCh doesn't receive any error
3493 select {
3494 case deleteRes, ok := <-resultCh:
3495 if !ok {
3496 goto out
3497 }
3498 if deleteRes.ObjectName == "" {
3499 logError(testName, function, args, startTime, "", "Unexpected object name", nil)
3500 return
3501 }
3502 if deleteRes.ObjectVersionID == "" {
3503 logError(testName, function, args, startTime, "", "Unexpected object version ID", nil)
3504 return
3505 }
3506
3507 if deleteRes.Err == nil {
3508 foundNil++
3509 } else {
3510 foundErr++
3511 }
3512 }
3513 }
3514out:
3515 if foundNil+foundErr != nrObjects {
3516 logError(testName, function, args, startTime, "", "Unexpected number of results", nil)
3517 return
3518 }
3519
3520 if foundNil != nrObjects-nrLockedObjects {
3521 logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil)
3522 return
3523 }
3524
3525 if foundErr != nrLockedObjects {
3526 logError(testName, function, args, startTime, "", "Unexpected number of errors", nil)
3527 return
3528 }
3529
3530 successLogger(testName, function, args, startTime).Info()
3531}
3532
3533// Tests FPutObject of a big file to trigger multipart
3534func testFPutObjectMultipart() {
3535 // initialize logging params
3536 startTime := time.Now()
3537 testName := getFuncName()
3538 function := "FPutObject(bucketName, objectName, fileName, opts)"
3539 args := map[string]interface{}{
3540 "bucketName": "",
3541 "objectName": "",
3542 "fileName": "",
3543 "opts": "",
3544 }
3545
3546 // Seed random based on current time.
3547 rand.Seed(time.Now().Unix())
3548
3549 // Instantiate new minio client object.
3550 c, err := minio.New(os.Getenv(serverEndpoint),
3551 &minio.Options{
3552 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3553 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3554 })
3555 if err != nil {
3556 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3557 return
3558 }
3559
3560 // Enable tracing, write to stderr.
3561 // c.TraceOn(os.Stderr)
3562
3563 // Set user agent.
3564 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3565
3566 // Generate a new random bucket name.
3567 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3568 args["bucketName"] = bucketName
3569
3570 // Make a new bucket.
3571 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
3572 if err != nil {
3573 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3574 return
3575 }
3576
3577 defer cleanupBucket(bucketName, c)
3578
3579 // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
3580 fileName := getMintDataDirFilePath("datafile-129-MB")
3581 if fileName == "" {
3582 // Make a temp file with minPartSize bytes of data.
3583 file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
3584 if err != nil {
3585 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
3586 return
3587 }
3588 // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload.
3589 if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
3590 logError(testName, function, args, startTime, "", "Copy failed", err)
3591 return
3592 }
3593 if err = file.Close(); err != nil {
3594 logError(testName, function, args, startTime, "", "File Close failed", err)
3595 return
3596 }
3597 fileName = file.Name()
3598 args["fileName"] = fileName
3599 }
3600 totalSize := dataFileMap["datafile-129-MB"]
3601 // Set base object name
3602 objectName := bucketName + "FPutObject" + "-standard"
3603 args["objectName"] = objectName
3604
3605 objectContentType := "testapplication/octet-stream"
3606 args["objectContentType"] = objectContentType
3607
3608 // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
3609 _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType})
3610 if err != nil {
3611 logError(testName, function, args, startTime, "", "FPutObject failed", err)
3612 return
3613 }
3614
3615 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
3616 if err != nil {
3617 logError(testName, function, args, startTime, "", "GetObject failed", err)
3618 return
3619 }
3620 objInfo, err := r.Stat()
3621 if err != nil {
3622 logError(testName, function, args, startTime, "", "Unexpected error", err)
3623 return
3624 }
3625 if objInfo.Size != int64(totalSize) {
3626 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err)
3627 return
3628 }
3629 if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" {
3630 logError(testName, function, args, startTime, "", "ContentType doesn't match", err)
3631 return
3632 }
3633
3634 successLogger(testName, function, args, startTime).Info()
3635}
3636
3637// Tests FPutObject with null contentType (default = application/octet-stream)
3638func testFPutObject() {
3639 // initialize logging params
3640 startTime := time.Now()
3641 testName := getFuncName()
3642 function := "FPutObject(bucketName, objectName, fileName, opts)"
3643
3644 args := map[string]interface{}{
3645 "bucketName": "",
3646 "objectName": "",
3647 "fileName": "",
3648 "opts": "",
3649 }
3650
3651 // Seed random based on current time.
3652 rand.Seed(time.Now().Unix())
3653
3654 // Instantiate new minio client object.
3655 c, err := minio.New(os.Getenv(serverEndpoint),
3656 &minio.Options{
3657 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
3658 Secure: mustParseBool(os.Getenv(enableHTTPS)),
3659 })
3660 if err != nil {
3661 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
3662 return
3663 }
3664
3665 // Enable tracing, write to stderr.
3666 // c.TraceOn(os.Stderr)
3667
3668 // Set user agent.
3669 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
3670
3671 // Generate a new random bucket name.
3672 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
3673 location := "us-east-1"
3674
3675 // Make a new bucket.
3676 args["bucketName"] = bucketName
3677 args["location"] = location
3678 function = "MakeBucket(bucketName, location)"
3679 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
3680 if err != nil {
3681 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
3682 return
3683 }
3684
3685 defer cleanupBucket(bucketName, c)
3686
3687 // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
3688 // Use different data in part for multipart tests to check parts are uploaded in correct order.
3689 fName := getMintDataDirFilePath("datafile-129-MB")
3690 if fName == "" {
3691 // Make a temp file with minPartSize bytes of data.
3692 file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
3693 if err != nil {
3694 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
3695 return
3696 }
3697
3698 // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
3699 if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
3700 logError(testName, function, args, startTime, "", "File copy failed", err)
3701 return
3702 }
3703 // Close the file pro-actively for windows.
3704 if err = file.Close(); err != nil {
3705 logError(testName, function, args, startTime, "", "File close failed", err)
3706 return
3707 }
3708 defer os.Remove(file.Name())
3709 fName = file.Name()
3710 }
3711
3712 // Set base object name
3713 function = "FPutObject(bucketName, objectName, fileName, opts)"
3714 objectName := bucketName + "FPutObject"
3715 args["objectName"] = objectName + "-standard"
3716 args["fileName"] = fName
3717 args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
3718
3719 // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
3720 ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
3721 if err != nil {
3722 logError(testName, function, args, startTime, "", "FPutObject failed", err)
3723 return
3724 }
3725
3726 if ui.Size != int64(dataFileMap["datafile-129-MB"]) {
3727 logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err)
3728 return
3729 }
3730
3731 // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
3732 args["objectName"] = objectName + "-Octet"
3733 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
3734 if err != nil {
3735 logError(testName, function, args, startTime, "", "File close failed", err)
3736 return
3737 }
3738
3739 srcFile, err := os.Open(fName)
3740 if err != nil {
3741 logError(testName, function, args, startTime, "", "File open failed", err)
3742 return
3743 }
3744 defer srcFile.Close()
3745 // Add extension to temp file name
3746 tmpFile, err := os.Create(fName + ".gtar")
3747 if err != nil {
3748 logError(testName, function, args, startTime, "", "File create failed", err)
3749 return
3750 }
3751 _, err = io.Copy(tmpFile, srcFile)
3752 if err != nil {
3753 logError(testName, function, args, startTime, "", "File copy failed", err)
3754 return
3755 }
3756 tmpFile.Close()
3757
3758 // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
3759 args["objectName"] = objectName + "-GTar"
3760 args["opts"] = minio.PutObjectOptions{}
3761 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
3762 if err != nil {
3763 logError(testName, function, args, startTime, "", "FPutObject failed", err)
3764 return
3765 }
3766
3767 // Check headers
3768 function = "StatObject(bucketName, objectName, opts)"
3769 args["objectName"] = objectName + "-standard"
3770 rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
3771 if err != nil {
3772 logError(testName, function, args, startTime, "", "StatObject failed", err)
3773 return
3774 }
3775 if rStandard.ContentType != "application/octet-stream" {
3776 logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
3777 return
3778 }
3779
3780 function = "StatObject(bucketName, objectName, opts)"
3781 args["objectName"] = objectName + "-Octet"
3782 rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
3783 if err != nil {
3784 logError(testName, function, args, startTime, "", "StatObject failed", err)
3785 return
3786 }
3787 if rOctet.ContentType != "application/octet-stream" {
3788 logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
3789 return
3790 }
3791
3792 function = "StatObject(bucketName, objectName, opts)"
3793 args["objectName"] = objectName + "-GTar"
3794 rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
3795 if err != nil {
3796 logError(testName, function, args, startTime, "", "StatObject failed", err)
3797 return
3798 }
3799 if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
3800 logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-tar or application/octet-stream, got "+rGTar.ContentType, err)
3801 return
3802 }
3803
3804 os.Remove(fName + ".gtar")
3805 successLogger(testName, function, args, startTime).Info()
3806}
3807
// testFPutObjectContext verifies that FPutObject honors context deadlines:
// a 1ns timeout (already expired when the call runs) must make the upload
// fail, while a 1-hour timeout must let the same upload succeed.
func testFPutObjectContext() {
	// initialize logging params consumed by mint's logError/successLogger
	startTime := time.Now()
	testName := getFuncName()
	function := "FPutObject(bucketName, objectName, fileName, opts)"
	args := map[string]interface{}{
		"bucketName": "",
		"objectName": "",
		"fileName":   "",
		"opts":       "",
	}
	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object with V4 signature credentials
	// taken from the environment.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	// Upload 1 parts worth of data to use multipart upload.
	// Use different data in part for multipart tests to check parts are uploaded in correct order.
	fName := getMintDataDirFilePath("datafile-1-MB")
	if fName == "" {
		// No mint data dir configured: make a temp file with 1 MiB bytes of data instead.
		file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
		if err != nil {
			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
			return
		}

		// Upload 1 parts to trigger multipart upload
		if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
			logError(testName, function, args, startTime, "", "File copy failed", err)
			return
		}
		// Close the file pro-actively for windows.
		if err = file.Close(); err != nil {
			logError(testName, function, args, startTime, "", "File close failed", err)
			return
		}
		defer os.Remove(file.Name())
		fName = file.Name()
	}

	// Set base object name
	objectName := bucketName + "FPutObjectContext"
	args["objectName"] = objectName
	// 1ns deadline: expired before FPutObject can do any work.
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
	args["ctx"] = ctx
	defer cancel()

	// Perform FPutObject with the expired context; the call is expected to fail.
	_, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err == nil {
		logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err)
		return
	}
	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
	defer cancel()
	// Perform FPutObject with a long timeout. Expect the put object to succeed
	_, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err)
		return
	}

	// Confirm the long-timeout upload actually landed on the server.
	_, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
3908
// testFPutObjectContextV2 is the V2-signature variant of
// testFPutObjectContext: it verifies FPutObject honors context deadlines
// (expired 1ns deadline must fail, 1-hour deadline must succeed) when the
// client authenticates with AWS signature version 2.
func testFPutObjectContextV2() {
	// initialize logging params consumed by mint's logError/successLogger
	startTime := time.Now()
	testName := getFuncName()
	function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)"
	args := map[string]interface{}{
		"bucketName": "",
		"objectName": "",
		"opts":       "minio.PutObjectOptions{ContentType:objectContentType}",
	}
	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object with V2 signature credentials
	// taken from the environment.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	// Upload 1 parts worth of data to use multipart upload.
	// Use different data in part for multipart tests to check parts are uploaded in correct order.
	fName := getMintDataDirFilePath("datafile-1-MB")
	if fName == "" {
		// No mint data dir configured: make a temp file with 1 MiB bytes of data instead.
		file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
		if err != nil {
			logError(testName, function, args, startTime, "", "Temp file creation failed", err)
			return
		}

		// Upload 1 parts to trigger multipart upload
		if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
			logError(testName, function, args, startTime, "", "File copy failed", err)
			return
		}

		// Close the file pro-actively for windows.
		if err = file.Close(); err != nil {
			logError(testName, function, args, startTime, "", "File close failed", err)
			return
		}
		defer os.Remove(file.Name())
		fName = file.Name()
	}

	// Set base object name
	objectName := bucketName + "FPutObjectContext"
	args["objectName"] = objectName

	// 1ns deadline: expired before FPutObject can do any work.
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
	args["ctx"] = ctx
	defer cancel()

	// Perform FPutObject with the expired context; the call is expected to fail.
	_, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err == nil {
		logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err)
		return
	}
	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
	defer cancel()
	// Perform FPutObject with a long timeout. Expect the put object to succeed
	_, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err)
		return
	}

	// Confirm the long-timeout upload actually landed on the server.
	_, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
4010
4011// Test validates putObject with context to see if request cancellation is honored.
4012func testPutObjectContext() {
4013 // initialize logging params
4014 startTime := time.Now()
4015 testName := getFuncName()
4016 function := "PutObject(ctx, bucketName, objectName, fileName, opts)"
4017 args := map[string]interface{}{
4018 "ctx": "",
4019 "bucketName": "",
4020 "objectName": "",
4021 "opts": "",
4022 }
4023
4024 // Instantiate new minio client object.
4025 c, err := minio.New(os.Getenv(serverEndpoint),
4026 &minio.Options{
4027 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4028 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4029 })
4030 if err != nil {
4031 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4032 return
4033 }
4034
4035 // Enable tracing, write to stderr.
4036 // c.TraceOn(os.Stderr)
4037
4038 // Set user agent.
4039 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4040
4041 // Make a new bucket.
4042 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4043 args["bucketName"] = bucketName
4044
4045 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4046 if err != nil {
4047 logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
4048 return
4049 }
4050
4051 defer cleanupBucket(bucketName, c)
4052
4053 bufSize := dataFileMap["datafile-33-kB"]
4054 reader := getDataReader("datafile-33-kB")
4055 defer reader.Close()
4056 objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
4057 args["objectName"] = objectName
4058
4059 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
4060 cancel()
4061 args["ctx"] = ctx
4062 args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"}
4063
4064 _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4065 if err == nil {
4066 logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err)
4067 return
4068 }
4069
4070 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
4071 args["ctx"] = ctx
4072
4073 defer cancel()
4074 reader = getDataReader("datafile-33-kB")
4075 defer reader.Close()
4076 _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4077 if err != nil {
4078 logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
4079 return
4080 }
4081
4082 successLogger(testName, function, args, startTime).Info()
4083}
4084
4085// Tests get object with s3zip extensions.
4086func testGetObjectS3Zip() {
4087 // initialize logging params
4088 startTime := time.Now()
4089 testName := getFuncName()
4090 function := "GetObject(bucketName, objectName)"
4091 args := map[string]interface{}{"x-minio-extract": true}
4092
4093 // Seed random based on current time.
4094 rand.Seed(time.Now().Unix())
4095
4096 // Instantiate new minio client object.
4097 c, err := minio.New(os.Getenv(serverEndpoint),
4098 &minio.Options{
4099 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4100 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4101 })
4102 if err != nil {
4103 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4104 return
4105 }
4106
4107 // Enable tracing, write to stderr.
4108 // c.TraceOn(os.Stderr)
4109
4110 // Set user agent.
4111 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4112
4113 // Generate a new random bucket name.
4114 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4115 args["bucketName"] = bucketName
4116
4117 // Make a new bucket.
4118 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4119 if err != nil {
4120 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4121 return
4122 }
4123
4124 defer func() {
4125 // Delete all objects and buckets
4126 if err = cleanupBucket(bucketName, c); err != nil {
4127 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
4128 return
4129 }
4130 }()
4131
4132 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip"
4133 args["objectName"] = objectName
4134
4135 var zipFile bytes.Buffer
4136 zw := zip.NewWriter(&zipFile)
4137 rng := rand.New(rand.NewSource(0xc0cac01a))
4138 const nFiles = 500
4139 for i := 0; i <= nFiles; i++ {
4140 if i == nFiles {
4141 // Make one large, compressible file.
4142 i = 1000000
4143 }
4144 b := make([]byte, i)
4145 if i < nFiles {
4146 rng.Read(b)
4147 }
4148 wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i))
4149 if err != nil {
4150 logError(testName, function, args, startTime, "", "zw.Create failed", err)
4151 return
4152 }
4153 wc.Write(b)
4154 }
4155 err = zw.Close()
4156 if err != nil {
4157 logError(testName, function, args, startTime, "", "zw.Close failed", err)
4158 return
4159 }
4160 buf := zipFile.Bytes()
4161
4162 // Save the data
4163 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4164 if err != nil {
4165 logError(testName, function, args, startTime, "", "PutObject failed", err)
4166 return
4167 }
4168
4169 // Read the data back
4170 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
4171 if err != nil {
4172 logError(testName, function, args, startTime, "", "GetObject failed", err)
4173 return
4174 }
4175
4176 st, err := r.Stat()
4177 if err != nil {
4178 logError(testName, function, args, startTime, "", "Stat object failed", err)
4179 return
4180 }
4181
4182 if st.Size != int64(len(buf)) {
4183 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(len(buf))+", got "+string(st.Size), err)
4184 return
4185 }
4186 r.Close()
4187
4188 zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
4189 if err != nil {
4190 logError(testName, function, args, startTime, "", "zip.NewReader failed", err)
4191 return
4192 }
4193 lOpts := minio.ListObjectsOptions{}
4194 lOpts.Set("x-minio-extract", "true")
4195 lOpts.Prefix = objectName + "/"
4196 lOpts.Recursive = true
4197 list := c.ListObjects(context.Background(), bucketName, lOpts)
4198 listed := map[string]minio.ObjectInfo{}
4199 for item := range list {
4200 if item.Err != nil {
4201 break
4202 }
4203 listed[item.Key] = item
4204 }
4205 if len(listed) == 0 {
4206 // Assume we are running against non-minio.
4207 args["SKIPPED"] = true
4208 ignoredLog(testName, function, args, startTime, "s3zip does not appear to be present").Info()
4209 return
4210 }
4211
4212 for _, file := range zr.File {
4213 if file.FileInfo().IsDir() {
4214 continue
4215 }
4216 args["zipfile"] = file.Name
4217 zfr, err := file.Open()
4218 if err != nil {
4219 logError(testName, function, args, startTime, "", "file.Open failed", err)
4220 return
4221 }
4222 want, err := io.ReadAll(zfr)
4223 if err != nil {
4224 logError(testName, function, args, startTime, "", "fzip file read failed", err)
4225 return
4226 }
4227
4228 opts := minio.GetObjectOptions{}
4229 opts.Set("x-minio-extract", "true")
4230 key := path.Join(objectName, file.Name)
4231 r, err = c.GetObject(context.Background(), bucketName, key, opts)
4232 if err != nil {
4233 terr := minio.ToErrorResponse(err)
4234 if terr.StatusCode != http.StatusNotFound {
4235 logError(testName, function, args, startTime, "", "GetObject failed", err)
4236 }
4237 return
4238 }
4239 got, err := io.ReadAll(r)
4240 if err != nil {
4241 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4242 return
4243 }
4244 r.Close()
4245 if !bytes.Equal(want, got) {
4246 logError(testName, function, args, startTime, "", "Content mismatch", err)
4247 return
4248 }
4249 oi, ok := listed[key]
4250 if !ok {
4251 logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key))
4252 return
4253 }
4254 if int(oi.Size) != len(got) {
4255 logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got)))
4256 return
4257 }
4258 delete(listed, key)
4259 }
4260 delete(args, "zipfile")
4261 if len(listed) > 0 {
4262 logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed))
4263 return
4264 }
4265 successLogger(testName, function, args, startTime).Info()
4266}
4267
4268// Tests get object ReaderSeeker interface methods.
4269func testGetObjectReadSeekFunctional() {
4270 // initialize logging params
4271 startTime := time.Now()
4272 testName := getFuncName()
4273 function := "GetObject(bucketName, objectName)"
4274 args := map[string]interface{}{}
4275
4276 // Seed random based on current time.
4277 rand.Seed(time.Now().Unix())
4278
4279 // Instantiate new minio client object.
4280 c, err := minio.New(os.Getenv(serverEndpoint),
4281 &minio.Options{
4282 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4283 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4284 })
4285 if err != nil {
4286 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4287 return
4288 }
4289
4290 // Enable tracing, write to stderr.
4291 // c.TraceOn(os.Stderr)
4292
4293 // Set user agent.
4294 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4295
4296 // Generate a new random bucket name.
4297 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4298 args["bucketName"] = bucketName
4299
4300 // Make a new bucket.
4301 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4302 if err != nil {
4303 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4304 return
4305 }
4306
4307 defer func() {
4308 // Delete all objects and buckets
4309 if err = cleanupBucket(bucketName, c); err != nil {
4310 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
4311 return
4312 }
4313 }()
4314
4315 // Generate 33K of data.
4316 bufSize := dataFileMap["datafile-33-kB"]
4317 reader := getDataReader("datafile-33-kB")
4318 defer reader.Close()
4319
4320 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4321 args["objectName"] = objectName
4322
4323 buf, err := io.ReadAll(reader)
4324 if err != nil {
4325 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4326 return
4327 }
4328
4329 // Save the data
4330 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4331 if err != nil {
4332 logError(testName, function, args, startTime, "", "PutObject failed", err)
4333 return
4334 }
4335
4336 // Read the data back
4337 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
4338 if err != nil {
4339 logError(testName, function, args, startTime, "", "GetObject failed", err)
4340 return
4341 }
4342
4343 st, err := r.Stat()
4344 if err != nil {
4345 logError(testName, function, args, startTime, "", "Stat object failed", err)
4346 return
4347 }
4348
4349 if st.Size != int64(bufSize) {
4350 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
4351 return
4352 }
4353
4354 // This following function helps us to compare data from the reader after seek
4355 // with the data from the original buffer
4356 cmpData := func(r io.Reader, start, end int) {
4357 if end-start == 0 {
4358 return
4359 }
4360 buffer := bytes.NewBuffer([]byte{})
4361 if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
4362 if err != io.EOF {
4363 logError(testName, function, args, startTime, "", "CopyN failed", err)
4364 return
4365 }
4366 }
4367 if !bytes.Equal(buf[start:end], buffer.Bytes()) {
4368 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
4369 return
4370 }
4371 }
4372
4373 // Generic seek error for errors other than io.EOF
4374 seekErr := errors.New("seek error")
4375
4376 testCases := []struct {
4377 offset int64
4378 whence int
4379 pos int64
4380 err error
4381 shouldCmp bool
4382 start int
4383 end int
4384 }{
4385 // Start from offset 0, fetch data and compare
4386 {0, 0, 0, nil, true, 0, 0},
4387 // Start from offset 2048, fetch data and compare
4388 {2048, 0, 2048, nil, true, 2048, bufSize},
4389 // Start from offset larger than possible
4390 {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
4391 // Move to offset 0 without comparing
4392 {0, 0, 0, nil, false, 0, 0},
4393 // Move one step forward and compare
4394 {1, 1, 1, nil, true, 1, bufSize},
4395 // Move larger than possible
4396 {int64(bufSize), 1, 0, seekErr, false, 0, 0},
4397 // Provide negative offset with CUR_SEEK
4398 {int64(-1), 1, 0, seekErr, false, 0, 0},
4399 // Test with whence SEEK_END and with positive offset
4400 {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
4401 // Test with whence SEEK_END and with negative offset
4402 {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
4403 // Test with whence SEEK_END and with large negative offset
4404 {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
4405 }
4406
4407 for i, testCase := range testCases {
4408 // Perform seek operation
4409 n, err := r.Seek(testCase.offset, testCase.whence)
4410 // We expect an error
4411 if testCase.err == seekErr && err == nil {
4412 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err)
4413 return
4414 }
4415 // We expect a specific error
4416 if testCase.err != seekErr && testCase.err != err {
4417 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err)
4418 return
4419 }
4420 // If we expect an error go to the next loop
4421 if testCase.err != nil {
4422 continue
4423 }
4424 // Check the returned seek pos
4425 if n != testCase.pos {
4426 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err)
4427 return
4428 }
4429 // Compare only if shouldCmp is activated
4430 if testCase.shouldCmp {
4431 cmpData(r, testCase.start, testCase.end)
4432 }
4433 }
4434 successLogger(testName, function, args, startTime).Info()
4435}
4436
4437// Tests get object ReaderAt interface methods.
4438func testGetObjectReadAtFunctional() {
4439 // initialize logging params
4440 startTime := time.Now()
4441 testName := getFuncName()
4442 function := "GetObject(bucketName, objectName)"
4443 args := map[string]interface{}{}
4444
4445 // Seed random based on current time.
4446 rand.Seed(time.Now().Unix())
4447
4448 // Instantiate new minio client object.
4449 c, err := minio.New(os.Getenv(serverEndpoint),
4450 &minio.Options{
4451 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4452 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4453 })
4454 if err != nil {
4455 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4456 return
4457 }
4458
4459 // Enable tracing, write to stderr.
4460 // c.TraceOn(os.Stderr)
4461
4462 // Set user agent.
4463 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4464
4465 // Generate a new random bucket name.
4466 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4467 args["bucketName"] = bucketName
4468
4469 // Make a new bucket.
4470 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4471 if err != nil {
4472 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4473 return
4474 }
4475
4476 defer cleanupBucket(bucketName, c)
4477
4478 // Generate 33K of data.
4479 bufSize := dataFileMap["datafile-33-kB"]
4480 reader := getDataReader("datafile-33-kB")
4481 defer reader.Close()
4482
4483 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4484 args["objectName"] = objectName
4485
4486 buf, err := io.ReadAll(reader)
4487 if err != nil {
4488 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4489 return
4490 }
4491
4492 // Save the data
4493 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4494 if err != nil {
4495 logError(testName, function, args, startTime, "", "PutObject failed", err)
4496 return
4497 }
4498
4499 // read the data back
4500 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
4501 if err != nil {
4502 logError(testName, function, args, startTime, "", "PutObject failed", err)
4503 return
4504 }
4505 offset := int64(2048)
4506
4507 // read directly
4508 buf1 := make([]byte, 512)
4509 buf2 := make([]byte, 512)
4510 buf3 := make([]byte, 512)
4511 buf4 := make([]byte, 512)
4512
4513 // Test readAt before stat is called such that objectInfo doesn't change.
4514 m, err := r.ReadAt(buf1, offset)
4515 if err != nil {
4516 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4517 return
4518 }
4519 if m != len(buf1) {
4520 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
4521 return
4522 }
4523 if !bytes.Equal(buf1, buf[offset:offset+512]) {
4524 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
4525 return
4526 }
4527 offset += 512
4528
4529 st, err := r.Stat()
4530 if err != nil {
4531 logError(testName, function, args, startTime, "", "Stat failed", err)
4532 return
4533 }
4534
4535 if st.Size != int64(bufSize) {
4536 logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
4537 return
4538 }
4539
4540 m, err = r.ReadAt(buf2, offset)
4541 if err != nil {
4542 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4543 return
4544 }
4545 if m != len(buf2) {
4546 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
4547 return
4548 }
4549 if !bytes.Equal(buf2, buf[offset:offset+512]) {
4550 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
4551 return
4552 }
4553
4554 offset += 512
4555 m, err = r.ReadAt(buf3, offset)
4556 if err != nil {
4557 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4558 return
4559 }
4560 if m != len(buf3) {
4561 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err)
4562 return
4563 }
4564 if !bytes.Equal(buf3, buf[offset:offset+512]) {
4565 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
4566 return
4567 }
4568 offset += 512
4569 m, err = r.ReadAt(buf4, offset)
4570 if err != nil {
4571 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4572 return
4573 }
4574 if m != len(buf4) {
4575 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err)
4576 return
4577 }
4578 if !bytes.Equal(buf4, buf[offset:offset+512]) {
4579 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
4580 return
4581 }
4582
4583 buf5 := make([]byte, len(buf))
4584 // Read the whole object.
4585 m, err = r.ReadAt(buf5, 0)
4586 if err != nil {
4587 if err != io.EOF {
4588 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4589 return
4590 }
4591 }
4592 if m != len(buf5) {
4593 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err)
4594 return
4595 }
4596 if !bytes.Equal(buf, buf5) {
4597 logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
4598 return
4599 }
4600
4601 buf6 := make([]byte, len(buf)+1)
4602 // Read the whole object and beyond.
4603 _, err = r.ReadAt(buf6, 0)
4604 if err != nil {
4605 if err != io.EOF {
4606 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4607 return
4608 }
4609 }
4610
4611 successLogger(testName, function, args, startTime).Info()
4612}
4613
4614// Reproduces issue https://github.com/minio/minio-go/issues/1137
4615func testGetObjectReadAtWhenEOFWasReached() {
4616 // initialize logging params
4617 startTime := time.Now()
4618 testName := getFuncName()
4619 function := "GetObject(bucketName, objectName)"
4620 args := map[string]interface{}{}
4621
4622 // Seed random based on current time.
4623 rand.Seed(time.Now().Unix())
4624
4625 // Instantiate new minio client object.
4626 c, err := minio.New(os.Getenv(serverEndpoint),
4627 &minio.Options{
4628 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4629 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4630 })
4631 if err != nil {
4632 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4633 return
4634 }
4635
4636 // Enable tracing, write to stderr.
4637 // c.TraceOn(os.Stderr)
4638
4639 // Set user agent.
4640 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4641
4642 // Generate a new random bucket name.
4643 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4644 args["bucketName"] = bucketName
4645
4646 // Make a new bucket.
4647 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4648 if err != nil {
4649 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4650 return
4651 }
4652
4653 defer cleanupBucket(bucketName, c)
4654
4655 // Generate 33K of data.
4656 bufSize := dataFileMap["datafile-33-kB"]
4657 reader := getDataReader("datafile-33-kB")
4658 defer reader.Close()
4659
4660 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4661 args["objectName"] = objectName
4662
4663 buf, err := io.ReadAll(reader)
4664 if err != nil {
4665 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4666 return
4667 }
4668
4669 // Save the data
4670 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4671 if err != nil {
4672 logError(testName, function, args, startTime, "", "PutObject failed", err)
4673 return
4674 }
4675
4676 // read the data back
4677 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
4678 if err != nil {
4679 logError(testName, function, args, startTime, "", "PutObject failed", err)
4680 return
4681 }
4682
4683 // read directly
4684 buf1 := make([]byte, len(buf))
4685 buf2 := make([]byte, 512)
4686
4687 m, err := r.Read(buf1)
4688 if err != nil {
4689 if err != io.EOF {
4690 logError(testName, function, args, startTime, "", "Read failed", err)
4691 return
4692 }
4693 }
4694 if m != len(buf1) {
4695 logError(testName, function, args, startTime, "", "Read read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
4696 return
4697 }
4698 if !bytes.Equal(buf1, buf) {
4699 logError(testName, function, args, startTime, "", "Incorrect count of Read data", err)
4700 return
4701 }
4702
4703 st, err := r.Stat()
4704 if err != nil {
4705 logError(testName, function, args, startTime, "", "Stat failed", err)
4706 return
4707 }
4708
4709 if st.Size != int64(bufSize) {
4710 logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
4711 return
4712 }
4713
4714 m, err = r.ReadAt(buf2, 512)
4715 if err != nil {
4716 logError(testName, function, args, startTime, "", "ReadAt failed", err)
4717 return
4718 }
4719 if m != len(buf2) {
4720 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
4721 return
4722 }
4723 if !bytes.Equal(buf2, buf[512:1024]) {
4724 logError(testName, function, args, startTime, "", "Incorrect count of ReadAt data", err)
4725 return
4726 }
4727
4728 successLogger(testName, function, args, startTime).Info()
4729}
4730
4731// Test Presigned Post Policy
4732func testPresignedPostPolicy() {
4733 // initialize logging params
4734 startTime := time.Now()
4735 testName := getFuncName()
4736 function := "PresignedPostPolicy(policy)"
4737 args := map[string]interface{}{
4738 "policy": "",
4739 }
4740
4741 // Seed random based on current time.
4742 rand.Seed(time.Now().Unix())
4743
4744 // Instantiate new minio client object
4745 c, err := minio.New(os.Getenv(serverEndpoint),
4746 &minio.Options{
4747 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4748 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4749 })
4750 if err != nil {
4751 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4752 return
4753 }
4754
4755 // Enable tracing, write to stderr.
4756 // c.TraceOn(os.Stderr)
4757
4758 // Set user agent.
4759 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4760
4761 // Generate a new random bucket name.
4762 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4763
4764 // Make a new bucket in 'us-east-1' (source bucket).
4765 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4766 if err != nil {
4767 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4768 return
4769 }
4770
4771 defer cleanupBucket(bucketName, c)
4772
4773 // Generate 33K of data.
4774 reader := getDataReader("datafile-33-kB")
4775 defer reader.Close()
4776
4777 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4778 // Azure requires the key to not start with a number
4779 metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
4780 metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
4781
4782 buf, err := io.ReadAll(reader)
4783 if err != nil {
4784 logError(testName, function, args, startTime, "", "ReadAll failed", err)
4785 return
4786 }
4787
4788 // Save the data
4789 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
4790 if err != nil {
4791 logError(testName, function, args, startTime, "", "PutObject failed", err)
4792 return
4793 }
4794
4795 policy := minio.NewPostPolicy()
4796
4797 if err := policy.SetBucket(""); err == nil {
4798 logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
4799 return
4800 }
4801 if err := policy.SetKey(""); err == nil {
4802 logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
4803 return
4804 }
4805 if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
4806 logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
4807 return
4808 }
4809 if err := policy.SetContentType(""); err == nil {
4810 logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
4811 return
4812 }
4813 if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
4814 logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
4815 return
4816 }
4817 if err := policy.SetUserMetadata("", ""); err == nil {
4818 logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
4819 return
4820 }
4821
4822 policy.SetBucket(bucketName)
4823 policy.SetKey(objectName)
4824 policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
4825 policy.SetContentType("binary/octet-stream")
4826 policy.SetContentLengthRange(10, 1024*1024)
4827 policy.SetUserMetadata(metadataKey, metadataValue)
4828
4829 // Add CRC32C
4830 checksum := minio.ChecksumCRC32C.ChecksumBytes(buf)
4831 policy.SetChecksum(checksum)
4832
4833 args["policy"] = policy.String()
4834
4835 presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
4836 if err != nil {
4837 logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
4838 return
4839 }
4840
4841 var formBuf bytes.Buffer
4842 writer := multipart.NewWriter(&formBuf)
4843 for k, v := range formData {
4844 writer.WriteField(k, v)
4845 }
4846
4847 // Get a 33KB file to upload and test if set post policy works
4848 filePath := getMintDataDirFilePath("datafile-33-kB")
4849 if filePath == "" {
4850 // Make a temp file with 33 KB data.
4851 file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest")
4852 if err != nil {
4853 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
4854 return
4855 }
4856 if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
4857 logError(testName, function, args, startTime, "", "Copy failed", err)
4858 return
4859 }
4860 if err = file.Close(); err != nil {
4861 logError(testName, function, args, startTime, "", "File Close failed", err)
4862 return
4863 }
4864 filePath = file.Name()
4865 }
4866
4867 // add file to post request
4868 f, err := os.Open(filePath)
4869 defer f.Close()
4870 if err != nil {
4871 logError(testName, function, args, startTime, "", "File open failed", err)
4872 return
4873 }
4874 w, err := writer.CreateFormFile("file", filePath)
4875 if err != nil {
4876 logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
4877 return
4878 }
4879
4880 _, err = io.Copy(w, f)
4881 if err != nil {
4882 logError(testName, function, args, startTime, "", "Copy failed", err)
4883 return
4884 }
4885 writer.Close()
4886
4887 transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
4888 if err != nil {
4889 logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
4890 return
4891 }
4892
4893 httpClient := &http.Client{
4894 // Setting a sensible time out of 30secs to wait for response
4895 // headers. Request is pro-actively canceled after 30secs
4896 // with no response.
4897 Timeout: 30 * time.Second,
4898 Transport: transport,
4899 }
4900 args["url"] = presignedPostPolicyURL.String()
4901
4902 req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes()))
4903 if err != nil {
4904 logError(testName, function, args, startTime, "", "Http request failed", err)
4905 return
4906 }
4907
4908 req.Header.Set("Content-Type", writer.FormDataContentType())
4909
4910 // make post request with correct form data
4911 res, err := httpClient.Do(req)
4912 if err != nil {
4913 logError(testName, function, args, startTime, "", "Http request failed", err)
4914 return
4915 }
4916 defer res.Body.Close()
4917 if res.StatusCode != http.StatusNoContent {
4918 logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status))
4919 return
4920 }
4921
4922 // expected path should be absolute path of the object
4923 var scheme string
4924 if mustParseBool(os.Getenv(enableHTTPS)) {
4925 scheme = "https://"
4926 } else {
4927 scheme = "http://"
4928 }
4929
4930 expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
4931 expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
4932
4933 if !strings.Contains(expectedLocation, "s3.amazonaws.com/") {
4934 // Test when not against AWS S3.
4935 if val, ok := res.Header["Location"]; ok {
4936 if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
4937 logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err)
4938 return
4939 }
4940 } else {
4941 logError(testName, function, args, startTime, "", "Location not found in header response", err)
4942 return
4943 }
4944 }
4945 want := checksum.Encoded()
4946 if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want {
4947 logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil)
4948 return
4949 }
4950
4951 successLogger(testName, function, args, startTime).Info()
4952}
4953
4954// Tests copy object
4955func testCopyObject() {
4956 // initialize logging params
4957 startTime := time.Now()
4958 testName := getFuncName()
4959 function := "CopyObject(dst, src)"
4960 args := map[string]interface{}{}
4961
4962 // Seed random based on current time.
4963 rand.Seed(time.Now().Unix())
4964
4965 // Instantiate new minio client object
4966 c, err := minio.New(os.Getenv(serverEndpoint),
4967 &minio.Options{
4968 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
4969 Secure: mustParseBool(os.Getenv(enableHTTPS)),
4970 })
4971 if err != nil {
4972 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
4973 return
4974 }
4975
4976 // Enable tracing, write to stderr.
4977 // c.TraceOn(os.Stderr)
4978
4979 // Set user agent.
4980 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
4981
4982 // Generate a new random bucket name.
4983 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
4984
4985 // Make a new bucket in 'us-east-1' (source bucket).
4986 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
4987 if err != nil {
4988 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4989 return
4990 }
4991
4992 defer cleanupBucket(bucketName, c)
4993
4994 // Make a new bucket in 'us-east-1' (destination bucket).
4995 err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"})
4996 if err != nil {
4997 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
4998 return
4999 }
5000 defer cleanupBucket(bucketName+"-copy", c)
5001
5002 // Generate 33K of data.
5003 bufSize := dataFileMap["datafile-33-kB"]
5004 reader := getDataReader("datafile-33-kB")
5005
5006 // Save the data
5007 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5008 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
5009 if err != nil {
5010 logError(testName, function, args, startTime, "", "PutObject failed", err)
5011 return
5012 }
5013
5014 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
5015 if err != nil {
5016 logError(testName, function, args, startTime, "", "GetObject failed", err)
5017 return
5018 }
5019 // Check the various fields of source object against destination object.
5020 objInfo, err := r.Stat()
5021 if err != nil {
5022 logError(testName, function, args, startTime, "", "Stat failed", err)
5023 return
5024 }
5025
5026 // Copy Source
5027 src := minio.CopySrcOptions{
5028 Bucket: bucketName,
5029 Object: objectName,
5030 // Set copy conditions.
5031 MatchETag: objInfo.ETag,
5032 MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
5033 }
5034 args["src"] = src
5035
5036 dst := minio.CopyDestOptions{
5037 Bucket: bucketName + "-copy",
5038 Object: objectName + "-copy",
5039 }
5040
5041 // Perform the Copy
5042 if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
5043 logError(testName, function, args, startTime, "", "CopyObject failed", err)
5044 return
5045 }
5046
5047 // Source object
5048 r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
5049 if err != nil {
5050 logError(testName, function, args, startTime, "", "GetObject failed", err)
5051 return
5052 }
5053
5054 // Destination object
5055 readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
5056 if err != nil {
5057 logError(testName, function, args, startTime, "", "GetObject failed", err)
5058 return
5059 }
5060
5061 // Check the various fields of source object against destination object.
5062 objInfo, err = r.Stat()
5063 if err != nil {
5064 logError(testName, function, args, startTime, "", "Stat failed", err)
5065 return
5066 }
5067 objInfoCopy, err := readerCopy.Stat()
5068 if err != nil {
5069 logError(testName, function, args, startTime, "", "Stat failed", err)
5070 return
5071 }
5072 if objInfo.Size != objInfoCopy.Size {
5073 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err)
5074 return
5075 }
5076
5077 if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
5078 logError(testName, function, args, startTime, "", "data CRC check failed", err)
5079 return
5080 }
5081 if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil {
5082 logError(testName, function, args, startTime, "", "copy data CRC check failed", err)
5083 return
5084 }
5085 // Close all the get readers before proceeding with CopyObject operations.
5086 r.Close()
5087 readerCopy.Close()
5088
5089 // CopyObject again but with wrong conditions
5090 src = minio.CopySrcOptions{
5091 Bucket: bucketName,
5092 Object: objectName,
5093 MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
5094 NoMatchETag: objInfo.ETag,
5095 }
5096
5097 // Perform the Copy which should fail
5098 _, err = c.CopyObject(context.Background(), dst, src)
5099 if err == nil {
5100 logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
5101 return
5102 }
5103
5104 src = minio.CopySrcOptions{
5105 Bucket: bucketName,
5106 Object: objectName,
5107 }
5108
5109 dst = minio.CopyDestOptions{
5110 Bucket: bucketName,
5111 Object: objectName,
5112 ReplaceMetadata: true,
5113 UserMetadata: map[string]string{
5114 "Copy": "should be same",
5115 },
5116 }
5117 args["dst"] = dst
5118 args["src"] = src
5119
5120 _, err = c.CopyObject(context.Background(), dst, src)
5121 if err != nil {
5122 logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err)
5123 return
5124 }
5125
5126 oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
5127 if err != nil {
5128 logError(testName, function, args, startTime, "", "StatObject failed", err)
5129 return
5130 }
5131
5132 stOpts := minio.StatObjectOptions{}
5133 stOpts.SetMatchETag(oi.ETag)
5134 objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts)
5135 if err != nil {
5136 logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err)
5137 return
5138 }
5139
5140 if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" {
5141 logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err)
5142 return
5143 }
5144
5145 successLogger(testName, function, args, startTime).Info()
5146}
5147
5148// Tests SSE-C get object ReaderSeeker interface methods.
5149func testSSECEncryptedGetObjectReadSeekFunctional() {
5150 // initialize logging params
5151 startTime := time.Now()
5152 testName := getFuncName()
5153 function := "GetObject(bucketName, objectName)"
5154 args := map[string]interface{}{}
5155
5156 // Seed random based on current time.
5157 rand.Seed(time.Now().Unix())
5158
5159 // Instantiate new minio client object.
5160 c, err := minio.New(os.Getenv(serverEndpoint),
5161 &minio.Options{
5162 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5163 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5164 })
5165 if err != nil {
5166 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5167 return
5168 }
5169
5170 // Enable tracing, write to stderr.
5171 // c.TraceOn(os.Stderr)
5172
5173 // Set user agent.
5174 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5175
5176 // Generate a new random bucket name.
5177 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5178 args["bucketName"] = bucketName
5179
5180 // Make a new bucket.
5181 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5182 if err != nil {
5183 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5184 return
5185 }
5186
5187 defer func() {
5188 // Delete all objects and buckets
5189 if err = cleanupBucket(bucketName, c); err != nil {
5190 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
5191 return
5192 }
5193 }()
5194
5195 // Generate 129MiB of data.
5196 bufSize := dataFileMap["datafile-129-MB"]
5197 reader := getDataReader("datafile-129-MB")
5198 defer reader.Close()
5199
5200 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5201 args["objectName"] = objectName
5202
5203 buf, err := io.ReadAll(reader)
5204 if err != nil {
5205 logError(testName, function, args, startTime, "", "ReadAll failed", err)
5206 return
5207 }
5208
5209 // Save the data
5210 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
5211 ContentType: "binary/octet-stream",
5212 ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
5213 })
5214 if err != nil {
5215 logError(testName, function, args, startTime, "", "PutObject failed", err)
5216 return
5217 }
5218
5219 // Read the data back
5220 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{
5221 ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
5222 })
5223 if err != nil {
5224 logError(testName, function, args, startTime, "", "GetObject failed", err)
5225 return
5226 }
5227 defer r.Close()
5228
5229 st, err := r.Stat()
5230 if err != nil {
5231 logError(testName, function, args, startTime, "", "Stat object failed", err)
5232 return
5233 }
5234
5235 if st.Size != int64(bufSize) {
5236 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
5237 return
5238 }
5239
5240 // This following function helps us to compare data from the reader after seek
5241 // with the data from the original buffer
5242 cmpData := func(r io.Reader, start, end int) {
5243 if end-start == 0 {
5244 return
5245 }
5246 buffer := bytes.NewBuffer([]byte{})
5247 if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
5248 if err != io.EOF {
5249 logError(testName, function, args, startTime, "", "CopyN failed", err)
5250 return
5251 }
5252 }
5253 if !bytes.Equal(buf[start:end], buffer.Bytes()) {
5254 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
5255 return
5256 }
5257 }
5258
5259 testCases := []struct {
5260 offset int64
5261 whence int
5262 pos int64
5263 err error
5264 shouldCmp bool
5265 start int
5266 end int
5267 }{
5268 // Start from offset 0, fetch data and compare
5269 {0, 0, 0, nil, true, 0, 0},
5270 // Start from offset 2048, fetch data and compare
5271 {2048, 0, 2048, nil, true, 2048, bufSize},
5272 // Start from offset larger than possible
5273 {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
5274 // Move to offset 0 without comparing
5275 {0, 0, 0, nil, false, 0, 0},
5276 // Move one step forward and compare
5277 {1, 1, 1, nil, true, 1, bufSize},
5278 // Move larger than possible
5279 {int64(bufSize), 1, 0, io.EOF, false, 0, 0},
5280 // Provide negative offset with CUR_SEEK
5281 {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
5282 // Test with whence SEEK_END and with positive offset
5283 {1024, 2, 0, io.EOF, false, 0, 0},
5284 // Test with whence SEEK_END and with negative offset
5285 {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
5286 // Test with whence SEEK_END and with large negative offset
5287 {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
5288 // Test with invalid whence
5289 {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
5290 }
5291
5292 for i, testCase := range testCases {
5293 // Perform seek operation
5294 n, err := r.Seek(testCase.offset, testCase.whence)
5295 if err != nil && testCase.err == nil {
5296 // We expected success.
5297 logError(testName, function, args, startTime, "",
5298 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5299 return
5300 }
5301 if err == nil && testCase.err != nil {
5302 // We expected failure, but got success.
5303 logError(testName, function, args, startTime, "",
5304 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5305 return
5306 }
5307 if err != nil && testCase.err != nil {
5308 if err.Error() != testCase.err.Error() {
5309 // We expect a specific error
5310 logError(testName, function, args, startTime, "",
5311 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5312 return
5313 }
5314 }
5315 // Check the returned seek pos
5316 if n != testCase.pos {
5317 logError(testName, function, args, startTime, "",
5318 fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err)
5319 return
5320 }
5321 // Compare only if shouldCmp is activated
5322 if testCase.shouldCmp {
5323 cmpData(r, testCase.start, testCase.end)
5324 }
5325 }
5326
5327 successLogger(testName, function, args, startTime).Info()
5328}
5329
5330// Tests SSE-S3 get object ReaderSeeker interface methods.
5331func testSSES3EncryptedGetObjectReadSeekFunctional() {
5332 // initialize logging params
5333 startTime := time.Now()
5334 testName := getFuncName()
5335 function := "GetObject(bucketName, objectName)"
5336 args := map[string]interface{}{}
5337
5338 // Seed random based on current time.
5339 rand.Seed(time.Now().Unix())
5340
5341 // Instantiate new minio client object.
5342 c, err := minio.New(os.Getenv(serverEndpoint),
5343 &minio.Options{
5344 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5345 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5346 })
5347 if err != nil {
5348 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5349 return
5350 }
5351
5352 // Enable tracing, write to stderr.
5353 // c.TraceOn(os.Stderr)
5354
5355 // Set user agent.
5356 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5357
5358 // Generate a new random bucket name.
5359 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5360 args["bucketName"] = bucketName
5361
5362 // Make a new bucket.
5363 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5364 if err != nil {
5365 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5366 return
5367 }
5368
5369 defer func() {
5370 // Delete all objects and buckets
5371 if err = cleanupBucket(bucketName, c); err != nil {
5372 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
5373 return
5374 }
5375 }()
5376
5377 // Generate 129MiB of data.
5378 bufSize := dataFileMap["datafile-129-MB"]
5379 reader := getDataReader("datafile-129-MB")
5380 defer reader.Close()
5381
5382 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5383 args["objectName"] = objectName
5384
5385 buf, err := io.ReadAll(reader)
5386 if err != nil {
5387 logError(testName, function, args, startTime, "", "ReadAll failed", err)
5388 return
5389 }
5390
5391 // Save the data
5392 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
5393 ContentType: "binary/octet-stream",
5394 ServerSideEncryption: encrypt.NewSSE(),
5395 })
5396 if err != nil {
5397 logError(testName, function, args, startTime, "", "PutObject failed", err)
5398 return
5399 }
5400
5401 // Read the data back
5402 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
5403 if err != nil {
5404 logError(testName, function, args, startTime, "", "GetObject failed", err)
5405 return
5406 }
5407 defer r.Close()
5408
5409 st, err := r.Stat()
5410 if err != nil {
5411 logError(testName, function, args, startTime, "", "Stat object failed", err)
5412 return
5413 }
5414
5415 if st.Size != int64(bufSize) {
5416 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
5417 return
5418 }
5419
5420 // This following function helps us to compare data from the reader after seek
5421 // with the data from the original buffer
5422 cmpData := func(r io.Reader, start, end int) {
5423 if end-start == 0 {
5424 return
5425 }
5426 buffer := bytes.NewBuffer([]byte{})
5427 if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
5428 if err != io.EOF {
5429 logError(testName, function, args, startTime, "", "CopyN failed", err)
5430 return
5431 }
5432 }
5433 if !bytes.Equal(buf[start:end], buffer.Bytes()) {
5434 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
5435 return
5436 }
5437 }
5438
5439 testCases := []struct {
5440 offset int64
5441 whence int
5442 pos int64
5443 err error
5444 shouldCmp bool
5445 start int
5446 end int
5447 }{
5448 // Start from offset 0, fetch data and compare
5449 {0, 0, 0, nil, true, 0, 0},
5450 // Start from offset 2048, fetch data and compare
5451 {2048, 0, 2048, nil, true, 2048, bufSize},
5452 // Start from offset larger than possible
5453 {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
5454 // Move to offset 0 without comparing
5455 {0, 0, 0, nil, false, 0, 0},
5456 // Move one step forward and compare
5457 {1, 1, 1, nil, true, 1, bufSize},
5458 // Move larger than possible
5459 {int64(bufSize), 1, 0, io.EOF, false, 0, 0},
5460 // Provide negative offset with CUR_SEEK
5461 {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
5462 // Test with whence SEEK_END and with positive offset
5463 {1024, 2, 0, io.EOF, false, 0, 0},
5464 // Test with whence SEEK_END and with negative offset
5465 {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
5466 // Test with whence SEEK_END and with large negative offset
5467 {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
5468 // Test with invalid whence
5469 {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
5470 }
5471
5472 for i, testCase := range testCases {
5473 // Perform seek operation
5474 n, err := r.Seek(testCase.offset, testCase.whence)
5475 if err != nil && testCase.err == nil {
5476 // We expected success.
5477 logError(testName, function, args, startTime, "",
5478 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5479 return
5480 }
5481 if err == nil && testCase.err != nil {
5482 // We expected failure, but got success.
5483 logError(testName, function, args, startTime, "",
5484 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5485 return
5486 }
5487 if err != nil && testCase.err != nil {
5488 if err.Error() != testCase.err.Error() {
5489 // We expect a specific error
5490 logError(testName, function, args, startTime, "",
5491 fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
5492 return
5493 }
5494 }
5495 // Check the returned seek pos
5496 if n != testCase.pos {
5497 logError(testName, function, args, startTime, "",
5498 fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err)
5499 return
5500 }
5501 // Compare only if shouldCmp is activated
5502 if testCase.shouldCmp {
5503 cmpData(r, testCase.start, testCase.end)
5504 }
5505 }
5506
5507 successLogger(testName, function, args, startTime).Info()
5508}
5509
5510// Tests SSE-C get object ReaderAt interface methods.
5511func testSSECEncryptedGetObjectReadAtFunctional() {
5512 // initialize logging params
5513 startTime := time.Now()
5514 testName := getFuncName()
5515 function := "GetObject(bucketName, objectName)"
5516 args := map[string]interface{}{}
5517
5518 // Seed random based on current time.
5519 rand.Seed(time.Now().Unix())
5520
5521 // Instantiate new minio client object.
5522 c, err := minio.New(os.Getenv(serverEndpoint),
5523 &minio.Options{
5524 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5525 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5526 })
5527 if err != nil {
5528 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5529 return
5530 }
5531
5532 // Enable tracing, write to stderr.
5533 // c.TraceOn(os.Stderr)
5534
5535 // Set user agent.
5536 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5537
5538 // Generate a new random bucket name.
5539 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5540 args["bucketName"] = bucketName
5541
5542 // Make a new bucket.
5543 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5544 if err != nil {
5545 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5546 return
5547 }
5548
5549 defer cleanupBucket(bucketName, c)
5550
5551 // Generate 129MiB of data.
5552 bufSize := dataFileMap["datafile-129-MB"]
5553 reader := getDataReader("datafile-129-MB")
5554 defer reader.Close()
5555
5556 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5557 args["objectName"] = objectName
5558
5559 buf, err := io.ReadAll(reader)
5560 if err != nil {
5561 logError(testName, function, args, startTime, "", "ReadAll failed", err)
5562 return
5563 }
5564
5565 // Save the data
5566 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
5567 ContentType: "binary/octet-stream",
5568 ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
5569 })
5570 if err != nil {
5571 logError(testName, function, args, startTime, "", "PutObject failed", err)
5572 return
5573 }
5574
5575 // read the data back
5576 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{
5577 ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
5578 })
5579 if err != nil {
5580 logError(testName, function, args, startTime, "", "PutObject failed", err)
5581 return
5582 }
5583 defer r.Close()
5584
5585 offset := int64(2048)
5586
5587 // read directly
5588 buf1 := make([]byte, 512)
5589 buf2 := make([]byte, 512)
5590 buf3 := make([]byte, 512)
5591 buf4 := make([]byte, 512)
5592
5593 // Test readAt before stat is called such that objectInfo doesn't change.
5594 m, err := r.ReadAt(buf1, offset)
5595 if err != nil {
5596 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5597 return
5598 }
5599 if m != len(buf1) {
5600 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
5601 return
5602 }
5603 if !bytes.Equal(buf1, buf[offset:offset+512]) {
5604 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
5605 return
5606 }
5607 offset += 512
5608
5609 st, err := r.Stat()
5610 if err != nil {
5611 logError(testName, function, args, startTime, "", "Stat failed", err)
5612 return
5613 }
5614
5615 if st.Size != int64(bufSize) {
5616 logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
5617 return
5618 }
5619
5620 m, err = r.ReadAt(buf2, offset)
5621 if err != nil {
5622 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5623 return
5624 }
5625 if m != len(buf2) {
5626 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
5627 return
5628 }
5629 if !bytes.Equal(buf2, buf[offset:offset+512]) {
5630 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
5631 return
5632 }
5633 offset += 512
5634 m, err = r.ReadAt(buf3, offset)
5635 if err != nil {
5636 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5637 return
5638 }
5639 if m != len(buf3) {
5640 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err)
5641 return
5642 }
5643 if !bytes.Equal(buf3, buf[offset:offset+512]) {
5644 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
5645 return
5646 }
5647 offset += 512
5648 m, err = r.ReadAt(buf4, offset)
5649 if err != nil {
5650 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5651 return
5652 }
5653 if m != len(buf4) {
5654 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err)
5655 return
5656 }
5657 if !bytes.Equal(buf4, buf[offset:offset+512]) {
5658 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
5659 return
5660 }
5661
5662 buf5 := make([]byte, len(buf))
5663 // Read the whole object.
5664 m, err = r.ReadAt(buf5, 0)
5665 if err != nil {
5666 if err != io.EOF {
5667 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5668 return
5669 }
5670 }
5671 if m != len(buf5) {
5672 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err)
5673 return
5674 }
5675 if !bytes.Equal(buf, buf5) {
5676 logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
5677 return
5678 }
5679
5680 buf6 := make([]byte, len(buf)+1)
5681 // Read the whole object and beyond.
5682 _, err = r.ReadAt(buf6, 0)
5683 if err != nil {
5684 if err != io.EOF {
5685 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5686 return
5687 }
5688 }
5689
5690 successLogger(testName, function, args, startTime).Info()
5691}
5692
5693// Tests SSE-S3 get object ReaderAt interface methods.
5694func testSSES3EncryptedGetObjectReadAtFunctional() {
5695 // initialize logging params
5696 startTime := time.Now()
5697 testName := getFuncName()
5698 function := "GetObject(bucketName, objectName)"
5699 args := map[string]interface{}{}
5700
5701 // Seed random based on current time.
5702 rand.Seed(time.Now().Unix())
5703
5704 // Instantiate new minio client object.
5705 c, err := minio.New(os.Getenv(serverEndpoint),
5706 &minio.Options{
5707 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5708 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5709 })
5710 if err != nil {
5711 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5712 return
5713 }
5714
5715 // Enable tracing, write to stderr.
5716 // c.TraceOn(os.Stderr)
5717
5718 // Set user agent.
5719 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5720
5721 // Generate a new random bucket name.
5722 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5723 args["bucketName"] = bucketName
5724
5725 // Make a new bucket.
5726 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5727 if err != nil {
5728 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5729 return
5730 }
5731
5732 defer cleanupBucket(bucketName, c)
5733
5734 // Generate 129MiB of data.
5735 bufSize := dataFileMap["datafile-129-MB"]
5736 reader := getDataReader("datafile-129-MB")
5737 defer reader.Close()
5738
5739 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5740 args["objectName"] = objectName
5741
5742 buf, err := io.ReadAll(reader)
5743 if err != nil {
5744 logError(testName, function, args, startTime, "", "ReadAll failed", err)
5745 return
5746 }
5747
5748 // Save the data
5749 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
5750 ContentType: "binary/octet-stream",
5751 ServerSideEncryption: encrypt.NewSSE(),
5752 })
5753 if err != nil {
5754 logError(testName, function, args, startTime, "", "PutObject failed", err)
5755 return
5756 }
5757
5758 // read the data back
5759 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
5760 if err != nil {
5761 logError(testName, function, args, startTime, "", "PutObject failed", err)
5762 return
5763 }
5764 defer r.Close()
5765
5766 offset := int64(2048)
5767
5768 // read directly
5769 buf1 := make([]byte, 512)
5770 buf2 := make([]byte, 512)
5771 buf3 := make([]byte, 512)
5772 buf4 := make([]byte, 512)
5773
5774 // Test readAt before stat is called such that objectInfo doesn't change.
5775 m, err := r.ReadAt(buf1, offset)
5776 if err != nil {
5777 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5778 return
5779 }
5780 if m != len(buf1) {
5781 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
5782 return
5783 }
5784 if !bytes.Equal(buf1, buf[offset:offset+512]) {
5785 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
5786 return
5787 }
5788 offset += 512
5789
5790 st, err := r.Stat()
5791 if err != nil {
5792 logError(testName, function, args, startTime, "", "Stat failed", err)
5793 return
5794 }
5795
5796 if st.Size != int64(bufSize) {
5797 logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
5798 return
5799 }
5800
5801 m, err = r.ReadAt(buf2, offset)
5802 if err != nil {
5803 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5804 return
5805 }
5806 if m != len(buf2) {
5807 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
5808 return
5809 }
5810 if !bytes.Equal(buf2, buf[offset:offset+512]) {
5811 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
5812 return
5813 }
5814 offset += 512
5815 m, err = r.ReadAt(buf3, offset)
5816 if err != nil {
5817 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5818 return
5819 }
5820 if m != len(buf3) {
5821 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err)
5822 return
5823 }
5824 if !bytes.Equal(buf3, buf[offset:offset+512]) {
5825 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
5826 return
5827 }
5828 offset += 512
5829 m, err = r.ReadAt(buf4, offset)
5830 if err != nil {
5831 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5832 return
5833 }
5834 if m != len(buf4) {
5835 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err)
5836 return
5837 }
5838 if !bytes.Equal(buf4, buf[offset:offset+512]) {
5839 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
5840 return
5841 }
5842
5843 buf5 := make([]byte, len(buf))
5844 // Read the whole object.
5845 m, err = r.ReadAt(buf5, 0)
5846 if err != nil {
5847 if err != io.EOF {
5848 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5849 return
5850 }
5851 }
5852 if m != len(buf5) {
5853 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err)
5854 return
5855 }
5856 if !bytes.Equal(buf, buf5) {
5857 logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
5858 return
5859 }
5860
5861 buf6 := make([]byte, len(buf)+1)
5862 // Read the whole object and beyond.
5863 _, err = r.ReadAt(buf6, 0)
5864 if err != nil {
5865 if err != io.EOF {
5866 logError(testName, function, args, startTime, "", "ReadAt failed", err)
5867 return
5868 }
5869 }
5870
5871 successLogger(testName, function, args, startTime).Info()
5872}
5873
5874// testSSECEncryptionPutGet tests encryption with customer provided encryption keys
5875func testSSECEncryptionPutGet() {
5876 // initialize logging params
5877 startTime := time.Now()
5878 testName := getFuncName()
5879 function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
5880 args := map[string]interface{}{
5881 "bucketName": "",
5882 "objectName": "",
5883 "sse": "",
5884 }
5885 // Seed random based on current time.
5886 rand.Seed(time.Now().Unix())
5887
5888 // Instantiate new minio client object
5889 c, err := minio.New(os.Getenv(serverEndpoint),
5890 &minio.Options{
5891 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
5892 Secure: mustParseBool(os.Getenv(enableHTTPS)),
5893 })
5894 if err != nil {
5895 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
5896 return
5897 }
5898
5899 // Enable tracing, write to stderr.
5900 // c.TraceOn(os.Stderr)
5901
5902 // Set user agent.
5903 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
5904
5905 // Generate a new random bucket name.
5906 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
5907 args["bucketName"] = bucketName
5908
5909 // Make a new bucket.
5910 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
5911 if err != nil {
5912 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
5913 return
5914 }
5915
5916 defer cleanupBucket(bucketName, c)
5917
5918 testCases := []struct {
5919 buf []byte
5920 }{
5921 {buf: bytes.Repeat([]byte("F"), 1)},
5922 {buf: bytes.Repeat([]byte("F"), 15)},
5923 {buf: bytes.Repeat([]byte("F"), 16)},
5924 {buf: bytes.Repeat([]byte("F"), 17)},
5925 {buf: bytes.Repeat([]byte("F"), 31)},
5926 {buf: bytes.Repeat([]byte("F"), 32)},
5927 {buf: bytes.Repeat([]byte("F"), 33)},
5928 {buf: bytes.Repeat([]byte("F"), 1024)},
5929 {buf: bytes.Repeat([]byte("F"), 1024*2)},
5930 {buf: bytes.Repeat([]byte("F"), 1024*1024)},
5931 }
5932
5933 const password = "correct horse battery staple" // https://xkcd.com/936/
5934
5935 for i, testCase := range testCases {
5936 // Generate a random object name
5937 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
5938 args["objectName"] = objectName
5939
5940 // Secured object
5941 sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
5942 args["sse"] = sse
5943
5944 // Put encrypted data
5945 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
5946 if err != nil {
5947 logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
5948 return
5949 }
5950
5951 // Read the data back
5952 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
5953 if err != nil {
5954 logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
5955 return
5956 }
5957 defer r.Close()
5958
5959 // Compare the sent object with the received one
5960 recvBuffer := bytes.NewBuffer([]byte{})
5961 if _, err = io.Copy(recvBuffer, r); err != nil {
5962 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
5963 return
5964 }
5965 if recvBuffer.Len() != len(testCase.buf) {
5966 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
5967 return
5968 }
5969 if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
5970 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
5971 return
5972 }
5973
5974 successLogger(testName, function, args, startTime).Info()
5975
5976 }
5977
5978 successLogger(testName, function, args, startTime).Info()
5979}
5980
5981// TestEncryptionFPut tests encryption with customer specified encryption keys
5982func testSSECEncryptionFPut() {
5983 // initialize logging params
5984 startTime := time.Now()
5985 testName := getFuncName()
5986 function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
5987 args := map[string]interface{}{
5988 "bucketName": "",
5989 "objectName": "",
5990 "filePath": "",
5991 "contentType": "",
5992 "sse": "",
5993 }
5994 // Seed random based on current time.
5995 rand.Seed(time.Now().Unix())
5996
5997 // Instantiate new minio client object
5998 c, err := minio.New(os.Getenv(serverEndpoint),
5999 &minio.Options{
6000 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6001 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6002 })
6003 if err != nil {
6004 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
6005 return
6006 }
6007
6008 // Enable tracing, write to stderr.
6009 // c.TraceOn(os.Stderr)
6010
6011 // Set user agent.
6012 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6013
6014 // Generate a new random bucket name.
6015 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
6016 args["bucketName"] = bucketName
6017
6018 // Make a new bucket.
6019 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
6020 if err != nil {
6021 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
6022 return
6023 }
6024
6025 defer cleanupBucket(bucketName, c)
6026
6027 // Object custom metadata
6028 customContentType := "custom/contenttype"
6029 args["metadata"] = customContentType
6030
6031 testCases := []struct {
6032 buf []byte
6033 }{
6034 {buf: bytes.Repeat([]byte("F"), 0)},
6035 {buf: bytes.Repeat([]byte("F"), 1)},
6036 {buf: bytes.Repeat([]byte("F"), 15)},
6037 {buf: bytes.Repeat([]byte("F"), 16)},
6038 {buf: bytes.Repeat([]byte("F"), 17)},
6039 {buf: bytes.Repeat([]byte("F"), 31)},
6040 {buf: bytes.Repeat([]byte("F"), 32)},
6041 {buf: bytes.Repeat([]byte("F"), 33)},
6042 {buf: bytes.Repeat([]byte("F"), 1024)},
6043 {buf: bytes.Repeat([]byte("F"), 1024*2)},
6044 {buf: bytes.Repeat([]byte("F"), 1024*1024)},
6045 }
6046
6047 const password = "correct horse battery staple" // https://xkcd.com/936/
6048 for i, testCase := range testCases {
6049 // Generate a random object name
6050 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6051 args["objectName"] = objectName
6052
6053 // Secured object
6054 sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
6055 args["sse"] = sse
6056
6057 // Generate a random file name.
6058 fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6059 file, err := os.Create(fileName)
6060 if err != nil {
6061 logError(testName, function, args, startTime, "", "file create failed", err)
6062 return
6063 }
6064 _, err = file.Write(testCase.buf)
6065 if err != nil {
6066 logError(testName, function, args, startTime, "", "file write failed", err)
6067 return
6068 }
6069 file.Close()
6070 // Put encrypted data
6071 if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
6072 logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
6073 return
6074 }
6075
6076 // Read the data back
6077 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
6078 if err != nil {
6079 logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
6080 return
6081 }
6082 defer r.Close()
6083
6084 // Compare the sent object with the received one
6085 recvBuffer := bytes.NewBuffer([]byte{})
6086 if _, err = io.Copy(recvBuffer, r); err != nil {
6087 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
6088 return
6089 }
6090 if recvBuffer.Len() != len(testCase.buf) {
6091 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
6092 return
6093 }
6094 if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
6095 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
6096 return
6097 }
6098
6099 os.Remove(fileName)
6100 }
6101
6102 successLogger(testName, function, args, startTime).Info()
6103}
6104
6105// testSSES3EncryptionPutGet tests SSE-S3 encryption
6106func testSSES3EncryptionPutGet() {
6107 // initialize logging params
6108 startTime := time.Now()
6109 testName := getFuncName()
6110 function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
6111 args := map[string]interface{}{
6112 "bucketName": "",
6113 "objectName": "",
6114 "sse": "",
6115 }
6116 // Seed random based on current time.
6117 rand.Seed(time.Now().Unix())
6118
6119 // Instantiate new minio client object
6120 c, err := minio.New(os.Getenv(serverEndpoint),
6121 &minio.Options{
6122 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6123 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6124 })
6125 if err != nil {
6126 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
6127 return
6128 }
6129
6130 // Enable tracing, write to stderr.
6131 // c.TraceOn(os.Stderr)
6132
6133 // Set user agent.
6134 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6135
6136 // Generate a new random bucket name.
6137 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
6138 args["bucketName"] = bucketName
6139
6140 // Make a new bucket.
6141 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
6142 if err != nil {
6143 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
6144 return
6145 }
6146
6147 defer cleanupBucket(bucketName, c)
6148
6149 testCases := []struct {
6150 buf []byte
6151 }{
6152 {buf: bytes.Repeat([]byte("F"), 1)},
6153 {buf: bytes.Repeat([]byte("F"), 15)},
6154 {buf: bytes.Repeat([]byte("F"), 16)},
6155 {buf: bytes.Repeat([]byte("F"), 17)},
6156 {buf: bytes.Repeat([]byte("F"), 31)},
6157 {buf: bytes.Repeat([]byte("F"), 32)},
6158 {buf: bytes.Repeat([]byte("F"), 33)},
6159 {buf: bytes.Repeat([]byte("F"), 1024)},
6160 {buf: bytes.Repeat([]byte("F"), 1024*2)},
6161 {buf: bytes.Repeat([]byte("F"), 1024*1024)},
6162 }
6163
6164 for i, testCase := range testCases {
6165 // Generate a random object name
6166 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6167 args["objectName"] = objectName
6168
6169 // Secured object
6170 sse := encrypt.NewSSE()
6171 args["sse"] = sse
6172
6173 // Put encrypted data
6174 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
6175 if err != nil {
6176 logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
6177 return
6178 }
6179
6180 // Read the data back without any encryption headers
6181 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
6182 if err != nil {
6183 logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
6184 return
6185 }
6186 defer r.Close()
6187
6188 // Compare the sent object with the received one
6189 recvBuffer := bytes.NewBuffer([]byte{})
6190 if _, err = io.Copy(recvBuffer, r); err != nil {
6191 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
6192 return
6193 }
6194 if recvBuffer.Len() != len(testCase.buf) {
6195 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
6196 return
6197 }
6198 if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
6199 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
6200 return
6201 }
6202
6203 successLogger(testName, function, args, startTime).Info()
6204
6205 }
6206
6207 successLogger(testName, function, args, startTime).Info()
6208}
6209
6210// TestSSES3EncryptionFPut tests server side encryption
6211func testSSES3EncryptionFPut() {
6212 // initialize logging params
6213 startTime := time.Now()
6214 testName := getFuncName()
6215 function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
6216 args := map[string]interface{}{
6217 "bucketName": "",
6218 "objectName": "",
6219 "filePath": "",
6220 "contentType": "",
6221 "sse": "",
6222 }
6223 // Seed random based on current time.
6224 rand.Seed(time.Now().Unix())
6225
6226 // Instantiate new minio client object
6227 c, err := minio.New(os.Getenv(serverEndpoint),
6228 &minio.Options{
6229 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6230 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6231 })
6232 if err != nil {
6233 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
6234 return
6235 }
6236
6237 // Enable tracing, write to stderr.
6238 // c.TraceOn(os.Stderr)
6239
6240 // Set user agent.
6241 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6242
6243 // Generate a new random bucket name.
6244 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
6245 args["bucketName"] = bucketName
6246
6247 // Make a new bucket.
6248 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
6249 if err != nil {
6250 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
6251 return
6252 }
6253
6254 defer cleanupBucket(bucketName, c)
6255
6256 // Object custom metadata
6257 customContentType := "custom/contenttype"
6258 args["metadata"] = customContentType
6259
6260 testCases := []struct {
6261 buf []byte
6262 }{
6263 {buf: bytes.Repeat([]byte("F"), 0)},
6264 {buf: bytes.Repeat([]byte("F"), 1)},
6265 {buf: bytes.Repeat([]byte("F"), 15)},
6266 {buf: bytes.Repeat([]byte("F"), 16)},
6267 {buf: bytes.Repeat([]byte("F"), 17)},
6268 {buf: bytes.Repeat([]byte("F"), 31)},
6269 {buf: bytes.Repeat([]byte("F"), 32)},
6270 {buf: bytes.Repeat([]byte("F"), 33)},
6271 {buf: bytes.Repeat([]byte("F"), 1024)},
6272 {buf: bytes.Repeat([]byte("F"), 1024*2)},
6273 {buf: bytes.Repeat([]byte("F"), 1024*1024)},
6274 }
6275
6276 for i, testCase := range testCases {
6277 // Generate a random object name
6278 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6279 args["objectName"] = objectName
6280
6281 // Secured object
6282 sse := encrypt.NewSSE()
6283 args["sse"] = sse
6284
6285 // Generate a random file name.
6286 fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6287 file, err := os.Create(fileName)
6288 if err != nil {
6289 logError(testName, function, args, startTime, "", "file create failed", err)
6290 return
6291 }
6292 _, err = file.Write(testCase.buf)
6293 if err != nil {
6294 logError(testName, function, args, startTime, "", "file write failed", err)
6295 return
6296 }
6297 file.Close()
6298 // Put encrypted data
6299 if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
6300 logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
6301 return
6302 }
6303
6304 // Read the data back
6305 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
6306 if err != nil {
6307 logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
6308 return
6309 }
6310 defer r.Close()
6311
6312 // Compare the sent object with the received one
6313 recvBuffer := bytes.NewBuffer([]byte{})
6314 if _, err = io.Copy(recvBuffer, r); err != nil {
6315 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
6316 return
6317 }
6318 if recvBuffer.Len() != len(testCase.buf) {
6319 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
6320 return
6321 }
6322 if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
6323 logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
6324 return
6325 }
6326
6327 os.Remove(fileName)
6328 }
6329
6330 successLogger(testName, function, args, startTime).Info()
6331}
6332
6333func testBucketNotification() {
6334 // initialize logging params
6335 startTime := time.Now()
6336 testName := getFuncName()
6337 function := "SetBucketNotification(bucketName)"
6338 args := map[string]interface{}{
6339 "bucketName": "",
6340 }
6341
6342 if os.Getenv("NOTIFY_BUCKET") == "" ||
6343 os.Getenv("NOTIFY_SERVICE") == "" ||
6344 os.Getenv("NOTIFY_REGION") == "" ||
6345 os.Getenv("NOTIFY_ACCOUNTID") == "" ||
6346 os.Getenv("NOTIFY_RESOURCE") == "" {
6347 ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info()
6348 return
6349 }
6350
6351 // Seed random based on current time.
6352 rand.Seed(time.Now().Unix())
6353
6354 c, err := minio.New(os.Getenv(serverEndpoint),
6355 &minio.Options{
6356 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6357 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6358 })
6359 if err != nil {
6360 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
6361 return
6362 }
6363
6364 // Enable to debug
6365 // c.TraceOn(os.Stderr)
6366
6367 // Set user agent.
6368 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6369
6370 bucketName := os.Getenv("NOTIFY_BUCKET")
6371 args["bucketName"] = bucketName
6372
6373 topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
6374 queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
6375
6376 topicConfig := notification.NewConfig(topicArn)
6377 topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll)
6378 topicConfig.AddFilterSuffix("jpg")
6379
6380 queueConfig := notification.NewConfig(queueArn)
6381 queueConfig.AddEvents(notification.ObjectCreatedAll)
6382 queueConfig.AddFilterPrefix("photos/")
6383
6384 config := notification.Configuration{}
6385 config.AddTopic(topicConfig)
6386
6387 // Add the same topicConfig again, should have no effect
6388 // because it is duplicated
6389 config.AddTopic(topicConfig)
6390 if len(config.TopicConfigs) != 1 {
6391 logError(testName, function, args, startTime, "", "Duplicate entry added", err)
6392 return
6393 }
6394
6395 // Add and remove a queue config
6396 config.AddQueue(queueConfig)
6397 config.RemoveQueueByArn(queueArn)
6398
6399 err = c.SetBucketNotification(context.Background(), bucketName, config)
6400 if err != nil {
6401 logError(testName, function, args, startTime, "", "SetBucketNotification failed", err)
6402 return
6403 }
6404
6405 config, err = c.GetBucketNotification(context.Background(), bucketName)
6406 if err != nil {
6407 logError(testName, function, args, startTime, "", "GetBucketNotification failed", err)
6408 return
6409 }
6410
6411 if len(config.TopicConfigs) != 1 {
6412 logError(testName, function, args, startTime, "", "Topic config is empty", err)
6413 return
6414 }
6415
6416 if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
6417 logError(testName, function, args, startTime, "", "Couldn't get the suffix", err)
6418 return
6419 }
6420
6421 err = c.RemoveAllBucketNotification(context.Background(), bucketName)
6422 if err != nil {
6423 logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err)
6424 return
6425 }
6426
6427 // Delete all objects and buckets
6428 if err = cleanupBucket(bucketName, c); err != nil {
6429 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
6430 return
6431 }
6432
6433 successLogger(testName, function, args, startTime).Info()
6434}
6435
6436// Tests comprehensive list of all methods.
6437func testFunctional() {
6438 // initialize logging params
6439 startTime := time.Now()
6440 testName := getFuncName()
6441 function := "testFunctional()"
6442 functionAll := ""
6443 args := map[string]interface{}{}
6444
6445 // Seed random based on current time.
6446 rand.Seed(time.Now().Unix())
6447
6448 c, err := minio.New(os.Getenv(serverEndpoint),
6449 &minio.Options{
6450 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
6451 Secure: mustParseBool(os.Getenv(enableHTTPS)),
6452 })
6453 if err != nil {
6454 logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err)
6455 return
6456 }
6457
6458 // Enable to debug
6459 // c.TraceOn(os.Stderr)
6460
6461 // Set user agent.
6462 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
6463
6464 // Generate a new random bucket name.
6465 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
6466
6467 // Make a new bucket.
6468 function = "MakeBucket(bucketName, region)"
6469 functionAll = "MakeBucket(bucketName, region)"
6470 args["bucketName"] = bucketName
6471 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
6472
6473 defer cleanupBucket(bucketName, c)
6474 if err != nil {
6475 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
6476 return
6477 }
6478
6479 // Generate a random file name.
6480 fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
6481 file, err := os.Create(fileName)
6482 if err != nil {
6483 logError(testName, function, args, startTime, "", "File creation failed", err)
6484 return
6485 }
6486 for i := 0; i < 3; i++ {
6487 buf := make([]byte, rand.Intn(1<<19))
6488 _, err = file.Write(buf)
6489 if err != nil {
6490 logError(testName, function, args, startTime, "", "File write failed", err)
6491 return
6492 }
6493 }
6494 file.Close()
6495
6496 // Verify if bucket exits and you have access.
6497 var exists bool
6498 function = "BucketExists(bucketName)"
6499 functionAll += ", " + function
6500 args = map[string]interface{}{
6501 "bucketName": bucketName,
6502 }
6503 exists, err = c.BucketExists(context.Background(), bucketName)
6504
6505 if err != nil {
6506 logError(testName, function, args, startTime, "", "BucketExists failed", err)
6507 return
6508 }
6509 if !exists {
6510 logError(testName, function, args, startTime, "", "Could not find the bucket", err)
6511 return
6512 }
6513
6514 // Asserting the default bucket policy.
6515 function = "GetBucketPolicy(ctx, bucketName)"
6516 functionAll += ", " + function
6517 args = map[string]interface{}{
6518 "bucketName": bucketName,
6519 }
6520 nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName)
6521 if err != nil {
6522 logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
6523 return
6524 }
6525 if nilPolicy != "" {
6526 logError(testName, function, args, startTime, "", "policy should be set to nil", err)
6527 return
6528 }
6529
6530 // Set the bucket policy to 'public readonly'.
6531 function = "SetBucketPolicy(bucketName, readOnlyPolicy)"
6532 functionAll += ", " + function
6533
6534 readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
6535 args = map[string]interface{}{
6536 "bucketName": bucketName,
6537 "bucketPolicy": readOnlyPolicy,
6538 }
6539
6540 err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy)
6541 if err != nil {
6542 logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
6543 return
6544 }
6545 // should return policy `readonly`.
6546 function = "GetBucketPolicy(ctx, bucketName)"
6547 functionAll += ", " + function
6548 args = map[string]interface{}{
6549 "bucketName": bucketName,
6550 }
6551 _, err = c.GetBucketPolicy(context.Background(), bucketName)
6552 if err != nil {
6553 logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
6554 return
6555 }
6556
6557 // Make the bucket 'public writeonly'.
6558 function = "SetBucketPolicy(bucketName, writeOnlyPolicy)"
6559 functionAll += ", " + function
6560
6561 writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
6562 args = map[string]interface{}{
6563 "bucketName": bucketName,
6564 "bucketPolicy": writeOnlyPolicy,
6565 }
6566 err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy)
6567
6568 if err != nil {
6569 logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
6570 return
6571 }
6572 // should return policy `writeonly`.
6573 function = "GetBucketPolicy(ctx, bucketName)"
6574 functionAll += ", " + function
6575 args = map[string]interface{}{
6576 "bucketName": bucketName,
6577 }
6578
6579 _, err = c.GetBucketPolicy(context.Background(), bucketName)
6580 if err != nil {
6581 logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
6582 return
6583 }
6584
6585 // Make the bucket 'public read/write'.
6586 function = "SetBucketPolicy(bucketName, readWritePolicy)"
6587 functionAll += ", " + function
6588
6589 readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
6590
6591 args = map[string]interface{}{
6592 "bucketName": bucketName,
6593 "bucketPolicy": readWritePolicy,
6594 }
6595 err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
6596
6597 if err != nil {
6598 logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
6599 return
6600 }
6601 // should return policy `readwrite`.
6602 function = "GetBucketPolicy(bucketName)"
6603 functionAll += ", " + function
6604 args = map[string]interface{}{
6605 "bucketName": bucketName,
6606 }
6607 _, err = c.GetBucketPolicy(context.Background(), bucketName)
6608 if err != nil {
6609 logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
6610 return
6611 }
6612
6613 // List all buckets.
6614 function = "ListBuckets()"
6615 functionAll += ", " + function
6616 args = nil
6617 buckets, err := c.ListBuckets(context.Background())
6618
6619 if len(buckets) == 0 {
6620 logError(testName, function, args, startTime, "", "Found bucket list to be empty", err)
6621 return
6622 }
6623 if err != nil {
6624 logError(testName, function, args, startTime, "", "ListBuckets failed", err)
6625 return
6626 }
6627
6628 // Verify if previously created bucket is listed in list buckets.
6629 bucketFound := false
6630 for _, bucket := range buckets {
6631 if bucket.Name == bucketName {
6632 bucketFound = true
6633 }
6634 }
6635
6636 // If bucket not found error out.
6637 if !bucketFound {
6638 logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err)
6639 return
6640 }
6641
6642 objectName := bucketName + "unique"
6643
6644 // Generate data
6645 buf := bytes.Repeat([]byte("f"), 1<<19)
6646
6647 function = "PutObject(bucketName, objectName, reader, contentType)"
6648 functionAll += ", " + function
6649 args = map[string]interface{}{
6650 "bucketName": bucketName,
6651 "objectName": objectName,
6652 "contentType": "",
6653 }
6654
6655 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
6656 if err != nil {
6657 logError(testName, function, args, startTime, "", "PutObject failed", err)
6658 return
6659 }
6660
6661 args = map[string]interface{}{
6662 "bucketName": bucketName,
6663 "objectName": objectName + "-nolength",
6664 "contentType": "binary/octet-stream",
6665 }
6666
6667 _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
6668 if err != nil {
6669 logError(testName, function, args, startTime, "", "PutObject failed", err)
6670 return
6671 }
6672
6673 // Instantiate a done channel to close all listing.
6674 doneCh := make(chan struct{})
6675 defer close(doneCh)
6676
6677 objFound := false
6678 isRecursive := true // Recursive is true.
6679
6680 function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
6681 functionAll += ", " + function
6682 args = map[string]interface{}{
6683 "bucketName": bucketName,
6684 "objectName": objectName,
6685 "isRecursive": isRecursive,
6686 }
6687
6688 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) {
6689 if obj.Key == objectName {
6690 objFound = true
6691 break
6692 }
6693 }
6694 if !objFound {
6695 logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
6696 return
6697 }
6698
6699 objFound = false
6700 isRecursive = true // Recursive is true.
6701 function = "ListObjects()"
6702 functionAll += ", " + function
6703 args = map[string]interface{}{
6704 "bucketName": bucketName,
6705 "objectName": objectName,
6706 "isRecursive": isRecursive,
6707 }
6708
6709 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) {
6710 if obj.Key == objectName {
6711 objFound = true
6712 break
6713 }
6714 }
6715 if !objFound {
6716 logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
6717 return
6718 }
6719
6720 incompObjNotFound := true
6721
6722 function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
6723 functionAll += ", " + function
6724 args = map[string]interface{}{
6725 "bucketName": bucketName,
6726 "objectName": objectName,
6727 "isRecursive": isRecursive,
6728 }
6729
6730 for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) {
6731 if objIncompl.Key != "" {
6732 incompObjNotFound = false
6733 break
6734 }
6735 }
6736 if !incompObjNotFound {
6737 logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
6738 return
6739 }
6740
6741 function = "GetObject(bucketName, objectName)"
6742 functionAll += ", " + function
6743 args = map[string]interface{}{
6744 "bucketName": bucketName,
6745 "objectName": objectName,
6746 }
6747 newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
6748 if err != nil {
6749 logError(testName, function, args, startTime, "", "GetObject failed", err)
6750 return
6751 }
6752
6753 newReadBytes, err := io.ReadAll(newReader)
6754 if err != nil {
6755 logError(testName, function, args, startTime, "", "ReadAll failed", err)
6756 return
6757 }
6758
6759 if !bytes.Equal(newReadBytes, buf) {
6760 logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err)
6761 return
6762 }
6763 newReader.Close()
6764
6765 function = "FGetObject(bucketName, objectName, fileName)"
6766 functionAll += ", " + function
6767 args = map[string]interface{}{
6768 "bucketName": bucketName,
6769 "objectName": objectName,
6770 "fileName": fileName + "-f",
6771 }
6772 err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
6773
6774 if err != nil {
6775 logError(testName, function, args, startTime, "", "FGetObject failed", err)
6776 return
6777 }
6778
6779 function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
6780 functionAll += ", " + function
6781 args = map[string]interface{}{
6782 "bucketName": bucketName,
6783 "objectName": "",
6784 "expires": 3600 * time.Second,
6785 }
6786 if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil {
6787 logError(testName, function, args, startTime, "", "PresignedHeadObject success", err)
6788 return
6789 }
6790
6791 // Generate presigned HEAD object url.
6792 function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
6793 functionAll += ", " + function
6794 args = map[string]interface{}{
6795 "bucketName": bucketName,
6796 "objectName": objectName,
6797 "expires": 3600 * time.Second,
6798 }
6799 presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
6800 if err != nil {
6801 logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
6802 return
6803 }
6804
6805 transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
6806 if err != nil {
6807 logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
6808 return
6809 }
6810
6811 httpClient := &http.Client{
6812 // Setting a sensible time out of 30secs to wait for response
6813 // headers. Request is pro-actively canceled after 30secs
6814 // with no response.
6815 Timeout: 30 * time.Second,
6816 Transport: transport,
6817 }
6818
6819 req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil)
6820 if err != nil {
6821 logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err)
6822 return
6823 }
6824
6825 // Verify if presigned url works.
6826 resp, err := httpClient.Do(req)
6827 if err != nil {
6828 logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
6829 return
6830 }
6831 if resp.StatusCode != http.StatusOK {
6832 logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err)
6833 return
6834 }
6835 if resp.Header.Get("ETag") == "" {
6836 logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
6837 return
6838 }
6839 resp.Body.Close()
6840
6841 function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
6842 functionAll += ", " + function
6843 args = map[string]interface{}{
6844 "bucketName": bucketName,
6845 "objectName": "",
6846 "expires": 3600 * time.Second,
6847 }
6848 _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil)
6849 if err == nil {
6850 logError(testName, function, args, startTime, "", "PresignedGetObject success", err)
6851 return
6852 }
6853
6854 // Generate presigned GET object url.
6855 function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
6856 functionAll += ", " + function
6857 args = map[string]interface{}{
6858 "bucketName": bucketName,
6859 "objectName": objectName,
6860 "expires": 3600 * time.Second,
6861 }
6862 presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
6863 if err != nil {
6864 logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
6865 return
6866 }
6867
6868 // Verify if presigned url works.
6869 req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
6870 if err != nil {
6871 logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
6872 return
6873 }
6874
6875 resp, err = httpClient.Do(req)
6876 if err != nil {
6877 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6878 return
6879 }
6880 if resp.StatusCode != http.StatusOK {
6881 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
6882 return
6883 }
6884 newPresignedBytes, err := io.ReadAll(resp.Body)
6885 if err != nil {
6886 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6887 return
6888 }
6889 resp.Body.Close()
6890 if !bytes.Equal(newPresignedBytes, buf) {
6891 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6892 return
6893 }
6894
6895 // Set request parameters.
6896 reqParams := make(url.Values)
6897 reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
6898 args = map[string]interface{}{
6899 "bucketName": bucketName,
6900 "objectName": objectName,
6901 "expires": 3600 * time.Second,
6902 "reqParams": reqParams,
6903 }
6904 presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
6905
6906 if err != nil {
6907 logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
6908 return
6909 }
6910
6911 // Verify if presigned url works.
6912 req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
6913 if err != nil {
6914 logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
6915 return
6916 }
6917
6918 resp, err = httpClient.Do(req)
6919 if err != nil {
6920 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6921 return
6922 }
6923 if resp.StatusCode != http.StatusOK {
6924 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
6925 return
6926 }
6927 newPresignedBytes, err = io.ReadAll(resp.Body)
6928 if err != nil {
6929 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
6930 return
6931 }
6932 if !bytes.Equal(newPresignedBytes, buf) {
6933 logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
6934 return
6935 }
6936 if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
6937 logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err)
6938 return
6939 }
6940
6941 function = "PresignedPutObject(bucketName, objectName, expires)"
6942 functionAll += ", " + function
6943 args = map[string]interface{}{
6944 "bucketName": bucketName,
6945 "objectName": "",
6946 "expires": 3600 * time.Second,
6947 }
6948 _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second)
6949 if err == nil {
6950 logError(testName, function, args, startTime, "", "PresignedPutObject success", err)
6951 return
6952 }
6953
6954 function = "PresignedPutObject(bucketName, objectName, expires)"
6955 functionAll += ", " + function
6956 args = map[string]interface{}{
6957 "bucketName": bucketName,
6958 "objectName": objectName + "-presigned",
6959 "expires": 3600 * time.Second,
6960 }
6961 presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
6962 if err != nil {
6963 logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
6964 return
6965 }
6966
6967 buf = bytes.Repeat([]byte("g"), 1<<19)
6968
6969 req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf))
6970 if err != nil {
6971 logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err)
6972 return
6973 }
6974
6975 resp, err = httpClient.Do(req)
6976 if err != nil {
6977 logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
6978 return
6979 }
6980
6981 newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{})
6982 if err != nil {
6983 logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err)
6984 return
6985 }
6986
6987 newReadBytes, err = io.ReadAll(newReader)
6988 if err != nil {
6989 logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
6990 return
6991 }
6992
6993 if !bytes.Equal(newReadBytes, buf) {
6994 logError(testName, function, args, startTime, "", "Bytes mismatch", err)
6995 return
6996 }
6997
6998 function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)"
6999 functionAll += ", " + function
7000 presignExtraHeaders := map[string][]string{
7001 "mysecret": {"abcxxx"},
7002 }
7003 args = map[string]interface{}{
7004 "method": "PUT",
7005 "bucketName": bucketName,
7006 "objectName": objectName + "-presign-custom",
7007 "expires": 3600 * time.Second,
7008 "extraHeaders": presignExtraHeaders,
7009 }
7010 presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders)
7011 if err != nil {
7012 logError(testName, function, args, startTime, "", "Presigned failed", err)
7013 return
7014 }
7015
7016 // Generate data more than 32K
7017 buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
7018
7019 req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf))
7020 if err != nil {
7021 logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
7022 return
7023 }
7024
7025 req.Header.Add("mysecret", "abcxxx")
7026 resp, err = httpClient.Do(req)
7027 if err != nil {
7028 logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
7029 return
7030 }
7031
7032 // Download the uploaded object to verify
7033 args = map[string]interface{}{
7034 "bucketName": bucketName,
7035 "objectName": objectName + "-presign-custom",
7036 }
7037 newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{})
7038 if err != nil {
7039 logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err)
7040 return
7041 }
7042
7043 newReadBytes, err = io.ReadAll(newReader)
7044 if err != nil {
7045 logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err)
7046 return
7047 }
7048 newReader.Close()
7049
7050 if !bytes.Equal(newReadBytes, buf) {
7051 logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err)
7052 return
7053 }
7054
7055 function = "RemoveObject(bucketName, objectName)"
7056 functionAll += ", " + function
7057 args = map[string]interface{}{
7058 "bucketName": bucketName,
7059 "objectName": objectName,
7060 }
7061 err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
7062
7063 if err != nil {
7064 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7065 return
7066 }
7067 args["objectName"] = objectName + "-f"
7068 err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{})
7069
7070 if err != nil {
7071 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7072 return
7073 }
7074
7075 args["objectName"] = objectName + "-nolength"
7076 err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{})
7077
7078 if err != nil {
7079 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7080 return
7081 }
7082
7083 args["objectName"] = objectName + "-presigned"
7084 err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{})
7085
7086 if err != nil {
7087 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7088 return
7089 }
7090
7091 args["objectName"] = objectName + "-presign-custom"
7092 err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{})
7093
7094 if err != nil {
7095 logError(testName, function, args, startTime, "", "RemoveObject failed", err)
7096 return
7097 }
7098
7099 function = "RemoveBucket(bucketName)"
7100 functionAll += ", " + function
7101 args = map[string]interface{}{
7102 "bucketName": bucketName,
7103 }
7104 err = c.RemoveBucket(context.Background(), bucketName)
7105
7106 if err != nil {
7107 logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
7108 return
7109 }
7110 err = c.RemoveBucket(context.Background(), bucketName)
7111 if err == nil {
7112 logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err)
7113 return
7114 }
7115 if err.Error() != "The specified bucket does not exist" {
7116 logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
7117 return
7118 }
7119
7120 os.Remove(fileName)
7121 os.Remove(fileName + "-f")
7122 successLogger(testName, functionAll, args, startTime).Info()
7123}
7124
7125// Test for validating GetObject Reader* methods functioning when the
7126// object is modified in the object store.
7127func testGetObjectModified() {
7128 // initialize logging params
7129 startTime := time.Now()
7130 testName := getFuncName()
7131 function := "GetObject(bucketName, objectName)"
7132 args := map[string]interface{}{}
7133
7134 // Instantiate new minio client object.
7135 c, err := minio.New(os.Getenv(serverEndpoint),
7136 &minio.Options{
7137 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7138 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7139 })
7140 if err != nil {
7141 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
7142 return
7143 }
7144
7145 // Enable tracing, write to stderr.
7146 // c.TraceOn(os.Stderr)
7147
7148 // Set user agent.
7149 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7150
7151 // Make a new bucket.
7152 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7153 args["bucketName"] = bucketName
7154
7155 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7156 if err != nil {
7157 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7158 return
7159 }
7160
7161 defer cleanupBucket(bucketName, c)
7162
7163 // Upload an object.
7164 objectName := "myobject"
7165 args["objectName"] = objectName
7166 content := "helloworld"
7167 _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"})
7168 if err != nil {
7169 logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
7170 return
7171 }
7172
7173 defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
7174
7175 reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7176 if err != nil {
7177 logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err)
7178 return
7179 }
7180 defer reader.Close()
7181
7182 // Read a few bytes of the object.
7183 b := make([]byte, 5)
7184 n, err := reader.ReadAt(b, 0)
7185 if err != nil {
7186 logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err)
7187 return
7188 }
7189
7190 // Upload different contents to the same object while object is being read.
7191 newContent := "goodbyeworld"
7192 _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"})
7193 if err != nil {
7194 logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
7195 return
7196 }
7197
7198 // Confirm that a Stat() call in between doesn't change the Object's cached etag.
7199 _, err = reader.Stat()
7200 expectedError := "At least one of the pre-conditions you specified did not hold"
7201 if err.Error() != expectedError {
7202 logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err)
7203 return
7204 }
7205
7206 // Read again only to find object contents have been modified since last read.
7207 _, err = reader.ReadAt(b, int64(n))
7208 if err.Error() != expectedError {
7209 logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err)
7210 return
7211 }
7212
7213 successLogger(testName, function, args, startTime).Info()
7214}
7215
7216// Test validates putObject to upload a file seeked at a given offset.
7217func testPutObjectUploadSeekedObject() {
7218 // initialize logging params
7219 startTime := time.Now()
7220 testName := getFuncName()
7221 function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
7222 args := map[string]interface{}{
7223 "bucketName": "",
7224 "objectName": "",
7225 "fileToUpload": "",
7226 "contentType": "binary/octet-stream",
7227 }
7228
7229 // Instantiate new minio client object.
7230 c, err := minio.New(os.Getenv(serverEndpoint),
7231 &minio.Options{
7232 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7233 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7234 })
7235 if err != nil {
7236 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
7237 return
7238 }
7239
7240 // Enable tracing, write to stderr.
7241 // c.TraceOn(os.Stderr)
7242
7243 // Set user agent.
7244 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7245
7246 // Make a new bucket.
7247 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7248 args["bucketName"] = bucketName
7249
7250 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7251 if err != nil {
7252 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7253 return
7254 }
7255 defer cleanupBucket(bucketName, c)
7256
7257 var tempfile *os.File
7258
7259 if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
7260 tempfile, err = os.Open(fileName)
7261 if err != nil {
7262 logError(testName, function, args, startTime, "", "File open failed", err)
7263 return
7264 }
7265 args["fileToUpload"] = fileName
7266 } else {
7267 tempfile, err = os.CreateTemp("", "minio-go-upload-test-")
7268 if err != nil {
7269 logError(testName, function, args, startTime, "", "TempFile create failed", err)
7270 return
7271 }
7272 args["fileToUpload"] = tempfile.Name()
7273
7274 // Generate 100kB data
7275 if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
7276 logError(testName, function, args, startTime, "", "File copy failed", err)
7277 return
7278 }
7279
7280 defer os.Remove(tempfile.Name())
7281
7282 // Seek back to the beginning of the file.
7283 tempfile.Seek(0, 0)
7284 }
7285 length := 100 * humanize.KiByte
7286 objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
7287 args["objectName"] = objectName
7288
7289 offset := length / 2
7290 if _, err = tempfile.Seek(int64(offset), 0); err != nil {
7291 logError(testName, function, args, startTime, "", "TempFile seek failed", err)
7292 return
7293 }
7294
7295 _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7296 if err != nil {
7297 logError(testName, function, args, startTime, "", "PutObject failed", err)
7298 return
7299 }
7300 tempfile.Close()
7301
7302 obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7303 if err != nil {
7304 logError(testName, function, args, startTime, "", "GetObject failed", err)
7305 return
7306 }
7307 defer obj.Close()
7308
7309 n, err := obj.Seek(int64(offset), 0)
7310 if err != nil {
7311 logError(testName, function, args, startTime, "", "Seek failed", err)
7312 return
7313 }
7314 if n != int64(offset) {
7315 logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
7316 return
7317 }
7318
7319 _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7320 if err != nil {
7321 logError(testName, function, args, startTime, "", "PutObject failed", err)
7322 return
7323 }
7324 st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{})
7325 if err != nil {
7326 logError(testName, function, args, startTime, "", "StatObject failed", err)
7327 return
7328 }
7329 if st.Size != int64(length-offset) {
7330 logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err)
7331 return
7332 }
7333
7334 successLogger(testName, function, args, startTime).Info()
7335}
7336
7337// Tests bucket re-create errors.
7338func testMakeBucketErrorV2() {
7339 // initialize logging params
7340 startTime := time.Now()
7341 testName := getFuncName()
7342 function := "MakeBucket(bucketName, region)"
7343 args := map[string]interface{}{
7344 "bucketName": "",
7345 "region": "eu-west-1",
7346 }
7347
7348 // Seed random based on current time.
7349 rand.Seed(time.Now().Unix())
7350
7351 // Instantiate new minio client object.
7352 c, err := minio.New(os.Getenv(serverEndpoint),
7353 &minio.Options{
7354 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7355 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7356 })
7357 if err != nil {
7358 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7359 return
7360 }
7361
7362 // Enable tracing, write to stderr.
7363 // c.TraceOn(os.Stderr)
7364
7365 // Set user agent.
7366 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7367
7368 // Generate a new random bucket name.
7369 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7370 region := "eu-west-1"
7371 args["bucketName"] = bucketName
7372 args["region"] = region
7373
7374 // Make a new bucket in 'eu-west-1'.
7375 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {
7376 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7377 return
7378 }
7379
7380 defer cleanupBucket(bucketName, c)
7381
7382 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil {
7383 logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err)
7384 return
7385 }
7386 // Verify valid error response from server.
7387 if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
7388 minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
7389 logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
7390 return
7391 }
7392
7393 successLogger(testName, function, args, startTime).Info()
7394}
7395
7396// Test get object reader to not throw error on being closed twice.
7397func testGetObjectClosedTwiceV2() {
7398 // initialize logging params
7399 startTime := time.Now()
7400 testName := getFuncName()
7401 function := "MakeBucket(bucketName, region)"
7402 args := map[string]interface{}{
7403 "bucketName": "",
7404 "region": "eu-west-1",
7405 }
7406
7407 // Seed random based on current time.
7408 rand.Seed(time.Now().Unix())
7409
7410 // Instantiate new minio client object.
7411 c, err := minio.New(os.Getenv(serverEndpoint),
7412 &minio.Options{
7413 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7414 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7415 })
7416 if err != nil {
7417 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7418 return
7419 }
7420
7421 // Enable tracing, write to stderr.
7422 // c.TraceOn(os.Stderr)
7423
7424 // Set user agent.
7425 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7426
7427 // Generate a new random bucket name.
7428 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7429 args["bucketName"] = bucketName
7430
7431 // Make a new bucket.
7432 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7433 if err != nil {
7434 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7435 return
7436 }
7437
7438 defer cleanupBucket(bucketName, c)
7439
7440 // Generate 33K of data.
7441 bufSize := dataFileMap["datafile-33-kB"]
7442 reader := getDataReader("datafile-33-kB")
7443 defer reader.Close()
7444
7445 // Save the data
7446 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
7447 args["objectName"] = objectName
7448
7449 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7450 if err != nil {
7451 logError(testName, function, args, startTime, "", "PutObject failed", err)
7452 return
7453 }
7454
7455 // Read the data back
7456 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7457 if err != nil {
7458 logError(testName, function, args, startTime, "", "GetObject failed", err)
7459 return
7460 }
7461
7462 st, err := r.Stat()
7463 if err != nil {
7464 logError(testName, function, args, startTime, "", "Stat failed", err)
7465 return
7466 }
7467
7468 if st.Size != int64(bufSize) {
7469 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
7470 return
7471 }
7472 if err := r.Close(); err != nil {
7473 logError(testName, function, args, startTime, "", "Stat failed", err)
7474 return
7475 }
7476 if err := r.Close(); err == nil {
7477 logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
7478 return
7479 }
7480
7481 successLogger(testName, function, args, startTime).Info()
7482}
7483
7484// Tests FPutObject hidden contentType setting
7485func testFPutObjectV2() {
7486 // initialize logging params
7487 startTime := time.Now()
7488 testName := getFuncName()
7489 function := "FPutObject(bucketName, objectName, fileName, opts)"
7490 args := map[string]interface{}{
7491 "bucketName": "",
7492 "objectName": "",
7493 "fileName": "",
7494 "opts": "",
7495 }
7496
7497 // Seed random based on current time.
7498 rand.Seed(time.Now().Unix())
7499
7500 // Instantiate new minio client object.
7501 c, err := minio.New(os.Getenv(serverEndpoint),
7502 &minio.Options{
7503 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7504 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7505 })
7506 if err != nil {
7507 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7508 return
7509 }
7510
7511 // Enable tracing, write to stderr.
7512 // c.TraceOn(os.Stderr)
7513
7514 // Set user agent.
7515 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7516
7517 // Generate a new random bucket name.
7518 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7519 args["bucketName"] = bucketName
7520
7521 // Make a new bucket.
7522 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7523 if err != nil {
7524 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7525 return
7526 }
7527
7528 defer cleanupBucket(bucketName, c)
7529
7530 // Make a temp file with 11*1024*1024 bytes of data.
7531 file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
7532 if err != nil {
7533 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
7534 return
7535 }
7536
7537 r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
7538 n, err := io.CopyN(file, r, 11*1024*1024)
7539 if err != nil {
7540 logError(testName, function, args, startTime, "", "Copy failed", err)
7541 return
7542 }
7543 if n != int64(11*1024*1024) {
7544 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
7545 return
7546 }
7547
7548 // Close the file pro-actively for windows.
7549 err = file.Close()
7550 if err != nil {
7551 logError(testName, function, args, startTime, "", "File close failed", err)
7552 return
7553 }
7554
7555 // Set base object name
7556 objectName := bucketName + "FPutObject"
7557 args["objectName"] = objectName
7558 args["fileName"] = file.Name()
7559
7560 // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
7561 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
7562 if err != nil {
7563 logError(testName, function, args, startTime, "", "FPutObject failed", err)
7564 return
7565 }
7566
7567 // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
7568 args["objectName"] = objectName + "-Octet"
7569 args["contentType"] = ""
7570
7571 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
7572 if err != nil {
7573 logError(testName, function, args, startTime, "", "FPutObject failed", err)
7574 return
7575 }
7576
7577 // Add extension to temp file name
7578 fileName := file.Name()
7579 err = os.Rename(fileName, fileName+".gtar")
7580 if err != nil {
7581 logError(testName, function, args, startTime, "", "Rename failed", err)
7582 return
7583 }
7584
7585 // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
7586 args["objectName"] = objectName + "-Octet"
7587 args["contentType"] = ""
7588 args["fileName"] = fileName + ".gtar"
7589
7590 _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
7591 if err != nil {
7592 logError(testName, function, args, startTime, "", "FPutObject failed", err)
7593 return
7594 }
7595
7596 // Check headers and sizes
7597 rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
7598 if err != nil {
7599 logError(testName, function, args, startTime, "", "StatObject failed", err)
7600 return
7601 }
7602
7603 if rStandard.Size != 11*1024*1024 {
7604 logError(testName, function, args, startTime, "", "Unexpected size", nil)
7605 return
7606 }
7607
7608 if rStandard.ContentType != "application/octet-stream" {
7609 logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
7610 return
7611 }
7612
7613 rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
7614 if err != nil {
7615 logError(testName, function, args, startTime, "", "StatObject failed", err)
7616 return
7617 }
7618 if rOctet.ContentType != "application/octet-stream" {
7619 logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
7620 return
7621 }
7622
7623 if rOctet.Size != 11*1024*1024 {
7624 logError(testName, function, args, startTime, "", "Unexpected size", nil)
7625 return
7626 }
7627
7628 rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
7629 if err != nil {
7630 logError(testName, function, args, startTime, "", "StatObject failed", err)
7631 return
7632 }
7633 if rGTar.Size != 11*1024*1024 {
7634 logError(testName, function, args, startTime, "", "Unexpected size", nil)
7635 return
7636 }
7637 if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
7638 logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-tar , got "+rGTar.ContentType, err)
7639 return
7640 }
7641
7642 os.Remove(fileName + ".gtar")
7643 successLogger(testName, function, args, startTime).Info()
7644}
7645
// Tests various bucket supported formats.
func testMakeBucketRegionsV2() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "MakeBucket(bucketName, region)"
	args := map[string]interface{}{
		"bucketName": "",
		"region":     "eu-west-1",
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object using v2 signatures.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket in 'eu-west-1'.
	if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Remove the bucket before reusing the name below.
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
		return
	}

	// Make a new bucket with '.' in its name, in 'us-west-2'. This
	// request is internally staged into a path style instead of
	// virtual host style.
	if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil {
		args["bucketName"] = bucketName + ".withperiod"
		args["region"] = "us-west-2"
		logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
		logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
7710
7711// Tests get object ReaderSeeker interface methods.
7712func testGetObjectReadSeekFunctionalV2() {
7713 // initialize logging params
7714 startTime := time.Now()
7715 testName := getFuncName()
7716 function := "GetObject(bucketName, objectName)"
7717 args := map[string]interface{}{}
7718
7719 // Seed random based on current time.
7720 rand.Seed(time.Now().Unix())
7721
7722 // Instantiate new minio client object.
7723 c, err := minio.New(os.Getenv(serverEndpoint),
7724 &minio.Options{
7725 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7726 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7727 })
7728 if err != nil {
7729 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7730 return
7731 }
7732
7733 // Enable tracing, write to stderr.
7734 // c.TraceOn(os.Stderr)
7735
7736 // Set user agent.
7737 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7738
7739 // Generate a new random bucket name.
7740 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7741 args["bucketName"] = bucketName
7742
7743 // Make a new bucket.
7744 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7745 if err != nil {
7746 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7747 return
7748 }
7749
7750 defer cleanupBucket(bucketName, c)
7751
7752 // Generate 33K of data.
7753 bufSize := dataFileMap["datafile-33-kB"]
7754 reader := getDataReader("datafile-33-kB")
7755 defer reader.Close()
7756
7757 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
7758 args["objectName"] = objectName
7759
7760 buf, err := io.ReadAll(reader)
7761 if err != nil {
7762 logError(testName, function, args, startTime, "", "ReadAll failed", err)
7763 return
7764 }
7765
7766 // Save the data.
7767 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7768 if err != nil {
7769 logError(testName, function, args, startTime, "", "PutObject failed", err)
7770 return
7771 }
7772
7773 // Read the data back
7774 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7775 if err != nil {
7776 logError(testName, function, args, startTime, "", "GetObject failed", err)
7777 return
7778 }
7779 defer r.Close()
7780
7781 st, err := r.Stat()
7782 if err != nil {
7783 logError(testName, function, args, startTime, "", "Stat failed", err)
7784 return
7785 }
7786
7787 if st.Size != int64(bufSize) {
7788 logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
7789 return
7790 }
7791
7792 offset := int64(2048)
7793 n, err := r.Seek(offset, 0)
7794 if err != nil {
7795 logError(testName, function, args, startTime, "", "Seek failed", err)
7796 return
7797 }
7798 if n != offset {
7799 logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
7800 return
7801 }
7802 n, err = r.Seek(0, 1)
7803 if err != nil {
7804 logError(testName, function, args, startTime, "", "Seek failed", err)
7805 return
7806 }
7807 if n != offset {
7808 logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
7809 return
7810 }
7811 _, err = r.Seek(offset, 2)
7812 if err == nil {
7813 logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
7814 return
7815 }
7816 n, err = r.Seek(-offset, 2)
7817 if err != nil {
7818 logError(testName, function, args, startTime, "", "Seek failed", err)
7819 return
7820 }
7821 if n != st.Size-offset {
7822 logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err)
7823 return
7824 }
7825
7826 var buffer1 bytes.Buffer
7827 if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
7828 if err != io.EOF {
7829 logError(testName, function, args, startTime, "", "Copy failed", err)
7830 return
7831 }
7832 }
7833 if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
7834 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
7835 return
7836 }
7837
7838 // Seek again and read again.
7839 n, err = r.Seek(offset-1, 0)
7840 if err != nil {
7841 logError(testName, function, args, startTime, "", "Seek failed", err)
7842 return
7843 }
7844 if n != (offset - 1) {
7845 logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err)
7846 return
7847 }
7848
7849 var buffer2 bytes.Buffer
7850 if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
7851 if err != io.EOF {
7852 logError(testName, function, args, startTime, "", "Copy failed", err)
7853 return
7854 }
7855 }
7856 // Verify now lesser bytes.
7857 if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
7858 logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
7859 return
7860 }
7861
7862 successLogger(testName, function, args, startTime).Info()
7863}
7864
7865// Tests get object ReaderAt interface methods.
7866func testGetObjectReadAtFunctionalV2() {
7867 // initialize logging params
7868 startTime := time.Now()
7869 testName := getFuncName()
7870 function := "GetObject(bucketName, objectName)"
7871 args := map[string]interface{}{}
7872
7873 // Seed random based on current time.
7874 rand.Seed(time.Now().Unix())
7875
7876 // Instantiate new minio client object.
7877 c, err := minio.New(os.Getenv(serverEndpoint),
7878 &minio.Options{
7879 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
7880 Secure: mustParseBool(os.Getenv(enableHTTPS)),
7881 })
7882 if err != nil {
7883 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
7884 return
7885 }
7886
7887 // Enable tracing, write to stderr.
7888 // c.TraceOn(os.Stderr)
7889
7890 // Set user agent.
7891 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
7892
7893 // Generate a new random bucket name.
7894 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
7895 args["bucketName"] = bucketName
7896
7897 // Make a new bucket.
7898 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
7899 if err != nil {
7900 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
7901 return
7902 }
7903
7904 defer cleanupBucket(bucketName, c)
7905
7906 // Generate 33K of data.
7907 bufSize := dataFileMap["datafile-33-kB"]
7908 reader := getDataReader("datafile-33-kB")
7909 defer reader.Close()
7910
7911 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
7912 args["objectName"] = objectName
7913
7914 buf, err := io.ReadAll(reader)
7915 if err != nil {
7916 logError(testName, function, args, startTime, "", "ReadAll failed", err)
7917 return
7918 }
7919
7920 // Save the data
7921 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
7922 if err != nil {
7923 logError(testName, function, args, startTime, "", "PutObject failed", err)
7924 return
7925 }
7926
7927 // Read the data back
7928 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
7929 if err != nil {
7930 logError(testName, function, args, startTime, "", "GetObject failed", err)
7931 return
7932 }
7933 defer r.Close()
7934
7935 st, err := r.Stat()
7936 if err != nil {
7937 logError(testName, function, args, startTime, "", "Stat failed", err)
7938 return
7939 }
7940
7941 if st.Size != int64(bufSize) {
7942 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
7943 return
7944 }
7945
7946 offset := int64(2048)
7947
7948 // Read directly
7949 buf2 := make([]byte, 512)
7950 buf3 := make([]byte, 512)
7951 buf4 := make([]byte, 512)
7952
7953 m, err := r.ReadAt(buf2, offset)
7954 if err != nil {
7955 logError(testName, function, args, startTime, "", "ReadAt failed", err)
7956 return
7957 }
7958 if m != len(buf2) {
7959 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err)
7960 return
7961 }
7962 if !bytes.Equal(buf2, buf[offset:offset+512]) {
7963 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
7964 return
7965 }
7966 offset += 512
7967 m, err = r.ReadAt(buf3, offset)
7968 if err != nil {
7969 logError(testName, function, args, startTime, "", "ReadAt failed", err)
7970 return
7971 }
7972 if m != len(buf3) {
7973 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err)
7974 return
7975 }
7976 if !bytes.Equal(buf3, buf[offset:offset+512]) {
7977 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
7978 return
7979 }
7980 offset += 512
7981 m, err = r.ReadAt(buf4, offset)
7982 if err != nil {
7983 logError(testName, function, args, startTime, "", "ReadAt failed", err)
7984 return
7985 }
7986 if m != len(buf4) {
7987 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err)
7988 return
7989 }
7990 if !bytes.Equal(buf4, buf[offset:offset+512]) {
7991 logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
7992 return
7993 }
7994
7995 buf5 := make([]byte, bufSize)
7996 // Read the whole object.
7997 m, err = r.ReadAt(buf5, 0)
7998 if err != nil {
7999 if err != io.EOF {
8000 logError(testName, function, args, startTime, "", "ReadAt failed", err)
8001 return
8002 }
8003 }
8004 if m != len(buf5) {
8005 logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err)
8006 return
8007 }
8008 if !bytes.Equal(buf, buf5) {
8009 logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
8010 return
8011 }
8012
8013 buf6 := make([]byte, bufSize+1)
8014 // Read the whole object and beyond.
8015 _, err = r.ReadAt(buf6, 0)
8016 if err != nil {
8017 if err != io.EOF {
8018 logError(testName, function, args, startTime, "", "ReadAt failed", err)
8019 return
8020 }
8021 }
8022
8023 successLogger(testName, function, args, startTime).Info()
8024}
8025
8026// Tests copy object
8027func testCopyObjectV2() {
8028 // initialize logging params
8029 startTime := time.Now()
8030 testName := getFuncName()
8031 function := "CopyObject(destination, source)"
8032 args := map[string]interface{}{}
8033
8034 // Seed random based on current time.
8035 rand.Seed(time.Now().Unix())
8036
8037 // Instantiate new minio client object
8038 c, err := minio.New(os.Getenv(serverEndpoint),
8039 &minio.Options{
8040 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8041 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8042 })
8043 if err != nil {
8044 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8045 return
8046 }
8047
8048 // Enable tracing, write to stderr.
8049 // c.TraceOn(os.Stderr)
8050
8051 // Set user agent.
8052 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
8053
8054 // Generate a new random bucket name.
8055 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8056
8057 // Make a new bucket in 'us-east-1' (source bucket).
8058 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8059 if err != nil {
8060 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8061 return
8062 }
8063 defer cleanupBucket(bucketName, c)
8064
8065 // Make a new bucket in 'us-east-1' (destination bucket).
8066 err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"})
8067 if err != nil {
8068 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8069 return
8070 }
8071 defer cleanupBucket(bucketName+"-copy", c)
8072
8073 // Generate 33K of data.
8074 bufSize := dataFileMap["datafile-33-kB"]
8075 reader := getDataReader("datafile-33-kB")
8076 defer reader.Close()
8077
8078 // Save the data
8079 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
8080 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
8081 if err != nil {
8082 logError(testName, function, args, startTime, "", "PutObject failed", err)
8083 return
8084 }
8085
8086 r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
8087 if err != nil {
8088 logError(testName, function, args, startTime, "", "GetObject failed", err)
8089 return
8090 }
8091 // Check the various fields of source object against destination object.
8092 objInfo, err := r.Stat()
8093 if err != nil {
8094 logError(testName, function, args, startTime, "", "Stat failed", err)
8095 return
8096 }
8097 r.Close()
8098
8099 // Copy Source
8100 src := minio.CopySrcOptions{
8101 Bucket: bucketName,
8102 Object: objectName,
8103 MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
8104 MatchETag: objInfo.ETag,
8105 }
8106 args["source"] = src
8107
8108 // Set copy conditions.
8109 dst := minio.CopyDestOptions{
8110 Bucket: bucketName + "-copy",
8111 Object: objectName + "-copy",
8112 }
8113 args["destination"] = dst
8114
8115 // Perform the Copy
8116 _, err = c.CopyObject(context.Background(), dst, src)
8117 if err != nil {
8118 logError(testName, function, args, startTime, "", "CopyObject failed", err)
8119 return
8120 }
8121
8122 // Source object
8123 r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
8124 if err != nil {
8125 logError(testName, function, args, startTime, "", "GetObject failed", err)
8126 return
8127 }
8128 // Destination object
8129 readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
8130 if err != nil {
8131 logError(testName, function, args, startTime, "", "GetObject failed", err)
8132 return
8133 }
8134 // Check the various fields of source object against destination object.
8135 objInfo, err = r.Stat()
8136 if err != nil {
8137 logError(testName, function, args, startTime, "", "Stat failed", err)
8138 return
8139 }
8140 objInfoCopy, err := readerCopy.Stat()
8141 if err != nil {
8142 logError(testName, function, args, startTime, "", "Stat failed", err)
8143 return
8144 }
8145 if objInfo.Size != objInfoCopy.Size {
8146 logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err)
8147 return
8148 }
8149
8150 // Close all the readers.
8151 r.Close()
8152 readerCopy.Close()
8153
8154 // CopyObject again but with wrong conditions
8155 src = minio.CopySrcOptions{
8156 Bucket: bucketName,
8157 Object: objectName,
8158 MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
8159 NoMatchETag: objInfo.ETag,
8160 }
8161
8162 // Perform the Copy which should fail
8163 _, err = c.CopyObject(context.Background(), dst, src)
8164 if err == nil {
8165 logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
8166 return
8167 }
8168
8169 successLogger(testName, function, args, startTime).Info()
8170}
8171
// testComposeObjectErrorCasesWrapper drives two expected-failure cases of
// ComposeObject against the given client: (1) more than 10,000 sources, and
// (2) a source whose copy range extends past the end of the object. The
// shared wrapper lets both V2- and V4-signed clients reuse the same checks.
func testComposeObjectErrorCasesWrapper(c *minio.Client) {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "ComposeObject(destination, sourceList)"
	args := map[string]interface{}{}

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")

	// Make a new bucket in 'us-east-1' (source bucket).
	err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	// Test that more than 10K source objects cannot be
	// concatenated.
	srcArr := [10001]minio.CopySrcOptions{}
	srcSlice := srcArr[:]
	dst := minio.CopyDestOptions{
		Bucket: bucketName,
		Object: "object",
	}

	args["destination"] = dst
	// Just explain about srcArr in args["sourceList"]
	// to stop having 10,001 null headers logged
	args["sourceList"] = "source array of 10,001 elements"
	if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil {
		logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
		return
	} else if err.Error() != "There must be as least one and up to 10000 source objects." {
		// Exact-match against the SDK's client-side validation message
		// (typo "as least" is in the SDK itself, so it must stay here).
		logError(testName, function, args, startTime, "", "Got unexpected error", err)
		return
	}

	// Create a source with invalid offset spec and check that
	// error is returned:
	// 1. Create the source object.
	const badSrcSize = 5 * 1024 * 1024
	buf := bytes.Repeat([]byte("1"), badSrcSize)
	_, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}
	// 2. Set invalid range spec on the object (going beyond
	// object size)
	// End == badSrcSize is one past the last valid zero-based offset
	// (badSrcSize-1), so this segment is out of range.
	badSrc := minio.CopySrcOptions{
		Bucket:     bucketName,
		Object:     "badObject",
		MatchRange: true,
		Start:      1,
		End:        badSrcSize,
	}

	// 3. ComposeObject call should fail.
	if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil {
		logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err)
		return
	} else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
		logError(testName, function, args, startTime, "", "Got invalid error", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
8243
8244// Test expected error cases
8245func testComposeObjectErrorCasesV2() {
8246 // initialize logging params
8247 startTime := time.Now()
8248 testName := getFuncName()
8249 function := "ComposeObject(destination, sourceList)"
8250 args := map[string]interface{}{}
8251
8252 // Instantiate new minio client object
8253 c, err := minio.New(os.Getenv(serverEndpoint),
8254 &minio.Options{
8255 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8256 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8257 })
8258 if err != nil {
8259 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8260 return
8261 }
8262
8263 testComposeObjectErrorCasesWrapper(c)
8264}
8265
8266func testComposeMultipleSources(c *minio.Client) {
8267 // initialize logging params
8268 startTime := time.Now()
8269 testName := getFuncName()
8270 function := "ComposeObject(destination, sourceList)"
8271 args := map[string]interface{}{
8272 "destination": "",
8273 "sourceList": "",
8274 }
8275
8276 // Generate a new random bucket name.
8277 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8278 // Make a new bucket in 'us-east-1' (source bucket).
8279 err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8280 if err != nil {
8281 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8282 return
8283 }
8284
8285 defer cleanupBucket(bucketName, c)
8286
8287 // Upload a small source object
8288 const srcSize = 1024 * 1024 * 5
8289 buf := bytes.Repeat([]byte("1"), srcSize)
8290 _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
8291 if err != nil {
8292 logError(testName, function, args, startTime, "", "PutObject failed", err)
8293 return
8294 }
8295
8296 // We will append 10 copies of the object.
8297 srcs := []minio.CopySrcOptions{}
8298 for i := 0; i < 10; i++ {
8299 srcs = append(srcs, minio.CopySrcOptions{
8300 Bucket: bucketName,
8301 Object: "srcObject",
8302 })
8303 }
8304
8305 // make the last part very small
8306 srcs[9].MatchRange = true
8307
8308 args["sourceList"] = srcs
8309
8310 dst := minio.CopyDestOptions{
8311 Bucket: bucketName,
8312 Object: "dstObject",
8313 }
8314 args["destination"] = dst
8315
8316 ui, err := c.ComposeObject(context.Background(), dst, srcs...)
8317 if err != nil {
8318 logError(testName, function, args, startTime, "", "ComposeObject failed", err)
8319 return
8320 }
8321
8322 if ui.Size != 9*srcSize+1 {
8323 logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err)
8324 return
8325 }
8326
8327 objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{})
8328 if err != nil {
8329 logError(testName, function, args, startTime, "", "StatObject failed", err)
8330 return
8331 }
8332
8333 if objProps.Size != 9*srcSize+1 {
8334 logError(testName, function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err)
8335 return
8336 }
8337
8338 successLogger(testName, function, args, startTime).Info()
8339}
8340
8341// Test concatenating multiple 10K objects V2
8342func testCompose10KSourcesV2() {
8343 // initialize logging params
8344 startTime := time.Now()
8345 testName := getFuncName()
8346 function := "ComposeObject(destination, sourceList)"
8347 args := map[string]interface{}{}
8348
8349 // Instantiate new minio client object
8350 c, err := minio.New(os.Getenv(serverEndpoint),
8351 &minio.Options{
8352 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8353 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8354 })
8355 if err != nil {
8356 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8357 return
8358 }
8359
8360 testComposeMultipleSources(c)
8361}
8362
// testEncryptedEmptyObject verifies SSE-C handling of a zero-byte object:
// upload empty+encrypted, server-side copy to a new key, rotate the key
// in place, then download and confirm the body is still empty.
func testEncryptedEmptyObject() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
	args := map[string]interface{}{}

	// Instantiate new minio client object
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName
	// Make a new bucket in 'us-east-1' (source bucket).
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	// Derive the SSE-C key deterministically from bucket+object name.
	sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))

	// 1. create an sse-c encrypted object to copy by uploading
	const srcSize = 0
	var buf []byte // Empty buffer
	args["objectName"] = "object"
	_, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject call failed", err)
		return
	}

	// 2. Test CopyObject for an empty object
	src := minio.CopySrcOptions{
		Bucket:     bucketName,
		Object:     "object",
		Encryption: sse,
	}

	dst := minio.CopyDestOptions{
		Bucket:     bucketName,
		Object:     "new-object",
		Encryption: sse,
	}

	if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
		// function is reassigned so the failure is attributed to the
		// copy step in the log output.
		function = "CopyObject(dst, src)"
		logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
		return
	}

	// 3. Test Key rotation: same object name for src and dst, but a
	// different SSE-C key on the destination side.
	newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object"))
	src = minio.CopySrcOptions{
		Bucket:     bucketName,
		Object:     "new-object",
		Encryption: sse,
	}

	dst = minio.CopyDestOptions{
		Bucket:     bucketName,
		Object:     "new-object",
		Encryption: newSSE,
	}

	if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
		function = "CopyObject(dst, src)"
		logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err)
		return
	}

	// 4. Download the object (must use the rotated key).
	reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE})
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}
	defer reader.Close()

	decBytes, err := io.ReadAll(reader)
	if err != nil {
		logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
		return
	}
	// buf is nil here, so this asserts the download is empty too.
	if !bytes.Equal(decBytes, buf) {
		logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err)
		return
	}

	delete(args, "objectName")
	successLogger(testName, function, args, startTime).Info()
}
8465
8466func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) {
8467 // initialize logging params
8468 startTime := time.Now()
8469 testName := getFuncNameLoc(2)
8470 function := "CopyObject(destination, source)"
8471 args := map[string]interface{}{}
8472 var srcEncryption, dstEncryption encrypt.ServerSide
8473
8474 // Make a new bucket in 'us-east-1' (source bucket).
8475 err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8476 if err != nil {
8477 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8478 return
8479 }
8480
8481 defer cleanupBucket(bucketName, c)
8482
8483 // 1. create an sse-c encrypted object to copy by uploading
8484 const srcSize = 1024 * 1024
8485 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
8486 _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
8487 ServerSideEncryption: sseSrc,
8488 })
8489 if err != nil {
8490 logError(testName, function, args, startTime, "", "PutObject call failed", err)
8491 return
8492 }
8493
8494 if sseSrc != nil && sseSrc.Type() != encrypt.S3 {
8495 srcEncryption = sseSrc
8496 }
8497
8498 // 2. copy object and change encryption key
8499 src := minio.CopySrcOptions{
8500 Bucket: bucketName,
8501 Object: "srcObject",
8502 Encryption: srcEncryption,
8503 }
8504 args["source"] = src
8505
8506 dst := minio.CopyDestOptions{
8507 Bucket: bucketName,
8508 Object: "dstObject",
8509 Encryption: sseDst,
8510 }
8511 args["destination"] = dst
8512
8513 _, err = c.CopyObject(context.Background(), dst, src)
8514 if err != nil {
8515 logError(testName, function, args, startTime, "", "CopyObject failed", err)
8516 return
8517 }
8518
8519 if sseDst != nil && sseDst.Type() != encrypt.S3 {
8520 dstEncryption = sseDst
8521 }
8522 // 3. get copied object and check if content is equal
8523 coreClient := minio.Core{c}
8524 reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption})
8525 if err != nil {
8526 logError(testName, function, args, startTime, "", "GetObject failed", err)
8527 return
8528 }
8529
8530 decBytes, err := io.ReadAll(reader)
8531 if err != nil {
8532 logError(testName, function, args, startTime, "", "ReadAll failed", err)
8533 return
8534 }
8535 if !bytes.Equal(decBytes, buf) {
8536 logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
8537 return
8538 }
8539 reader.Close()
8540
8541 // Test key rotation for source object in-place.
8542 var newSSE encrypt.ServerSide
8543 if sseSrc != nil && sseSrc.Type() == encrypt.SSEC {
8544 newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key
8545 }
8546 if sseSrc != nil && sseSrc.Type() == encrypt.S3 {
8547 newSSE = encrypt.NewSSE()
8548 }
8549 if newSSE != nil {
8550 dst = minio.CopyDestOptions{
8551 Bucket: bucketName,
8552 Object: "srcObject",
8553 Encryption: newSSE,
8554 }
8555 args["destination"] = dst
8556
8557 _, err = c.CopyObject(context.Background(), dst, src)
8558 if err != nil {
8559 logError(testName, function, args, startTime, "", "CopyObject failed", err)
8560 return
8561 }
8562
8563 // Get copied object and check if content is equal
8564 reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE})
8565 if err != nil {
8566 logError(testName, function, args, startTime, "", "GetObject failed", err)
8567 return
8568 }
8569
8570 decBytes, err = io.ReadAll(reader)
8571 if err != nil {
8572 logError(testName, function, args, startTime, "", "ReadAll failed", err)
8573 return
8574 }
8575 if !bytes.Equal(decBytes, buf) {
8576 logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
8577 return
8578 }
8579 reader.Close()
8580
8581 // Test in-place decryption.
8582 dst = minio.CopyDestOptions{
8583 Bucket: bucketName,
8584 Object: "srcObject",
8585 }
8586 args["destination"] = dst
8587
8588 src = minio.CopySrcOptions{
8589 Bucket: bucketName,
8590 Object: "srcObject",
8591 Encryption: newSSE,
8592 }
8593 args["source"] = src
8594 _, err = c.CopyObject(context.Background(), dst, src)
8595 if err != nil {
8596 logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err)
8597 return
8598 }
8599 }
8600
8601 // Get copied decrypted object and check if content is equal
8602 reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{})
8603 if err != nil {
8604 logError(testName, function, args, startTime, "", "GetObject failed", err)
8605 return
8606 }
8607 defer reader.Close()
8608
8609 decBytes, err = io.ReadAll(reader)
8610 if err != nil {
8611 logError(testName, function, args, startTime, "", "ReadAll failed", err)
8612 return
8613 }
8614 if !bytes.Equal(decBytes, buf) {
8615 logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
8616 return
8617 }
8618
8619 successLogger(testName, function, args, startTime).Info()
8620}
8621
8622// Test encrypted copy object
8623func testUnencryptedToSSECCopyObject() {
8624 // initialize logging params
8625 startTime := time.Now()
8626 testName := getFuncName()
8627 function := "CopyObject(destination, source)"
8628 args := map[string]interface{}{}
8629
8630 // Instantiate new minio client object
8631 c, err := minio.New(os.Getenv(serverEndpoint),
8632 &minio.Options{
8633 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8634 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8635 })
8636 if err != nil {
8637 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8638 return
8639 }
8640 // Generate a new random bucket name.
8641 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8642
8643 sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
8644 // c.TraceOn(os.Stderr)
8645 testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst)
8646}
8647
8648// Test encrypted copy object
8649func testUnencryptedToSSES3CopyObject() {
8650 // initialize logging params
8651 startTime := time.Now()
8652 testName := getFuncName()
8653 function := "CopyObject(destination, source)"
8654 args := map[string]interface{}{}
8655
8656 // Instantiate new minio client object
8657 c, err := minio.New(os.Getenv(serverEndpoint),
8658 &minio.Options{
8659 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8660 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8661 })
8662 if err != nil {
8663 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8664 return
8665 }
8666 // Generate a new random bucket name.
8667 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8668
8669 var sseSrc encrypt.ServerSide
8670 sseDst := encrypt.NewSSE()
8671 // c.TraceOn(os.Stderr)
8672 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8673}
8674
8675// Test encrypted copy object
8676func testUnencryptedToUnencryptedCopyObject() {
8677 // initialize logging params
8678 startTime := time.Now()
8679 testName := getFuncName()
8680 function := "CopyObject(destination, source)"
8681 args := map[string]interface{}{}
8682
8683 // Instantiate new minio client object
8684 c, err := minio.New(os.Getenv(serverEndpoint),
8685 &minio.Options{
8686 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8687 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8688 })
8689 if err != nil {
8690 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8691 return
8692 }
8693 // Generate a new random bucket name.
8694 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8695
8696 var sseSrc, sseDst encrypt.ServerSide
8697 // c.TraceOn(os.Stderr)
8698 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8699}
8700
8701// Test encrypted copy object
8702func testEncryptedSSECToSSECCopyObject() {
8703 // initialize logging params
8704 startTime := time.Now()
8705 testName := getFuncName()
8706 function := "CopyObject(destination, source)"
8707 args := map[string]interface{}{}
8708
8709 // Instantiate new minio client object
8710 c, err := minio.New(os.Getenv(serverEndpoint),
8711 &minio.Options{
8712 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8713 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8714 })
8715 if err != nil {
8716 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8717 return
8718 }
8719 // Generate a new random bucket name.
8720 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8721
8722 sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
8723 sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
8724 // c.TraceOn(os.Stderr)
8725 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8726}
8727
8728// Test encrypted copy object
8729func testEncryptedSSECToSSES3CopyObject() {
8730 // initialize logging params
8731 startTime := time.Now()
8732 testName := getFuncName()
8733 function := "CopyObject(destination, source)"
8734 args := map[string]interface{}{}
8735
8736 // Instantiate new minio client object
8737 c, err := minio.New(os.Getenv(serverEndpoint),
8738 &minio.Options{
8739 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8740 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8741 })
8742 if err != nil {
8743 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8744 return
8745 }
8746 // Generate a new random bucket name.
8747 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8748
8749 sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
8750 sseDst := encrypt.NewSSE()
8751 // c.TraceOn(os.Stderr)
8752 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8753}
8754
8755// Test encrypted copy object
8756func testEncryptedSSECToUnencryptedCopyObject() {
8757 // initialize logging params
8758 startTime := time.Now()
8759 testName := getFuncName()
8760 function := "CopyObject(destination, source)"
8761 args := map[string]interface{}{}
8762
8763 // Instantiate new minio client object
8764 c, err := minio.New(os.Getenv(serverEndpoint),
8765 &minio.Options{
8766 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8767 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8768 })
8769 if err != nil {
8770 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8771 return
8772 }
8773 // Generate a new random bucket name.
8774 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8775
8776 sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
8777 var sseDst encrypt.ServerSide
8778 // c.TraceOn(os.Stderr)
8779 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8780}
8781
8782// Test encrypted copy object
8783func testEncryptedSSES3ToSSECCopyObject() {
8784 // initialize logging params
8785 startTime := time.Now()
8786 testName := getFuncName()
8787 function := "CopyObject(destination, source)"
8788 args := map[string]interface{}{}
8789
8790 // Instantiate new minio client object
8791 c, err := minio.New(os.Getenv(serverEndpoint),
8792 &minio.Options{
8793 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8794 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8795 })
8796 if err != nil {
8797 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8798 return
8799 }
8800 // Generate a new random bucket name.
8801 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8802
8803 sseSrc := encrypt.NewSSE()
8804 sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
8805 // c.TraceOn(os.Stderr)
8806 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8807}
8808
8809// Test encrypted copy object
8810func testEncryptedSSES3ToSSES3CopyObject() {
8811 // initialize logging params
8812 startTime := time.Now()
8813 testName := getFuncName()
8814 function := "CopyObject(destination, source)"
8815 args := map[string]interface{}{}
8816
8817 // Instantiate new minio client object
8818 c, err := minio.New(os.Getenv(serverEndpoint),
8819 &minio.Options{
8820 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8821 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8822 })
8823 if err != nil {
8824 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8825 return
8826 }
8827 // Generate a new random bucket name.
8828 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8829
8830 sseSrc := encrypt.NewSSE()
8831 sseDst := encrypt.NewSSE()
8832 // c.TraceOn(os.Stderr)
8833 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8834}
8835
8836// Test encrypted copy object
8837func testEncryptedSSES3ToUnencryptedCopyObject() {
8838 // initialize logging params
8839 startTime := time.Now()
8840 testName := getFuncName()
8841 function := "CopyObject(destination, source)"
8842 args := map[string]interface{}{}
8843
8844 // Instantiate new minio client object
8845 c, err := minio.New(os.Getenv(serverEndpoint),
8846 &minio.Options{
8847 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8848 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8849 })
8850 if err != nil {
8851 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8852 return
8853 }
8854 // Generate a new random bucket name.
8855 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8856
8857 sseSrc := encrypt.NewSSE()
8858 var sseDst encrypt.ServerSide
8859 // c.TraceOn(os.Stderr)
8860 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8861}
8862
8863// Test encrypted copy object
8864func testEncryptedCopyObjectV2() {
8865 // initialize logging params
8866 startTime := time.Now()
8867 testName := getFuncName()
8868 function := "CopyObject(destination, source)"
8869 args := map[string]interface{}{}
8870
8871 // Instantiate new minio client object
8872 c, err := minio.New(os.Getenv(serverEndpoint),
8873 &minio.Options{
8874 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8875 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8876 })
8877 if err != nil {
8878 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8879 return
8880 }
8881 // Generate a new random bucket name.
8882 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
8883
8884 sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
8885 sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
8886 // c.TraceOn(os.Stderr)
8887 testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
8888}
8889
8890func testDecryptedCopyObject() {
8891 // initialize logging params
8892 startTime := time.Now()
8893 testName := getFuncName()
8894 function := "CopyObject(destination, source)"
8895 args := map[string]interface{}{}
8896
8897 // Instantiate new minio client object
8898 c, err := minio.New(os.Getenv(serverEndpoint),
8899 &minio.Options{
8900 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8901 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8902 })
8903 if err != nil {
8904 logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
8905 return
8906 }
8907
8908 bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object"
8909 if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil {
8910 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8911 return
8912 }
8913
8914 defer cleanupBucket(bucketName, c)
8915
8916 encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName))
8917 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{
8918 ServerSideEncryption: encryption,
8919 })
8920 if err != nil {
8921 logError(testName, function, args, startTime, "", "PutObject call failed", err)
8922 return
8923 }
8924
8925 src := minio.CopySrcOptions{
8926 Bucket: bucketName,
8927 Object: objectName,
8928 Encryption: encrypt.SSECopy(encryption),
8929 }
8930 args["source"] = src
8931
8932 dst := minio.CopyDestOptions{
8933 Bucket: bucketName,
8934 Object: "decrypted-" + objectName,
8935 }
8936 args["destination"] = dst
8937
8938 if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
8939 logError(testName, function, args, startTime, "", "CopyObject failed", err)
8940 return
8941 }
8942 if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil {
8943 logError(testName, function, args, startTime, "", "GetObject failed", err)
8944 return
8945 }
8946 successLogger(testName, function, args, startTime).Info()
8947}
8948
8949func testSSECMultipartEncryptedToSSECCopyObjectPart() {
8950 // initialize logging params
8951 startTime := time.Now()
8952 testName := getFuncName()
8953 function := "CopyObjectPart(destination, source)"
8954 args := map[string]interface{}{}
8955
8956 // Instantiate new minio client object
8957 client, err := minio.New(os.Getenv(serverEndpoint),
8958 &minio.Options{
8959 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
8960 Secure: mustParseBool(os.Getenv(enableHTTPS)),
8961 })
8962 if err != nil {
8963 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
8964 return
8965 }
8966
8967 // Instantiate new core client object.
8968 c := minio.Core{client}
8969
8970 // Enable tracing, write to stderr.
8971 // c.TraceOn(os.Stderr)
8972
8973 // Set user agent.
8974 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
8975
8976 // Generate a new random bucket name.
8977 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
8978
8979 // Make a new bucket.
8980 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
8981 if err != nil {
8982 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
8983 return
8984 }
8985 defer cleanupBucket(bucketName, client)
8986 // Make a buffer with 6MB of data
8987 buf := bytes.Repeat([]byte("abcdef"), 1024*1024)
8988
8989 // Save the data
8990 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
8991 password := "correct horse battery staple"
8992 srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
8993
8994 // Upload a 6MB object using multipart mechanism
8995 uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption})
8996 if err != nil {
8997 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
8998 return
8999 }
9000
9001 var completeParts []minio.CompletePart
9002
9003 part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1,
9004 bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024,
9005 minio.PutObjectPartOptions{SSE: srcencryption},
9006 )
9007 if err != nil {
9008 logError(testName, function, args, startTime, "", "PutObjectPart call failed", err)
9009 return
9010 }
9011 completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag})
9012
9013 part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2,
9014 bytes.NewReader(buf[5*1024*1024:]), 1024*1024,
9015 minio.PutObjectPartOptions{SSE: srcencryption},
9016 )
9017 if err != nil {
9018 logError(testName, function, args, startTime, "", "PutObjectPart call failed", err)
9019 return
9020 }
9021 completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag})
9022
9023 // Complete the multipart upload
9024 _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{})
9025 if err != nil {
9026 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9027 return
9028 }
9029
9030 // Stat the object and check its length matches
9031 objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
9032 if err != nil {
9033 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9034 return
9035 }
9036
9037 destBucketName := bucketName
9038 destObjectName := objectName + "-dest"
9039 dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
9040
9041 uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
9042 if err != nil {
9043 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
9044 return
9045 }
9046
9047 // Content of the destination object will be two copies of
9048 // `objectName` concatenated, followed by first byte of
9049 // `objectName`.
9050 metadata := make(map[string]string)
9051 header := make(http.Header)
9052 encrypt.SSECopy(srcencryption).Marshal(header)
9053 dstencryption.Marshal(header)
9054 for k, v := range header {
9055 metadata[k] = v[0]
9056 }
9057
9058 metadata["x-amz-copy-source-if-match"] = objInfo.ETag
9059
9060 // First of three parts
9061 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
9062 if err != nil {
9063 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9064 return
9065 }
9066
9067 // Second of three parts
9068 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
9069 if err != nil {
9070 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9071 return
9072 }
9073
9074 // Last of three parts
9075 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
9076 if err != nil {
9077 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9078 return
9079 }
9080
9081 // Complete the multipart upload
9082 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
9083 if err != nil {
9084 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9085 return
9086 }
9087
9088 // Stat the object and check its length matches
9089 objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
9090 if err != nil {
9091 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9092 return
9093 }
9094
9095 if objInfo.Size != (6*1024*1024)*2+1 {
9096 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
9097 return
9098 }
9099
9100 // Now we read the data back
9101 getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
9102 getOpts.SetRange(0, 6*1024*1024-1)
9103 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9104 if err != nil {
9105 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9106 return
9107 }
9108 getBuf := make([]byte, 6*1024*1024)
9109 _, err = readFull(r, getBuf)
9110 if err != nil {
9111 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9112 return
9113 }
9114 if !bytes.Equal(getBuf, buf) {
9115 logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err)
9116 return
9117 }
9118
9119 getOpts.SetRange(6*1024*1024, 0)
9120 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9121 if err != nil {
9122 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9123 return
9124 }
9125 getBuf = make([]byte, 6*1024*1024+1)
9126 _, err = readFull(r, getBuf)
9127 if err != nil {
9128 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9129 return
9130 }
9131 if !bytes.Equal(getBuf[:6*1024*1024], buf) {
9132 logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err)
9133 return
9134 }
9135 if getBuf[6*1024*1024] != buf[0] {
9136 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
9137 return
9138 }
9139
9140 successLogger(testName, function, args, startTime).Info()
9141
9142 // Do not need to remove destBucketName its same as bucketName.
9143}
9144
// Test Core CopyObjectPart implementation: server-side multipart copy from
// an SSE-C encrypted source object into an SSE-C encrypted destination
// object (different key), then verify the destination's size and content.
func testSSECEncryptedToSSECCopyObjectPart() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "CopyObjectPart(destination, source)"
	args := map[string]interface{}{}

	// Instantiate new minio client object
	client, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Instantiate new core client object (low-level S3 API wrapper).
	c := minio.Core{client}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}
	defer cleanupBucket(bucketName, client)
	// Make a buffer with 5MB of data ("abcde" repeated 1Mi times).
	buf := bytes.Repeat([]byte("abcde"), 1024*1024)

	// Save the data: upload the source object under an SSE-C key derived
	// from the bucket+object name.
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	password := "correct horse battery staple"
	srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
	putmetadata := map[string]string{
		"Content-Type": "binary/octet-stream",
	}
	opts := minio.PutObjectOptions{
		UserMetadata:         putmetadata,
		ServerSideEncryption: srcencryption,
	}
	uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject call failed", err)
		return
	}

	// Stat the source; SSE-C requires the key even for a HEAD request.
	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	if st.Size != int64(len(buf)) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
		return
	}

	// Destination lives in the same bucket, under a different name and a
	// different SSE-C key.
	destBucketName := bucketName
	destObjectName := objectName + "-dest"
	dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))

	uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
		return
	}

	// Content of the destination object will be two full copies of the
	// source object's content concatenated, followed by the first byte of
	// the source content (copied as parts 1, 2 and 3 below).
	metadata := make(map[string]string)
	header := make(http.Header)
	// SSECopy marshals the *source* object's SSE-C key headers (so the
	// server can decrypt the source during the copy); dstencryption
	// marshals the destination key. Both are flattened into the metadata
	// map passed to each CopyObjectPart call.
	encrypt.SSECopy(srcencryption).Marshal(header)
	dstencryption.Marshal(header)
	for k, v := range header {
		metadata[k] = v[0]
	}

	// Guard the copy against a concurrent overwrite of the source.
	metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag

	// First of three parts; startOffset=0 with length=-1 copies the whole
	// source object.
	fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Second of three parts (again the whole source object).
	sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Last of three parts; length=1 copies only the first byte.
	lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Complete the multipart upload
	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
		return
	}

	// Stat the object and check its length matches
	objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	// Expected size: two full 5MB copies plus the single trailing byte.
	if objInfo.Size != (5*1024*1024)*2+1 {
		logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
		return
	}

	// Now we read the data back: first the leading 5MB copy.
	getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
	getOpts.SetRange(0, 5*1024*1024-1)
	r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf := make([]byte, 5*1024*1024)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf, buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
		return
	}

	// Then the second copy plus the trailing byte; SetRange(offset, 0)
	// requests from offset through the end of the object.
	getOpts.SetRange(5*1024*1024, 0)
	r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf = make([]byte, 5*1024*1024+1)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf[:5*1024*1024], buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
		return
	}
	if getBuf[5*1024*1024] != buf[0] {
		logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()

	// Do not need to remove destBucketName its same as bucketName.
}
9321
// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted
// copy: the source object is SSE-C encrypted, the multipart-copy destination
// is plain (no server-side encryption). Verifies size and content.
func testSSECEncryptedToUnencryptedCopyPart() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "CopyObjectPart(destination, source)"
	args := map[string]interface{}{}

	// Instantiate new minio client object
	client, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Instantiate new core client object (low-level S3 API wrapper).
	c := minio.Core{client}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}
	defer cleanupBucket(bucketName, client)
	// Make a buffer with 5MB of data ("abcde" repeated 1Mi times).
	buf := bytes.Repeat([]byte("abcde"), 1024*1024)

	// Save the data: upload the source object under an SSE-C key derived
	// from the bucket+object name.
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	password := "correct horse battery staple"
	srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))

	opts := minio.PutObjectOptions{
		UserMetadata: map[string]string{
			"Content-Type": "binary/octet-stream",
		},
		ServerSideEncryption: srcencryption,
	}
	uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject call failed", err)
		return
	}

	// Stat the source; SSE-C requires the key even for a HEAD request.
	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	if st.Size != int64(len(buf)) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
		return
	}

	// Destination lives in the same bucket under a different name; the
	// zero value of encrypt.ServerSide (nil interface) means the
	// destination is written unencrypted.
	destBucketName := bucketName
	destObjectName := objectName + "-dest"
	var dstencryption encrypt.ServerSide

	uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
		return
	}

	// Content of the destination object will be two full copies of the
	// source object's content concatenated, followed by the first byte of
	// the source content (copied as parts 1, 2 and 3 below).
	metadata := make(map[string]string)
	header := make(http.Header)
	// Only the *source* object's SSE-C key is marshaled (SSECopy headers)
	// so the server can decrypt the source; no destination key is sent.
	encrypt.SSECopy(srcencryption).Marshal(header)
	for k, v := range header {
		metadata[k] = v[0]
	}

	// Guard the copy against a concurrent overwrite of the source.
	metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag

	// First of three parts; startOffset=0 with length=-1 copies the whole
	// source object.
	fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Second of three parts (again the whole source object).
	sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Last of three parts; length=1 copies only the first byte.
	lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Complete the multipart upload
	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
		return
	}

	// Stat the object and check its length matches; destination is
	// unencrypted, so no key is needed.
	objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	// Expected size: two full 5MB copies plus the single trailing byte.
	if objInfo.Size != (5*1024*1024)*2+1 {
		logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
		return
	}

	// Now we read the data back: first the leading 5MB copy.
	getOpts := minio.GetObjectOptions{}
	getOpts.SetRange(0, 5*1024*1024-1)
	r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf := make([]byte, 5*1024*1024)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf, buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
		return
	}

	// Then the second copy plus the trailing byte; SetRange(offset, 0)
	// requests from offset through the end of the object.
	getOpts.SetRange(5*1024*1024, 0)
	r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf = make([]byte, 5*1024*1024+1)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf[:5*1024*1024], buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
		return
	}
	if getBuf[5*1024*1024] != buf[0] {
		logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()

	// Do not need to remove destBucketName its same as bucketName.
}
9497
// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3
// encrypted copy: the source uses a customer-provided key (SSE-C), the
// destination uses server-managed SSE-S3 encryption. Verifies size and
// content of the copied object.
func testSSECEncryptedToSSES3CopyObjectPart() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "CopyObjectPart(destination, source)"
	args := map[string]interface{}{}

	// Instantiate new minio client object
	client, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Instantiate new core client object (low-level S3 API wrapper).
	c := minio.Core{client}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}
	defer cleanupBucket(bucketName, client)
	// Make a buffer with 5MB of data ("abcde" repeated 1Mi times).
	buf := bytes.Repeat([]byte("abcde"), 1024*1024)

	// Save the data: upload the source object under an SSE-C key derived
	// from the bucket+object name.
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	password := "correct horse battery staple"
	srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
	putmetadata := map[string]string{
		"Content-Type": "binary/octet-stream",
	}
	opts := minio.PutObjectOptions{
		UserMetadata:         putmetadata,
		ServerSideEncryption: srcencryption,
	}

	uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject call failed", err)
		return
	}

	// Stat the source; SSE-C requires the key even for a HEAD request.
	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	if st.Size != int64(len(buf)) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
		return
	}

	// Destination lives in the same bucket under a different name and is
	// encrypted with server-managed SSE-S3.
	destBucketName := bucketName
	destObjectName := objectName + "-dest"
	dstencryption := encrypt.NewSSE()

	uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
		return
	}

	// Content of the destination object will be two full copies of the
	// source object's content concatenated, followed by the first byte of
	// the source content (copied as parts 1, 2 and 3 below).
	metadata := make(map[string]string)
	header := make(http.Header)
	// SSECopy marshals the *source* object's SSE-C key headers (so the
	// server can decrypt the source during the copy); dstencryption adds
	// the SSE-S3 request header for the destination.
	encrypt.SSECopy(srcencryption).Marshal(header)
	dstencryption.Marshal(header)

	for k, v := range header {
		metadata[k] = v[0]
	}

	// Guard the copy against a concurrent overwrite of the source.
	metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag

	// First of three parts; startOffset=0 with length=-1 copies the whole
	// source object.
	fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Second of three parts (again the whole source object).
	sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Last of three parts; length=1 copies only the first byte.
	lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Complete the multipart upload
	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
		return
	}

	// Stat the object and check its length matches; SSE-S3 decryption is
	// transparent, so no key is needed on read.
	objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	// Expected size: two full 5MB copies plus the single trailing byte.
	if objInfo.Size != (5*1024*1024)*2+1 {
		logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
		return
	}

	// Now we read the data back: first the leading 5MB copy.
	getOpts := minio.GetObjectOptions{}
	getOpts.SetRange(0, 5*1024*1024-1)
	r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf := make([]byte, 5*1024*1024)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf, buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
		return
	}

	// Then the second copy plus the trailing byte; SetRange(offset, 0)
	// requests from offset through the end of the object.
	getOpts.SetRange(5*1024*1024, 0)
	r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf = make([]byte, 5*1024*1024+1)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf[:5*1024*1024], buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
		return
	}
	if getBuf[5*1024*1024] != buf[0] {
		logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()

	// Do not need to remove destBucketName its same as bucketName.
}
9676
// Test Core CopyObjectPart implementation for unencrypted to SSEC
// encryption copy part: the source object is plain, the multipart-copy
// destination is SSE-C encrypted. Verifies size and content.
func testUnencryptedToSSECCopyObjectPart() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "CopyObjectPart(destination, source)"
	args := map[string]interface{}{}

	// Instantiate new minio client object
	client, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Instantiate new core client object (low-level S3 API wrapper).
	c := minio.Core{client}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}
	defer cleanupBucket(bucketName, client)
	// Make a buffer with 5MB of data ("abcde" repeated 1Mi times).
	buf := bytes.Repeat([]byte("abcde"), 1024*1024)

	// Save the data: the source object is uploaded without any
	// server-side encryption. The password is used only to derive the
	// destination's SSE-C key below.
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	password := "correct horse battery staple"
	putmetadata := map[string]string{
		"Content-Type": "binary/octet-stream",
	}
	opts := minio.PutObjectOptions{
		UserMetadata: putmetadata,
	}
	uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject call failed", err)
		return
	}

	// Stat the (unencrypted) source; no key required.
	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	if st.Size != int64(len(buf)) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
		return
	}

	// Destination lives in the same bucket, under a different name and an
	// SSE-C key derived from the destination bucket+object name.
	destBucketName := bucketName
	destObjectName := objectName + "-dest"
	dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))

	uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
		return
	}

	// Content of the destination object will be two full copies of the
	// source object's content concatenated, followed by the first byte of
	// the source content (copied as parts 1, 2 and 3 below).
	metadata := make(map[string]string)
	header := make(http.Header)
	// Only the destination SSE-C key is marshaled; the source is
	// unencrypted, so no copy-source key headers are needed.
	dstencryption.Marshal(header)
	for k, v := range header {
		metadata[k] = v[0]
	}

	// Guard the copy against a concurrent overwrite of the source.
	metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag

	// First of three parts; startOffset=0 with length=-1 copies the whole
	// source object.
	fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Second of three parts (again the whole source object).
	sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Last of three parts; length=1 copies only the first byte.
	lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Complete the multipart upload
	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
		return
	}

	// Stat the object and check its length matches; the destination is
	// SSE-C, so its key is required.
	objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	// Expected size: two full 5MB copies plus the single trailing byte.
	if objInfo.Size != (5*1024*1024)*2+1 {
		logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
		return
	}

	// Now we read the data back: first the leading 5MB copy.
	getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
	getOpts.SetRange(0, 5*1024*1024-1)
	r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf := make([]byte, 5*1024*1024)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf, buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
		return
	}

	// Then the second copy plus the trailing byte; SetRange(offset, 0)
	// requests from offset through the end of the object.
	getOpts.SetRange(5*1024*1024, 0)
	r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf = make([]byte, 5*1024*1024+1)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf[:5*1024*1024], buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
		return
	}
	if getBuf[5*1024*1024] != buf[0] {
		logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()

	// Do not need to remove destBucketName its same as bucketName.
}
9850
9851// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy
9852func testUnencryptedToUnencryptedCopyPart() {
9853 // initialize logging params
9854 startTime := time.Now()
9855 testName := getFuncName()
9856 function := "CopyObjectPart(destination, source)"
9857 args := map[string]interface{}{}
9858
9859 // Instantiate new minio client object
9860 client, err := minio.New(os.Getenv(serverEndpoint),
9861 &minio.Options{
9862 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
9863 Secure: mustParseBool(os.Getenv(enableHTTPS)),
9864 })
9865 if err != nil {
9866 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
9867 return
9868 }
9869
9870 // Instantiate new core client object.
9871 c := minio.Core{client}
9872
9873 // Enable tracing, write to stderr.
9874 // c.TraceOn(os.Stderr)
9875
9876 // Set user agent.
9877 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
9878
9879 // Generate a new random bucket name.
9880 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
9881
9882 // Make a new bucket.
9883 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
9884 if err != nil {
9885 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
9886 return
9887 }
9888 defer cleanupBucket(bucketName, client)
9889 // Make a buffer with 5MB of data
9890 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
9891
9892 // Save the data
9893 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
9894 putmetadata := map[string]string{
9895 "Content-Type": "binary/octet-stream",
9896 }
9897 opts := minio.PutObjectOptions{
9898 UserMetadata: putmetadata,
9899 }
9900 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
9901 if err != nil {
9902 logError(testName, function, args, startTime, "", "PutObject call failed", err)
9903 return
9904 }
9905 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
9906 if err != nil {
9907 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9908 return
9909 }
9910
9911 if st.Size != int64(len(buf)) {
9912 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
9913 return
9914 }
9915
9916 destBucketName := bucketName
9917 destObjectName := objectName + "-dest"
9918
9919 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
9920 if err != nil {
9921 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
9922 return
9923 }
9924
9925 // Content of the destination object will be two copies of
9926 // `objectName` concatenated, followed by first byte of
9927 // `objectName`.
9928 metadata := make(map[string]string)
9929 header := make(http.Header)
9930 for k, v := range header {
9931 metadata[k] = v[0]
9932 }
9933
9934 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
9935
9936 // First of three parts
9937 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
9938 if err != nil {
9939 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9940 return
9941 }
9942
9943 // Second of three parts
9944 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
9945 if err != nil {
9946 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9947 return
9948 }
9949
9950 // Last of three parts
9951 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
9952 if err != nil {
9953 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
9954 return
9955 }
9956
9957 // Complete the multipart upload
9958 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
9959 if err != nil {
9960 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
9961 return
9962 }
9963
9964 // Stat the object and check its length matches
9965 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
9966 if err != nil {
9967 logError(testName, function, args, startTime, "", "StatObject call failed", err)
9968 return
9969 }
9970
9971 if objInfo.Size != (5*1024*1024)*2+1 {
9972 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
9973 return
9974 }
9975
9976 // Now we read the data back
9977 getOpts := minio.GetObjectOptions{}
9978 getOpts.SetRange(0, 5*1024*1024-1)
9979 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9980 if err != nil {
9981 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9982 return
9983 }
9984 getBuf := make([]byte, 5*1024*1024)
9985 _, err = readFull(r, getBuf)
9986 if err != nil {
9987 logError(testName, function, args, startTime, "", "Read buffer failed", err)
9988 return
9989 }
9990 if !bytes.Equal(getBuf, buf) {
9991 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
9992 return
9993 }
9994
9995 getOpts.SetRange(5*1024*1024, 0)
9996 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
9997 if err != nil {
9998 logError(testName, function, args, startTime, "", "GetObject call failed", err)
9999 return
10000 }
10001 getBuf = make([]byte, 5*1024*1024+1)
10002 _, err = readFull(r, getBuf)
10003 if err != nil {
10004 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10005 return
10006 }
10007 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
10008 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
10009 return
10010 }
10011 if getBuf[5*1024*1024] != buf[0] {
10012 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
10013 return
10014 }
10015
10016 successLogger(testName, function, args, startTime).Info()
10017
10018 // Do not need to remove destBucketName its same as bucketName.
10019}
10020
// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy.
//
// Uploads a 5MiB unencrypted object, server-side copies it into an SSE-S3
// encrypted multipart destination (two full copies plus the first byte), then
// verifies the completed object's size and content.
func testUnencryptedToSSES3CopyObjectPart() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "CopyObjectPart(destination, source)"
	args := map[string]interface{}{}

	// Instantiate new minio client object
	client, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Instantiate new core client object.
	c := minio.Core{client}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}
	defer cleanupBucket(bucketName, client)
	// Make a buffer with 5MB of data
	buf := bytes.Repeat([]byte("abcde"), 1024*1024)

	// Save the data as the unencrypted source object.
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	opts := minio.PutObjectOptions{
		UserMetadata: map[string]string{
			"Content-Type": "binary/octet-stream",
		},
	}
	uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject call failed", err)
		return
	}
	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	if st.Size != int64(len(buf)) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
		return
	}

	destBucketName := bucketName
	destObjectName := objectName + "-dest"
	// Destination is SSE-S3 (server-managed keys) encrypted.
	dstencryption := encrypt.NewSSE()

	uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
		return
	}

	// Content of the destination object will be two copies of
	// `objectName` concatenated, followed by first byte of
	// `objectName`.
	metadata := make(map[string]string)
	header := make(http.Header)
	// Marshal writes the SSE-S3 request headers into `header`; flatten
	// them into the metadata map passed to each CopyObjectPart call.
	dstencryption.Marshal(header)

	for k, v := range header {
		metadata[k] = v[0]
	}

	// Precondition: the copy only succeeds while the source still
	// matches the ETag of the object uploaded above.
	metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag

	// First of three parts (offset 0, length -1: copy the whole source)
	fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Second of three parts
	sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Last of three parts (a single byte: offset 0, length 1)
	lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Complete the multipart upload
	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
		return
	}

	// Stat the object and check its length matches.
	// SSE-S3 objects are readable without client-supplied keys, so no
	// encryption options are needed here.
	objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	if objInfo.Size != (5*1024*1024)*2+1 {
		logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
		return
	}

	// Now we read the data back: first the leading 5MiB...
	getOpts := minio.GetObjectOptions{}
	getOpts.SetRange(0, 5*1024*1024-1)
	r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf := make([]byte, 5*1024*1024)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf, buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
		return
	}

	// ...then everything from offset 5MiB to the end (5MiB + 1 byte).
	getOpts.SetRange(5*1024*1024, 0)
	r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf = make([]byte, 5*1024*1024+1)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf[:5*1024*1024], buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
		return
	}
	if getBuf[5*1024*1024] != buf[0] {
		logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()

	// Do not need to remove destBucketName its same as bucketName.
}
10192
// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part.
//
// Uploads a 5MiB SSE-S3 encrypted object, server-side copies it into an
// SSE-C (client-provided key) encrypted multipart destination (two full
// copies plus the first byte), then verifies the completed object's size
// and content using the destination key.
func testSSES3EncryptedToSSECCopyObjectPart() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "CopyObjectPart(destination, source)"
	args := map[string]interface{}{}

	// Instantiate new minio client object
	client, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Instantiate new core client object.
	c := minio.Core{client}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}
	defer cleanupBucket(bucketName, client)
	// Make a buffer with 5MB of data
	buf := bytes.Repeat([]byte("abcde"), 1024*1024)

	// Save the data as an SSE-S3 (server-managed keys) encrypted source object.
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	password := "correct horse battery staple"
	srcEncryption := encrypt.NewSSE()
	opts := minio.PutObjectOptions{
		UserMetadata: map[string]string{
			"Content-Type": "binary/octet-stream",
		},
		ServerSideEncryption: srcEncryption,
	}
	uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject call failed", err)
		return
	}

	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	if st.Size != int64(len(buf)) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
		return
	}

	destBucketName := bucketName
	destObjectName := objectName + "-dest"
	// Destination uses SSE-C with a key derived from the password and
	// the destination bucket/object name.
	dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))

	uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
		return
	}

	// Content of the destination object will be two copies of
	// `objectName` concatenated, followed by first byte of
	// `objectName`.
	metadata := make(map[string]string)
	header := make(http.Header)
	// Marshal writes the SSE-C request headers into `header`; flatten
	// them into the metadata map passed to each CopyObjectPart call.
	dstencryption.Marshal(header)
	for k, v := range header {
		metadata[k] = v[0]
	}

	// Precondition: the copy only succeeds while the source still
	// matches the ETag of the object uploaded above.
	metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag

	// First of three parts (offset 0, length -1: copy the whole source)
	fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Second of three parts
	sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Last of three parts (a single byte: offset 0, length 1)
	lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
		return
	}

	// Complete the multipart upload
	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
		return
	}

	// Stat the object and check its length matches.
	// SSE-C objects require the key material on every access.
	objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject call failed", err)
		return
	}

	if objInfo.Size != (5*1024*1024)*2+1 {
		logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
		return
	}

	// Now we read the data back: first the leading 5MiB...
	getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
	getOpts.SetRange(0, 5*1024*1024-1)
	r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf := make([]byte, 5*1024*1024)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf, buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
		return
	}

	// ...then everything from offset 5MiB to the end (5MiB + 1 byte).
	getOpts.SetRange(5*1024*1024, 0)
	r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject call failed", err)
		return
	}
	getBuf = make([]byte, 5*1024*1024+1)
	_, err = readFull(r, getBuf)
	if err != nil {
		logError(testName, function, args, startTime, "", "Read buffer failed", err)
		return
	}
	if !bytes.Equal(getBuf[:5*1024*1024], buf) {
		logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
		return
	}
	if getBuf[5*1024*1024] != buf[0] {
		logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()

	// Do not need to remove destBucketName its same as bucketName.
}
10367
10368// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy
10369func testSSES3EncryptedToUnencryptedCopyPart() {
10370 // initialize logging params
10371 startTime := time.Now()
10372 testName := getFuncName()
10373 function := "CopyObjectPart(destination, source)"
10374 args := map[string]interface{}{}
10375
10376 // Instantiate new minio client object
10377 client, err := minio.New(os.Getenv(serverEndpoint),
10378 &minio.Options{
10379 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10380 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10381 })
10382 if err != nil {
10383 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
10384 return
10385 }
10386
10387 // Instantiate new core client object.
10388 c := minio.Core{client}
10389
10390 // Enable tracing, write to stderr.
10391 // c.TraceOn(os.Stderr)
10392
10393 // Set user agent.
10394 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
10395
10396 // Generate a new random bucket name.
10397 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
10398
10399 // Make a new bucket.
10400 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
10401 if err != nil {
10402 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
10403 return
10404 }
10405 defer cleanupBucket(bucketName, client)
10406 // Make a buffer with 5MB of data
10407 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
10408
10409 // Save the data
10410 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
10411 srcEncryption := encrypt.NewSSE()
10412 opts := minio.PutObjectOptions{
10413 UserMetadata: map[string]string{
10414 "Content-Type": "binary/octet-stream",
10415 },
10416 ServerSideEncryption: srcEncryption,
10417 }
10418 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
10419 if err != nil {
10420 logError(testName, function, args, startTime, "", "PutObject call failed", err)
10421 return
10422 }
10423 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
10424 if err != nil {
10425 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10426 return
10427 }
10428
10429 if st.Size != int64(len(buf)) {
10430 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
10431 return
10432 }
10433
10434 destBucketName := bucketName
10435 destObjectName := objectName + "-dest"
10436
10437 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
10438 if err != nil {
10439 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
10440 return
10441 }
10442
10443 // Content of the destination object will be two copies of
10444 // `objectName` concatenated, followed by first byte of
10445 // `objectName`.
10446 metadata := make(map[string]string)
10447 header := make(http.Header)
10448 for k, v := range header {
10449 metadata[k] = v[0]
10450 }
10451
10452 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
10453
10454 // First of three parts
10455 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
10456 if err != nil {
10457 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10458 return
10459 }
10460
10461 // Second of three parts
10462 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
10463 if err != nil {
10464 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10465 return
10466 }
10467
10468 // Last of three parts
10469 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
10470 if err != nil {
10471 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10472 return
10473 }
10474
10475 // Complete the multipart upload
10476 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
10477 if err != nil {
10478 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
10479 return
10480 }
10481
10482 // Stat the object and check its length matches
10483 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
10484 if err != nil {
10485 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10486 return
10487 }
10488
10489 if objInfo.Size != (5*1024*1024)*2+1 {
10490 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
10491 return
10492 }
10493
10494 // Now we read the data back
10495 getOpts := minio.GetObjectOptions{}
10496 getOpts.SetRange(0, 5*1024*1024-1)
10497 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10498 if err != nil {
10499 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10500 return
10501 }
10502 getBuf := make([]byte, 5*1024*1024)
10503 _, err = readFull(r, getBuf)
10504 if err != nil {
10505 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10506 return
10507 }
10508 if !bytes.Equal(getBuf, buf) {
10509 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
10510 return
10511 }
10512
10513 getOpts.SetRange(5*1024*1024, 0)
10514 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10515 if err != nil {
10516 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10517 return
10518 }
10519 getBuf = make([]byte, 5*1024*1024+1)
10520 _, err = readFull(r, getBuf)
10521 if err != nil {
10522 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10523 return
10524 }
10525 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
10526 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
10527 return
10528 }
10529 if getBuf[5*1024*1024] != buf[0] {
10530 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
10531 return
10532 }
10533
10534 successLogger(testName, function, args, startTime).Info()
10535
10536 // Do not need to remove destBucketName its same as bucketName.
10537}
10538
10539// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy
10540func testSSES3EncryptedToSSES3CopyObjectPart() {
10541 // initialize logging params
10542 startTime := time.Now()
10543 testName := getFuncName()
10544 function := "CopyObjectPart(destination, source)"
10545 args := map[string]interface{}{}
10546
10547 // Instantiate new minio client object
10548 client, err := minio.New(os.Getenv(serverEndpoint),
10549 &minio.Options{
10550 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10551 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10552 })
10553 if err != nil {
10554 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
10555 return
10556 }
10557
10558 // Instantiate new core client object.
10559 c := minio.Core{client}
10560
10561 // Enable tracing, write to stderr.
10562 // c.TraceOn(os.Stderr)
10563
10564 // Set user agent.
10565 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
10566
10567 // Generate a new random bucket name.
10568 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
10569
10570 // Make a new bucket.
10571 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
10572 if err != nil {
10573 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
10574 return
10575 }
10576 defer cleanupBucket(bucketName, client)
10577 // Make a buffer with 5MB of data
10578 buf := bytes.Repeat([]byte("abcde"), 1024*1024)
10579
10580 // Save the data
10581 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
10582 srcEncryption := encrypt.NewSSE()
10583 opts := minio.PutObjectOptions{
10584 UserMetadata: map[string]string{
10585 "Content-Type": "binary/octet-stream",
10586 },
10587 ServerSideEncryption: srcEncryption,
10588 }
10589
10590 uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
10591 if err != nil {
10592 logError(testName, function, args, startTime, "", "PutObject call failed", err)
10593 return
10594 }
10595 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
10596 if err != nil {
10597 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10598 return
10599 }
10600 if st.Size != int64(len(buf)) {
10601 logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
10602 return
10603 }
10604
10605 destBucketName := bucketName
10606 destObjectName := objectName + "-dest"
10607 dstencryption := encrypt.NewSSE()
10608
10609 uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
10610 if err != nil {
10611 logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
10612 return
10613 }
10614
10615 // Content of the destination object will be two copies of
10616 // `objectName` concatenated, followed by first byte of
10617 // `objectName`.
10618 metadata := make(map[string]string)
10619 header := make(http.Header)
10620 dstencryption.Marshal(header)
10621
10622 for k, v := range header {
10623 metadata[k] = v[0]
10624 }
10625
10626 metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
10627
10628 // First of three parts
10629 fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
10630 if err != nil {
10631 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10632 return
10633 }
10634
10635 // Second of three parts
10636 sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
10637 if err != nil {
10638 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10639 return
10640 }
10641
10642 // Last of three parts
10643 lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
10644 if err != nil {
10645 logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
10646 return
10647 }
10648
10649 // Complete the multipart upload
10650 _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
10651 if err != nil {
10652 logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
10653 return
10654 }
10655
10656 // Stat the object and check its length matches
10657 objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
10658 if err != nil {
10659 logError(testName, function, args, startTime, "", "StatObject call failed", err)
10660 return
10661 }
10662
10663 if objInfo.Size != (5*1024*1024)*2+1 {
10664 logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
10665 return
10666 }
10667
10668 // Now we read the data back
10669 getOpts := minio.GetObjectOptions{}
10670 getOpts.SetRange(0, 5*1024*1024-1)
10671 r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10672 if err != nil {
10673 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10674 return
10675 }
10676 getBuf := make([]byte, 5*1024*1024)
10677 _, err = readFull(r, getBuf)
10678 if err != nil {
10679 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10680 return
10681 }
10682 if !bytes.Equal(getBuf, buf) {
10683 logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
10684 return
10685 }
10686
10687 getOpts.SetRange(5*1024*1024, 0)
10688 r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
10689 if err != nil {
10690 logError(testName, function, args, startTime, "", "GetObject call failed", err)
10691 return
10692 }
10693 getBuf = make([]byte, 5*1024*1024+1)
10694 _, err = readFull(r, getBuf)
10695 if err != nil {
10696 logError(testName, function, args, startTime, "", "Read buffer failed", err)
10697 return
10698 }
10699 if !bytes.Equal(getBuf[:5*1024*1024], buf) {
10700 logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
10701 return
10702 }
10703 if getBuf[5*1024*1024] != buf[0] {
10704 logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
10705 return
10706 }
10707
10708 successLogger(testName, function, args, startTime).Info()
10709
10710 // Do not need to remove destBucketName its same as bucketName.
10711}
10712
10713func testUserMetadataCopying() {
10714 // initialize logging params
10715 startTime := time.Now()
10716 testName := getFuncName()
10717 function := "CopyObject(destination, source)"
10718 args := map[string]interface{}{}
10719
10720 // Instantiate new minio client object
10721 c, err := minio.New(os.Getenv(serverEndpoint),
10722 &minio.Options{
10723 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10724 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10725 })
10726 if err != nil {
10727 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
10728 return
10729 }
10730
10731 // c.TraceOn(os.Stderr)
10732 testUserMetadataCopyingWrapper(c)
10733}
10734
// testUserMetadataCopyingWrapper verifies user-metadata semantics of
// CopyObject and ComposeObject using the supplied client: metadata is
// replaced when the destination sets ReplaceMetadata, inherited when the
// destination sets none, and handled both ways for a two-source compose.
func testUserMetadataCopyingWrapper(c *minio.Client) {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "CopyObject(destination, source)"
	args := map[string]interface{}{}

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	// Make a new bucket in 'us-east-1' (source bucket).
	err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	// fetchMeta stats `object` and returns only its "x-amz-meta-*" headers
	// (first value of each key), so results can be compared against an
	// expected http.Header with reflect.DeepEqual.
	fetchMeta := func(object string) (h http.Header) {
		objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
		if err != nil {
			logError(testName, function, args, startTime, "", "Stat failed", err)
			return
		}
		h = make(http.Header)
		for k, vs := range objInfo.Metadata {
			if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
				h.Add(k, vs[0])
			}
		}
		return h
	}

	// 1. Upload a source object carrying one user-metadata header.
	const srcSize = 1024 * 1024
	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB (5 bytes x 1Mi)
	metadata := make(http.Header)
	metadata.Set("x-amz-meta-myheader", "myvalue")
	m := make(map[string]string)
	m["x-amz-meta-myheader"] = "myvalue"
	_, err = c.PutObject(context.Background(), bucketName, "srcObject",
		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err)
		return
	}
	// The uploaded metadata must be reported back by StatObject.
	if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
		logError(testName, function, args, startTime, "", "Metadata match failed", err)
		return
	}

	// 2. create source
	src := minio.CopySrcOptions{
		Bucket: bucketName,
		Object: "srcObject",
	}

	// 2.1 create destination with metadata set
	dst1 := minio.CopyDestOptions{
		Bucket:          bucketName,
		Object:          "dstObject-1",
		UserMetadata:    map[string]string{"notmyheader": "notmyvalue"},
		ReplaceMetadata: true,
	}

	// 3. Check that copying to an object with metadata set resets
	// the headers on the copy.
	args["source"] = src
	args["destination"] = dst1
	_, err = c.CopyObject(context.Background(), dst1, src)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObject failed", err)
		return
	}

	// Only the destination's metadata should survive the copy.
	expectedHeaders := make(http.Header)
	expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
		logError(testName, function, args, startTime, "", "Metadata match failed", err)
		return
	}

	// 4. create destination with no metadata set and same source
	dst2 := minio.CopyDestOptions{
		Bucket: bucketName,
		Object: "dstObject-2",
	}

	// 5. Check that copying to an object with no metadata set,
	// copies metadata.
	args["source"] = src
	args["destination"] = dst2
	_, err = c.CopyObject(context.Background(), dst2, src)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObject failed", err)
		return
	}

	// The source's metadata must have been carried over unchanged.
	expectedHeaders = metadata
	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
		logError(testName, function, args, startTime, "", "Metadata match failed", err)
		return
	}

	// 6. Compose a pair of sources with ReplaceMetadata set but no user
	// metadata on the destination.
	dst3 := minio.CopyDestOptions{
		Bucket:          bucketName,
		Object:          "dstObject-3",
		ReplaceMetadata: true,
	}

	function = "ComposeObject(destination, sources)"
	args["source"] = []minio.CopySrcOptions{src, src}
	args["destination"] = dst3
	_, err = c.ComposeObject(context.Background(), dst3, src, src)
	if err != nil {
		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
		return
	}

	// Check that no headers are copied in this case
	if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
		logError(testName, function, args, startTime, "", "Metadata match failed", err)
		return
	}

	// 7. Compose a pair of sources with dest user metadata set.
	dst4 := minio.CopyDestOptions{
		Bucket:          bucketName,
		Object:          "dstObject-4",
		UserMetadata:    map[string]string{"notmyheader": "notmyvalue"},
		ReplaceMetadata: true,
	}

	function = "ComposeObject(destination, sources)"
	args["source"] = []minio.CopySrcOptions{src, src}
	args["destination"] = dst4
	_, err = c.ComposeObject(context.Background(), dst4, src, src)
	if err != nil {
		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
		return
	}

	// Check that only the explicitly-set destination metadata (not the
	// source metadata) is present on the composed object.
	expectedHeaders = make(http.Header)
	expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
		logError(testName, function, args, startTime, "", "Metadata match failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
10888
10889func testUserMetadataCopyingV2() {
10890 // initialize logging params
10891 startTime := time.Now()
10892 testName := getFuncName()
10893 function := "CopyObject(destination, source)"
10894 args := map[string]interface{}{}
10895
10896 // Instantiate new minio client object
10897 c, err := minio.New(os.Getenv(serverEndpoint),
10898 &minio.Options{
10899 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
10900 Secure: mustParseBool(os.Getenv(enableHTTPS)),
10901 })
10902 if err != nil {
10903 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
10904 return
10905 }
10906
10907 // c.TraceOn(os.Stderr)
10908 testUserMetadataCopyingWrapper(c)
10909}
10910
// testStorageClassMetadataPutObject uploads objects with the
// REDUCED_REDUNDANCY and STANDARD storage classes and verifies the
// storage class reported back by StatObject.
func testStorageClassMetadataPutObject() {
	// initialize logging params
	startTime := time.Now()
	function := "testStorageClassMetadataPutObject()"
	args := map[string]interface{}{}
	testName := getFuncName()

	// Instantiate new minio client object
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
	// Make a new bucket in 'us-east-1' (source bucket).
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	// fetchMeta stats the object and returns only its "x-amz-storage-class"
	// headers (all values), for comparison with reflect.DeepEqual.
	fetchMeta := func(object string) (h http.Header) {
		objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
		if err != nil {
			logError(testName, function, args, startTime, "", "Stat failed", err)
			return
		}
		h = make(http.Header)
		for k, vs := range objInfo.Metadata {
			if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
				for _, v := range vs {
					h.Add(k, v)
				}
			}
		}
		return h
	}

	metadata := make(http.Header)
	metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")

	emptyMetadata := make(http.Header)

	const srcSize = 1024 * 1024
	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB (5 bytes x 1Mi)

	_, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	// Get the returned metadata
	returnedMeta := fetchMeta("srcObjectRRSClass")

	// The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
	if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
		logError(testName, function, args, startTime, "", "Metadata match failed", err)
		return
	}

	metadata = make(http.Header)
	metadata.Set("x-amz-storage-class", "STANDARD")

	_, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}
	// STANDARD must NOT be echoed back in the response metadata (the check
	// below fails if it is).
	if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
		logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
10997
10998func testStorageClassInvalidMetadataPutObject() {
10999 // initialize logging params
11000 startTime := time.Now()
11001 function := "testStorageClassInvalidMetadataPutObject()"
11002 args := map[string]interface{}{}
11003 testName := getFuncName()
11004
11005 // Instantiate new minio client object
11006 c, err := minio.New(os.Getenv(serverEndpoint),
11007 &minio.Options{
11008 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11009 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11010 })
11011 if err != nil {
11012 logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
11013 return
11014 }
11015
11016 // Generate a new random bucket name.
11017 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
11018 // Make a new bucket in 'us-east-1' (source bucket).
11019 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11020 if err != nil {
11021 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11022 return
11023 }
11024
11025 defer cleanupBucket(bucketName, c)
11026
11027 const srcSize = 1024 * 1024
11028 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB
11029
11030 _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
11031 bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
11032 if err == nil {
11033 logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
11034 return
11035 }
11036
11037 successLogger(testName, function, args, startTime).Info()
11038}
11039
// testStorageClassMetadataCopyObject uploads objects with the
// REDUCED_REDUNDANCY and STANDARD storage classes, performs a server-side
// copy of each, and verifies the storage class reported on the copies.
func testStorageClassMetadataCopyObject() {
	// initialize logging params
	startTime := time.Now()
	function := "testStorageClassMetadataCopyObject()"
	args := map[string]interface{}{}
	testName := getFuncName()

	// Instantiate new minio client object
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
	// Make a new bucket in 'us-east-1' (source bucket).
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	// fetchMeta stats the object and returns only its "x-amz-storage-class"
	// headers (all values), for comparison with reflect.DeepEqual.
	fetchMeta := func(object string) (h http.Header) {
		objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
		args["bucket"] = bucketName
		args["object"] = object
		if err != nil {
			logError(testName, function, args, startTime, "", "Stat failed", err)
			return
		}
		h = make(http.Header)
		for k, vs := range objInfo.Metadata {
			if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
				for _, v := range vs {
					h.Add(k, v)
				}
			}
		}
		return h
	}

	metadata := make(http.Header)
	metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")

	emptyMetadata := make(http.Header)

	// 5 MiB payload: "abcde" repeated 1Mi times.
	const srcSize = 1024 * 1024
	buf := bytes.Repeat([]byte("abcde"), srcSize)

	// Put an object with RRS Storage class
	_, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	// Make server side copy of object uploaded in previous step
	src := minio.CopySrcOptions{
		Bucket: bucketName,
		Object: "srcObjectRRSClass",
	}
	dst := minio.CopyDestOptions{
		Bucket: bucketName,
		Object: "srcObjectRRSClassCopy",
	}
	if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
		logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err)
		return
	}

	// Get the returned metadata
	returnedMeta := fetchMeta("srcObjectRRSClassCopy")

	// The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
	if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
		logError(testName, function, args, startTime, "", "Metadata match failed", err)
		return
	}

	metadata = make(http.Header)
	metadata.Set("x-amz-storage-class", "STANDARD")

	// Put an object with Standard Storage class
	_, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	// Make server side copy of object uploaded in previous step
	src = minio.CopySrcOptions{
		Bucket: bucketName,
		Object: "srcObjectSSClass",
	}
	dst = minio.CopyDestOptions{
		Bucket: bucketName,
		Object: "srcObjectSSClassCopy",
	}
	if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
		logError(testName, function, args, startTime, "", "CopyObject failed on SS", err)
		return
	}
	// Fetch the metadata of the copied object; STANDARD must NOT be echoed
	// back in the response metadata.
	if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) {
		logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
11159
11160// Test put object with size -1 byte object.
11161func testPutObjectNoLengthV2() {
11162 // initialize logging params
11163 startTime := time.Now()
11164 testName := getFuncName()
11165 function := "PutObject(bucketName, objectName, reader, size, opts)"
11166 args := map[string]interface{}{
11167 "bucketName": "",
11168 "objectName": "",
11169 "size": -1,
11170 "opts": "",
11171 }
11172
11173 // Seed random based on current time.
11174 rand.Seed(time.Now().Unix())
11175
11176 // Instantiate new minio client object.
11177 c, err := minio.New(os.Getenv(serverEndpoint),
11178 &minio.Options{
11179 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11180 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11181 })
11182 if err != nil {
11183 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
11184 return
11185 }
11186
11187 // Enable tracing, write to stderr.
11188 // c.TraceOn(os.Stderr)
11189
11190 // Set user agent.
11191 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11192
11193 // Generate a new random bucket name.
11194 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11195 args["bucketName"] = bucketName
11196
11197 // Make a new bucket.
11198 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11199 if err != nil {
11200 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11201 return
11202 }
11203
11204 defer cleanupBucket(bucketName, c)
11205
11206 objectName := bucketName + "unique"
11207 args["objectName"] = objectName
11208
11209 bufSize := dataFileMap["datafile-129-MB"]
11210 reader := getDataReader("datafile-129-MB")
11211 defer reader.Close()
11212 args["size"] = bufSize
11213
11214 // Upload an object.
11215 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{})
11216 if err != nil {
11217 logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
11218 return
11219 }
11220
11221 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
11222 if err != nil {
11223 logError(testName, function, args, startTime, "", "StatObject failed", err)
11224 return
11225 }
11226
11227 if st.Size != int64(bufSize) {
11228 logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(st.Size), err)
11229 return
11230 }
11231
11232 successLogger(testName, function, args, startTime).Info()
11233}
11234
11235// Test put objects of unknown size.
11236func testPutObjectsUnknownV2() {
11237 // initialize logging params
11238 startTime := time.Now()
11239 testName := getFuncName()
11240 function := "PutObject(bucketName, objectName, reader,size,opts)"
11241 args := map[string]interface{}{
11242 "bucketName": "",
11243 "objectName": "",
11244 "size": "",
11245 "opts": "",
11246 }
11247
11248 // Seed random based on current time.
11249 rand.Seed(time.Now().Unix())
11250
11251 // Instantiate new minio client object.
11252 c, err := minio.New(os.Getenv(serverEndpoint),
11253 &minio.Options{
11254 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11255 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11256 })
11257 if err != nil {
11258 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
11259 return
11260 }
11261
11262 // Enable tracing, write to stderr.
11263 // c.TraceOn(os.Stderr)
11264
11265 // Set user agent.
11266 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11267
11268 // Generate a new random bucket name.
11269 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11270 args["bucketName"] = bucketName
11271
11272 // Make a new bucket.
11273 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11274 if err != nil {
11275 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11276 return
11277 }
11278
11279 defer cleanupBucket(bucketName, c)
11280
11281 // Issues are revealed by trying to upload multiple files of unknown size
11282 // sequentially (on 4GB machines)
11283 for i := 1; i <= 4; i++ {
11284 // Simulate that we could be receiving byte slices of data that we want
11285 // to upload as a file
11286 rpipe, wpipe := io.Pipe()
11287 defer rpipe.Close()
11288 go func() {
11289 b := []byte("test")
11290 wpipe.Write(b)
11291 wpipe.Close()
11292 }()
11293
11294 // Upload the object.
11295 objectName := fmt.Sprintf("%sunique%d", bucketName, i)
11296 args["objectName"] = objectName
11297
11298 ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
11299 if err != nil {
11300 logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
11301 return
11302 }
11303
11304 if ui.Size != 4 {
11305 logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(ui.Size), nil)
11306 return
11307 }
11308
11309 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
11310 if err != nil {
11311 logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err)
11312 return
11313 }
11314
11315 if st.Size != int64(4) {
11316 logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(st.Size), err)
11317 return
11318 }
11319
11320 }
11321
11322 successLogger(testName, function, args, startTime).Info()
11323}
11324
11325// Test put object with 0 byte object.
11326func testPutObject0ByteV2() {
11327 // initialize logging params
11328 startTime := time.Now()
11329 testName := getFuncName()
11330 function := "PutObject(bucketName, objectName, reader, size, opts)"
11331 args := map[string]interface{}{
11332 "bucketName": "",
11333 "objectName": "",
11334 "size": 0,
11335 "opts": "",
11336 }
11337
11338 // Seed random based on current time.
11339 rand.Seed(time.Now().Unix())
11340
11341 // Instantiate new minio client object.
11342 c, err := minio.New(os.Getenv(serverEndpoint),
11343 &minio.Options{
11344 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11345 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11346 })
11347 if err != nil {
11348 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
11349 return
11350 }
11351
11352 // Enable tracing, write to stderr.
11353 // c.TraceOn(os.Stderr)
11354
11355 // Set user agent.
11356 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11357
11358 // Generate a new random bucket name.
11359 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11360 args["bucketName"] = bucketName
11361
11362 // Make a new bucket.
11363 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11364 if err != nil {
11365 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11366 return
11367 }
11368
11369 defer cleanupBucket(bucketName, c)
11370
11371 objectName := bucketName + "unique"
11372 args["objectName"] = objectName
11373 args["opts"] = minio.PutObjectOptions{}
11374
11375 // Upload an object.
11376 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
11377 if err != nil {
11378 logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
11379 return
11380 }
11381 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
11382 if err != nil {
11383 logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err)
11384 return
11385 }
11386 if st.Size != 0 {
11387 logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(st.Size), err)
11388 return
11389 }
11390
11391 successLogger(testName, function, args, startTime).Info()
11392}
11393
11394// Test expected error cases
11395func testComposeObjectErrorCases() {
11396 // initialize logging params
11397 startTime := time.Now()
11398 testName := getFuncName()
11399 function := "ComposeObject(destination, sourceList)"
11400 args := map[string]interface{}{}
11401
11402 // Instantiate new minio client object
11403 c, err := minio.New(os.Getenv(serverEndpoint),
11404 &minio.Options{
11405 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11406 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11407 })
11408 if err != nil {
11409 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
11410 return
11411 }
11412
11413 testComposeObjectErrorCasesWrapper(c)
11414}
11415
11416// Test concatenating multiple 10K objects V4
11417func testCompose10KSources() {
11418 // initialize logging params
11419 startTime := time.Now()
11420 testName := getFuncName()
11421 function := "ComposeObject(destination, sourceList)"
11422 args := map[string]interface{}{}
11423
11424 // Instantiate new minio client object
11425 c, err := minio.New(os.Getenv(serverEndpoint),
11426 &minio.Options{
11427 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11428 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11429 })
11430 if err != nil {
11431 logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
11432 return
11433 }
11434
11435 testComposeMultipleSources(c)
11436}
11437
11438// Tests comprehensive list of all methods.
11439func testFunctionalV2() {
11440 // initialize logging params
11441 startTime := time.Now()
11442 testName := getFuncName()
11443 function := "testFunctionalV2()"
11444 functionAll := ""
11445 args := map[string]interface{}{}
11446
11447 // Seed random based on current time.
11448 rand.Seed(time.Now().Unix())
11449
11450 c, err := minio.New(os.Getenv(serverEndpoint),
11451 &minio.Options{
11452 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11453 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11454 })
11455 if err != nil {
11456 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
11457 return
11458 }
11459
11460 // Enable to debug
11461 // c.TraceOn(os.Stderr)
11462
11463 // Set user agent.
11464 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11465
11466 // Generate a new random bucket name.
11467 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11468 location := "us-east-1"
11469 // Make a new bucket.
11470 function = "MakeBucket(bucketName, location)"
11471 functionAll = "MakeBucket(bucketName, location)"
11472 args = map[string]interface{}{
11473 "bucketName": bucketName,
11474 "location": location,
11475 }
11476 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
11477 if err != nil {
11478 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11479 return
11480 }
11481
11482 defer cleanupBucket(bucketName, c)
11483
11484 // Generate a random file name.
11485 fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
11486 file, err := os.Create(fileName)
11487 if err != nil {
11488 logError(testName, function, args, startTime, "", "file create failed", err)
11489 return
11490 }
11491 for i := 0; i < 3; i++ {
11492 buf := make([]byte, rand.Intn(1<<19))
11493 _, err = file.Write(buf)
11494 if err != nil {
11495 logError(testName, function, args, startTime, "", "file write failed", err)
11496 return
11497 }
11498 }
11499 file.Close()
11500
11501 // Verify if bucket exits and you have access.
11502 var exists bool
11503 function = "BucketExists(bucketName)"
11504 functionAll += ", " + function
11505 args = map[string]interface{}{
11506 "bucketName": bucketName,
11507 }
11508 exists, err = c.BucketExists(context.Background(), bucketName)
11509 if err != nil {
11510 logError(testName, function, args, startTime, "", "BucketExists failed", err)
11511 return
11512 }
11513 if !exists {
11514 logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
11515 return
11516 }
11517
11518 // Make the bucket 'public read/write'.
11519 function = "SetBucketPolicy(bucketName, bucketPolicy)"
11520 functionAll += ", " + function
11521
11522 readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
11523
11524 args = map[string]interface{}{
11525 "bucketName": bucketName,
11526 "bucketPolicy": readWritePolicy,
11527 }
11528 err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
11529
11530 if err != nil {
11531 logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
11532 return
11533 }
11534
11535 // List all buckets.
11536 function = "ListBuckets()"
11537 functionAll += ", " + function
11538 args = nil
11539 buckets, err := c.ListBuckets(context.Background())
11540 if len(buckets) == 0 {
11541 logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
11542 return
11543 }
11544 if err != nil {
11545 logError(testName, function, args, startTime, "", "ListBuckets failed", err)
11546 return
11547 }
11548
11549 // Verify if previously created bucket is listed in list buckets.
11550 bucketFound := false
11551 for _, bucket := range buckets {
11552 if bucket.Name == bucketName {
11553 bucketFound = true
11554 }
11555 }
11556
11557 // If bucket not found error out.
11558 if !bucketFound {
11559 logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err)
11560 return
11561 }
11562
11563 objectName := bucketName + "unique"
11564
11565 // Generate data
11566 buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
11567
11568 args = map[string]interface{}{
11569 "bucketName": bucketName,
11570 "objectName": objectName,
11571 "contentType": "",
11572 }
11573 _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
11574 if err != nil {
11575 logError(testName, function, args, startTime, "", "PutObject failed", err)
11576 return
11577 }
11578
11579 st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
11580 if err != nil {
11581 logError(testName, function, args, startTime, "", "StatObject failed", err)
11582 return
11583 }
11584 if st.Size != int64(len(buf)) {
11585 logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err)
11586 return
11587 }
11588
11589 objectNameNoLength := objectName + "-nolength"
11590 args["objectName"] = objectNameNoLength
11591 _, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
11592 if err != nil {
11593 logError(testName, function, args, startTime, "", "PutObject failed", err)
11594 return
11595 }
11596 st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{})
11597 if err != nil {
11598 logError(testName, function, args, startTime, "", "StatObject failed", err)
11599 return
11600 }
11601 if st.Size != int64(len(buf)) {
11602 logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err)
11603 return
11604 }
11605
11606 // Instantiate a done channel to close all listing.
11607 doneCh := make(chan struct{})
11608 defer close(doneCh)
11609
11610 objFound := false
11611 isRecursive := true // Recursive is true.
11612 function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
11613 functionAll += ", " + function
11614 args = map[string]interface{}{
11615 "bucketName": bucketName,
11616 "objectName": objectName,
11617 "isRecursive": isRecursive,
11618 }
11619 for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) {
11620 if obj.Key == objectName {
11621 objFound = true
11622 break
11623 }
11624 }
11625 if !objFound {
11626 logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err)
11627 return
11628 }
11629
11630 incompObjNotFound := true
11631 function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
11632 functionAll += ", " + function
11633 args = map[string]interface{}{
11634 "bucketName": bucketName,
11635 "objectName": objectName,
11636 "isRecursive": isRecursive,
11637 }
11638 for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) {
11639 if objIncompl.Key != "" {
11640 incompObjNotFound = false
11641 break
11642 }
11643 }
11644 if !incompObjNotFound {
11645 logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
11646 return
11647 }
11648
11649 function = "GetObject(bucketName, objectName)"
11650 functionAll += ", " + function
11651 args = map[string]interface{}{
11652 "bucketName": bucketName,
11653 "objectName": objectName,
11654 }
11655 newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
11656 if err != nil {
11657 logError(testName, function, args, startTime, "", "GetObject failed", err)
11658 return
11659 }
11660
11661 newReadBytes, err := io.ReadAll(newReader)
11662 if err != nil {
11663 logError(testName, function, args, startTime, "", "ReadAll failed", err)
11664 return
11665 }
11666 newReader.Close()
11667
11668 if !bytes.Equal(newReadBytes, buf) {
11669 logError(testName, function, args, startTime, "", "Bytes mismatch", err)
11670 return
11671 }
11672
11673 function = "FGetObject(bucketName, objectName, fileName)"
11674 functionAll += ", " + function
11675 args = map[string]interface{}{
11676 "bucketName": bucketName,
11677 "objectName": objectName,
11678 "fileName": fileName + "-f",
11679 }
11680 err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
11681 if err != nil {
11682 logError(testName, function, args, startTime, "", "FgetObject failed", err)
11683 return
11684 }
11685
11686 // Generate presigned HEAD object url.
11687 function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
11688 functionAll += ", " + function
11689 args = map[string]interface{}{
11690 "bucketName": bucketName,
11691 "objectName": objectName,
11692 "expires": 3600 * time.Second,
11693 }
11694 presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
11695 if err != nil {
11696 logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
11697 return
11698 }
11699
11700 transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
11701 if err != nil {
11702 logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
11703 return
11704 }
11705
11706 httpClient := &http.Client{
11707 // Setting a sensible time out of 30secs to wait for response
11708 // headers. Request is pro-actively canceled after 30secs
11709 // with no response.
11710 Timeout: 30 * time.Second,
11711 Transport: transport,
11712 }
11713
11714 req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil)
11715 if err != nil {
11716 logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
11717 return
11718 }
11719
11720 // Verify if presigned url works.
11721 resp, err := httpClient.Do(req)
11722 if err != nil {
11723 logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
11724 return
11725 }
11726 if resp.StatusCode != http.StatusOK {
11727 logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err)
11728 return
11729 }
11730 if resp.Header.Get("ETag") == "" {
11731 logError(testName, function, args, startTime, "", "Got empty ETag", err)
11732 return
11733 }
11734 resp.Body.Close()
11735
11736 // Generate presigned GET object url.
11737 function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
11738 functionAll += ", " + function
11739 args = map[string]interface{}{
11740 "bucketName": bucketName,
11741 "objectName": objectName,
11742 "expires": 3600 * time.Second,
11743 }
11744 presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
11745 if err != nil {
11746 logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
11747 return
11748 }
11749
11750 // Verify if presigned url works.
11751 req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
11752 if err != nil {
11753 logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
11754 return
11755 }
11756
11757 resp, err = httpClient.Do(req)
11758 if err != nil {
11759 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
11760 return
11761 }
11762
11763 if resp.StatusCode != http.StatusOK {
11764 logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
11765 return
11766 }
11767 newPresignedBytes, err := io.ReadAll(resp.Body)
11768 if err != nil {
11769 logError(testName, function, args, startTime, "", "ReadAll failed", err)
11770 return
11771 }
11772 resp.Body.Close()
11773 if !bytes.Equal(newPresignedBytes, buf) {
11774 logError(testName, function, args, startTime, "", "Bytes mismatch", err)
11775 return
11776 }
11777
11778 // Set request parameters.
11779 reqParams := make(url.Values)
11780 reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
11781 // Generate presigned GET object url.
11782 args["reqParams"] = reqParams
11783 presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
11784 if err != nil {
11785 logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
11786 return
11787 }
11788
11789 // Verify if presigned url works.
11790 req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
11791 if err != nil {
11792 logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
11793 return
11794 }
11795
11796 resp, err = httpClient.Do(req)
11797 if err != nil {
11798 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
11799 return
11800 }
11801
11802 if resp.StatusCode != http.StatusOK {
11803 logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
11804 return
11805 }
11806 newPresignedBytes, err = io.ReadAll(resp.Body)
11807 if err != nil {
11808 logError(testName, function, args, startTime, "", "ReadAll failed", err)
11809 return
11810 }
11811 if !bytes.Equal(newPresignedBytes, buf) {
11812 logError(testName, function, args, startTime, "", "Bytes mismatch", err)
11813 return
11814 }
11815 // Verify content disposition.
11816 if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
11817 logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err)
11818 return
11819 }
11820
11821 function = "PresignedPutObject(bucketName, objectName, expires)"
11822 functionAll += ", " + function
11823 args = map[string]interface{}{
11824 "bucketName": bucketName,
11825 "objectName": objectName + "-presigned",
11826 "expires": 3600 * time.Second,
11827 }
11828 presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
11829 if err != nil {
11830 logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
11831 return
11832 }
11833
11834 // Generate data more than 32K
11835 buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
11836
11837 req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf))
11838 if err != nil {
11839 logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
11840 return
11841 }
11842
11843 resp, err = httpClient.Do(req)
11844 if err != nil {
11845 logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
11846 return
11847 }
11848
11849 // Download the uploaded object to verify
11850 args = map[string]interface{}{
11851 "bucketName": bucketName,
11852 "objectName": objectName + "-presigned",
11853 }
11854 newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{})
11855 if err != nil {
11856 logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err)
11857 return
11858 }
11859
11860 newReadBytes, err = io.ReadAll(newReader)
11861 if err != nil {
11862 logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err)
11863 return
11864 }
11865 newReader.Close()
11866
11867 if !bytes.Equal(newReadBytes, buf) {
11868 logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err)
11869 return
11870 }
11871
11872 function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)"
11873 functionAll += ", " + function
11874 presignExtraHeaders := map[string][]string{
11875 "mysecret": {"abcxxx"},
11876 }
11877 args = map[string]interface{}{
11878 "method": "PUT",
11879 "bucketName": bucketName,
11880 "objectName": objectName + "-presign-custom",
11881 "expires": 3600 * time.Second,
11882 "extraHeaders": presignExtraHeaders,
11883 }
11884 _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders)
11885 if err == nil {
11886 logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err)
11887 return
11888 }
11889
11890 os.Remove(fileName)
11891 os.Remove(fileName + "-f")
11892 successLogger(testName, functionAll, args, startTime).Info()
11893}
11894
11895// Test get object with GetObject with context
11896func testGetObjectContext() {
11897 // initialize logging params
11898 startTime := time.Now()
11899 testName := getFuncName()
11900 function := "GetObject(ctx, bucketName, objectName)"
11901 args := map[string]interface{}{
11902 "ctx": "",
11903 "bucketName": "",
11904 "objectName": "",
11905 }
11906 // Seed random based on current time.
11907 rand.Seed(time.Now().Unix())
11908
11909 // Instantiate new minio client object.
11910 c, err := minio.New(os.Getenv(serverEndpoint),
11911 &minio.Options{
11912 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
11913 Secure: mustParseBool(os.Getenv(enableHTTPS)),
11914 })
11915 if err != nil {
11916 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
11917 return
11918 }
11919
11920 // Enable tracing, write to stderr.
11921 // c.TraceOn(os.Stderr)
11922
11923 // Set user agent.
11924 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
11925
11926 // Generate a new random bucket name.
11927 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
11928 args["bucketName"] = bucketName
11929
11930 // Make a new bucket.
11931 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
11932 if err != nil {
11933 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
11934 return
11935 }
11936
11937 defer cleanupBucket(bucketName, c)
11938
11939 bufSize := dataFileMap["datafile-33-kB"]
11940 reader := getDataReader("datafile-33-kB")
11941 defer reader.Close()
11942 // Save the data
11943 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
11944 args["objectName"] = objectName
11945
11946 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
11947 if err != nil {
11948 logError(testName, function, args, startTime, "", "PutObject failed", err)
11949 return
11950 }
11951
11952 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
11953 args["ctx"] = ctx
11954 cancel()
11955
11956 r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
11957 if err != nil {
11958 logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
11959 return
11960 }
11961
11962 if _, err = r.Stat(); err == nil {
11963 logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
11964 return
11965 }
11966 r.Close()
11967
11968 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
11969 args["ctx"] = ctx
11970 defer cancel()
11971
11972 // Read the data back
11973 r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
11974 if err != nil {
11975 logError(testName, function, args, startTime, "", "GetObject failed", err)
11976 return
11977 }
11978
11979 st, err := r.Stat()
11980 if err != nil {
11981 logError(testName, function, args, startTime, "", "object Stat call failed", err)
11982 return
11983 }
11984 if st.Size != int64(bufSize) {
11985 logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err)
11986 return
11987 }
11988 if err := r.Close(); err != nil {
11989 logError(testName, function, args, startTime, "", "object Close() call failed", err)
11990 return
11991 }
11992
11993 successLogger(testName, function, args, startTime).Info()
11994}
11995
11996// Test get object with FGetObject with a user provided context
11997func testFGetObjectContext() {
11998 // initialize logging params
11999 startTime := time.Now()
12000 testName := getFuncName()
12001 function := "FGetObject(ctx, bucketName, objectName, fileName)"
12002 args := map[string]interface{}{
12003 "ctx": "",
12004 "bucketName": "",
12005 "objectName": "",
12006 "fileName": "",
12007 }
12008 // Seed random based on current time.
12009 rand.Seed(time.Now().Unix())
12010
12011 // Instantiate new minio client object.
12012 c, err := minio.New(os.Getenv(serverEndpoint),
12013 &minio.Options{
12014 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12015 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12016 })
12017 if err != nil {
12018 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
12019 return
12020 }
12021
12022 // Enable tracing, write to stderr.
12023 // c.TraceOn(os.Stderr)
12024
12025 // Set user agent.
12026 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12027
12028 // Generate a new random bucket name.
12029 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12030 args["bucketName"] = bucketName
12031
12032 // Make a new bucket.
12033 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12034 if err != nil {
12035 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12036 return
12037 }
12038
12039 defer cleanupBucket(bucketName, c)
12040
12041 bufSize := dataFileMap["datafile-1-MB"]
12042 reader := getDataReader("datafile-1-MB")
12043 defer reader.Close()
12044 // Save the data
12045 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
12046 args["objectName"] = objectName
12047
12048 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12049 if err != nil {
12050 logError(testName, function, args, startTime, "", "PutObject failed", err)
12051 return
12052 }
12053
12054 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
12055 args["ctx"] = ctx
12056 defer cancel()
12057
12058 fileName := "tempfile-context"
12059 args["fileName"] = fileName
12060 // Read the data back
12061 err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
12062 if err == nil {
12063 logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
12064 return
12065 }
12066 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
12067 defer cancel()
12068
12069 // Read the data back
12070 err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
12071 if err != nil {
12072 logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err)
12073 return
12074 }
12075 if err = os.Remove(fileName + "-fcontext"); err != nil {
12076 logError(testName, function, args, startTime, "", "Remove file failed", err)
12077 return
12078 }
12079
12080 successLogger(testName, function, args, startTime).Info()
12081}
12082
12083// Test get object with GetObject with a user provided context
12084func testGetObjectRanges() {
12085 // initialize logging params
12086 startTime := time.Now()
12087 testName := getFuncName()
12088 function := "GetObject(ctx, bucketName, objectName, fileName)"
12089 args := map[string]interface{}{
12090 "ctx": "",
12091 "bucketName": "",
12092 "objectName": "",
12093 "fileName": "",
12094 }
12095 ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
12096 defer cancel()
12097
12098 rng := rand.NewSource(time.Now().UnixNano())
12099 // Instantiate new minio client object.
12100 c, err := minio.New(os.Getenv(serverEndpoint),
12101 &minio.Options{
12102 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12103 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12104 })
12105 if err != nil {
12106 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
12107 return
12108 }
12109
12110 // Enable tracing, write to stderr.
12111 // c.TraceOn(os.Stderr)
12112
12113 // Set user agent.
12114 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12115
12116 // Generate a new random bucket name.
12117 bucketName := randString(60, rng, "minio-go-test-")
12118 args["bucketName"] = bucketName
12119
12120 // Make a new bucket.
12121 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12122 if err != nil {
12123 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12124 return
12125 }
12126
12127 defer cleanupBucket(bucketName, c)
12128
12129 bufSize := dataFileMap["datafile-129-MB"]
12130 reader := getDataReader("datafile-129-MB")
12131 defer reader.Close()
12132 // Save the data
12133 objectName := randString(60, rng, "")
12134 args["objectName"] = objectName
12135
12136 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12137 if err != nil {
12138 logError(testName, function, args, startTime, "", "PutObject failed", err)
12139 return
12140 }
12141
12142 // Read the data back
12143 tests := []struct {
12144 start int64
12145 end int64
12146 }{
12147 {
12148 start: 1024,
12149 end: 1024 + 1<<20,
12150 },
12151 {
12152 start: 20e6,
12153 end: 20e6 + 10000,
12154 },
12155 {
12156 start: 40e6,
12157 end: 40e6 + 10000,
12158 },
12159 {
12160 start: 60e6,
12161 end: 60e6 + 10000,
12162 },
12163 {
12164 start: 80e6,
12165 end: 80e6 + 10000,
12166 },
12167 {
12168 start: 120e6,
12169 end: int64(bufSize),
12170 },
12171 }
12172 for _, test := range tests {
12173 wantRC := getDataReader("datafile-129-MB")
12174 io.CopyN(io.Discard, wantRC, test.start)
12175 want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
12176 opts := minio.GetObjectOptions{}
12177 opts.SetRange(test.start, test.end)
12178 args["opts"] = fmt.Sprintf("%+v", test)
12179 obj, err := c.GetObject(ctx, bucketName, objectName, opts)
12180 if err != nil {
12181 logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err)
12182 return
12183 }
12184 err = crcMatches(obj, want)
12185 if err != nil {
12186 logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err)
12187 return
12188 }
12189 }
12190
12191 successLogger(testName, function, args, startTime).Info()
12192}
12193
// Test get object ACLs with GetObjectACL with custom provided context.
// Uploads an object with a canned ACL ("X-Amz-Acl: public-read-write") and
// checks the ACL metadata returned by GetObjectACL; against AWS S3 it also
// exercises explicit grant headers (X-Amz-Grant-Read/-Write).
func testGetObjectACLContext() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "GetObjectACL(ctx, bucketName, objectName)"
	args := map[string]interface{}{
		"ctx":        "",
		"bucketName": "",
		"objectName": "",
	}
	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(os.Getenv(serverEndpoint),
		&minio.Options{
			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
			Secure: mustParseBool(os.Getenv(enableHTTPS)),
		})
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	defer cleanupBucket(bucketName, c)

	bufSize := dataFileMap["datafile-1-MB"]
	reader := getDataReader("datafile-1-MB")
	defer reader.Close()
	// Save the data
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	// Add meta data to add a canned acl
	metaData := map[string]string{
		"X-Amz-Acl": "public-read-write",
	}

	_, err = c.PutObject(context.Background(), bucketName,
		objectName, reader, int64(bufSize),
		minio.PutObjectOptions{
			ContentType:  "binary/octet-stream",
			UserMetadata: metaData,
		})

	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	// Bound the ACL fetch by a 10s deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	args["ctx"] = ctx
	defer cancel()

	// Read the data back
	objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName)
	if getObjectACLErr != nil {
		logError(testName, function, args, startTime, "", "GetObjectACL failed. ", getObjectACLErr)
		return
	}

	// The canned ACL must be reported back as a single metadata value.
	s, ok := objectInfo.Metadata["X-Amz-Acl"]
	if !ok {
		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil)
		return
	}

	if len(s) != 1 {
		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
		return
	}

	// Do a very limited testing if this is not AWS S3
	// (non-AWS servers are expected to report "private" and the test ends here).
	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
		if s[0] != "private" {
			logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got"+fmt.Sprintf("%q", s[0]), nil)
			return
		}

		successLogger(testName, function, args, startTime).Info()
		return
	}

	if s[0] != "public-read-write" {
		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil)
		return
	}

	// AWS-only part: upload a second object using explicit grant headers
	// instead of a canned ACL.
	bufSize = dataFileMap["datafile-1-MB"]
	reader2 := getDataReader("datafile-1-MB")
	defer reader2.Close()
	// Save the data
	objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	// Add meta data to add a canned acl
	// (note: the addresses below are reproduced exactly as they appear in
	// this source view).
	metaData = map[string]string{
		"X-Amz-Grant-Read":  "[email protected]",
		"X-Amz-Grant-Write": "[email protected]",
	}

	_, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
	args["ctx"] = ctx
	defer cancel()

	// Read the data back
	objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName)
	// NOTE(review): this branch treats a *successful* GetObjectACL as a test
	// failure (== nil), yet the code below still inspects objectInfo.Metadata
	// from that same call — which is only meaningful when the call succeeded.
	// One of the two appears inverted; confirm the intended contract of
	// GetObjectACL with grant headers before changing either.
	if getObjectACLErr == nil {
		logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr)
		return
	}

	if len(objectInfo.Metadata) != 3 {
		logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil)
		return
	}

	// Each grant header must round-trip as exactly one metadata value.
	s, ok = objectInfo.Metadata["X-Amz-Grant-Read"]
	if !ok {
		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil)
		return
	}

	if len(s) != 1 {
		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
		return
	}

	if s[0] != "[email protected]" {
		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"[email protected]\" got "+fmt.Sprintf("%q", s), nil)
		return
	}

	s, ok = objectInfo.Metadata["X-Amz-Grant-Write"]
	if !ok {
		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil)
		return
	}

	if len(s) != 1 {
		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
		return
	}

	if s[0] != "[email protected]" {
		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"[email protected]\" got "+fmt.Sprintf("%q", s), nil)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
12369
12370// Test validates putObject with context to see if request cancellation is honored for V2.
12371func testPutObjectContextV2() {
12372 // initialize logging params
12373 startTime := time.Now()
12374 testName := getFuncName()
12375 function := "PutObject(ctx, bucketName, objectName, reader, size, opts)"
12376 args := map[string]interface{}{
12377 "ctx": "",
12378 "bucketName": "",
12379 "objectName": "",
12380 "size": "",
12381 "opts": "",
12382 }
12383 // Instantiate new minio client object.
12384 c, err := minio.New(os.Getenv(serverEndpoint),
12385 &minio.Options{
12386 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12387 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12388 })
12389 if err != nil {
12390 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
12391 return
12392 }
12393
12394 // Enable tracing, write to stderr.
12395 // c.TraceOn(os.Stderr)
12396
12397 // Set user agent.
12398 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12399
12400 // Make a new bucket.
12401 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12402 args["bucketName"] = bucketName
12403
12404 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12405 if err != nil {
12406 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12407 return
12408 }
12409 defer cleanupBucket(bucketName, c)
12410 bufSize := dataFileMap["datatfile-33-kB"]
12411 reader := getDataReader("datafile-33-kB")
12412 defer reader.Close()
12413
12414 objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
12415 args["objectName"] = objectName
12416
12417 ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
12418 args["ctx"] = ctx
12419 args["size"] = bufSize
12420 defer cancel()
12421
12422 _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12423 if err != nil {
12424 logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err)
12425 return
12426 }
12427
12428 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
12429 args["ctx"] = ctx
12430
12431 defer cancel()
12432 reader = getDataReader("datafile-33-kB")
12433 defer reader.Close()
12434 _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12435 if err != nil {
12436 logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
12437 return
12438 }
12439
12440 successLogger(testName, function, args, startTime).Info()
12441}
12442
12443// Test get object with GetObject with custom context
12444func testGetObjectContextV2() {
12445 // initialize logging params
12446 startTime := time.Now()
12447 testName := getFuncName()
12448 function := "GetObject(ctx, bucketName, objectName)"
12449 args := map[string]interface{}{
12450 "ctx": "",
12451 "bucketName": "",
12452 "objectName": "",
12453 }
12454 // Seed random based on current time.
12455 rand.Seed(time.Now().Unix())
12456
12457 // Instantiate new minio client object.
12458 c, err := minio.New(os.Getenv(serverEndpoint),
12459 &minio.Options{
12460 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12461 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12462 })
12463 if err != nil {
12464 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
12465 return
12466 }
12467
12468 // Enable tracing, write to stderr.
12469 // c.TraceOn(os.Stderr)
12470
12471 // Set user agent.
12472 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12473
12474 // Generate a new random bucket name.
12475 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12476 args["bucketName"] = bucketName
12477
12478 // Make a new bucket.
12479 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12480 if err != nil {
12481 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12482 return
12483 }
12484
12485 defer cleanupBucket(bucketName, c)
12486
12487 bufSize := dataFileMap["datafile-33-kB"]
12488 reader := getDataReader("datafile-33-kB")
12489 defer reader.Close()
12490 // Save the data
12491 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
12492 args["objectName"] = objectName
12493
12494 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12495 if err != nil {
12496 logError(testName, function, args, startTime, "", "PutObject call failed", err)
12497 return
12498 }
12499
12500 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
12501 args["ctx"] = ctx
12502 cancel()
12503
12504 r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
12505 if err != nil {
12506 logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
12507 return
12508 }
12509 if _, err = r.Stat(); err == nil {
12510 logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
12511 return
12512 }
12513 r.Close()
12514
12515 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
12516 defer cancel()
12517
12518 // Read the data back
12519 r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
12520 if err != nil {
12521 logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err)
12522 return
12523 }
12524
12525 st, err := r.Stat()
12526 if err != nil {
12527 logError(testName, function, args, startTime, "", "object Stat call failed", err)
12528 return
12529 }
12530 if st.Size != int64(bufSize) {
12531 logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
12532 return
12533 }
12534 if err := r.Close(); err != nil {
12535 logError(testName, function, args, startTime, "", " object Close() call failed", err)
12536 return
12537 }
12538
12539 successLogger(testName, function, args, startTime).Info()
12540}
12541
12542// Test get object with FGetObject with custom context
12543func testFGetObjectContextV2() {
12544 // initialize logging params
12545 startTime := time.Now()
12546 testName := getFuncName()
12547 function := "FGetObject(ctx, bucketName, objectName,fileName)"
12548 args := map[string]interface{}{
12549 "ctx": "",
12550 "bucketName": "",
12551 "objectName": "",
12552 "fileName": "",
12553 }
12554 // Seed random based on current time.
12555 rand.Seed(time.Now().Unix())
12556
12557 // Instantiate new minio client object.
12558 c, err := minio.New(os.Getenv(serverEndpoint),
12559 &minio.Options{
12560 Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12561 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12562 })
12563 if err != nil {
12564 logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
12565 return
12566 }
12567
12568 // Enable tracing, write to stderr.
12569 // c.TraceOn(os.Stderr)
12570
12571 // Set user agent.
12572 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12573
12574 // Generate a new random bucket name.
12575 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12576 args["bucketName"] = bucketName
12577
12578 // Make a new bucket.
12579 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12580 if err != nil {
12581 logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
12582 return
12583 }
12584
12585 defer cleanupBucket(bucketName, c)
12586
12587 bufSize := dataFileMap["datatfile-1-MB"]
12588 reader := getDataReader("datafile-1-MB")
12589 defer reader.Close()
12590 // Save the data
12591 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
12592 args["objectName"] = objectName
12593
12594 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
12595 if err != nil {
12596 logError(testName, function, args, startTime, "", "PutObject call failed", err)
12597 return
12598 }
12599
12600 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
12601 args["ctx"] = ctx
12602 defer cancel()
12603
12604 fileName := "tempfile-context"
12605 args["fileName"] = fileName
12606
12607 // Read the data back
12608 err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
12609 if err == nil {
12610 logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
12611 return
12612 }
12613 ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
12614 defer cancel()
12615
12616 // Read the data back
12617 err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
12618 if err != nil {
12619 logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err)
12620 return
12621 }
12622
12623 if err = os.Remove(fileName + "-fcontext"); err != nil {
12624 logError(testName, function, args, startTime, "", "Remove file failed", err)
12625 return
12626 }
12627
12628 successLogger(testName, function, args, startTime).Info()
12629}
12630
12631// Test list object v1 and V2
12632func testListObjects() {
12633 // initialize logging params
12634 startTime := time.Now()
12635 testName := getFuncName()
12636 function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
12637 args := map[string]interface{}{
12638 "bucketName": "",
12639 "objectPrefix": "",
12640 "recursive": "true",
12641 }
12642 // Seed random based on current time.
12643 rand.Seed(time.Now().Unix())
12644
12645 // Instantiate new minio client object.
12646 c, err := minio.New(os.Getenv(serverEndpoint),
12647 &minio.Options{
12648 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12649 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12650 })
12651 if err != nil {
12652 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
12653 return
12654 }
12655
12656 // Enable tracing, write to stderr.
12657 // c.TraceOn(os.Stderr)
12658
12659 // Set user agent.
12660 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12661
12662 // Generate a new random bucket name.
12663 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12664 args["bucketName"] = bucketName
12665
12666 // Make a new bucket.
12667 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
12668 if err != nil {
12669 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12670 return
12671 }
12672
12673 defer cleanupBucket(bucketName, c)
12674
12675 testObjects := []struct {
12676 name string
12677 storageClass string
12678 }{
12679 // Special characters
12680 {"foo bar", "STANDARD"},
12681 {"foo-%", "STANDARD"},
12682 {"random-object-1", "STANDARD"},
12683 {"random-object-2", "REDUCED_REDUNDANCY"},
12684 }
12685
12686 for i, object := range testObjects {
12687 bufSize := dataFileMap["datafile-33-kB"]
12688 reader := getDataReader("datafile-33-kB")
12689 defer reader.Close()
12690 _, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize),
12691 minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
12692 if err != nil {
12693 logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err)
12694 return
12695 }
12696 }
12697
12698 testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) {
12699 var objCursor int
12700
12701 // check for object name and storage-class from listing object result
12702 for objInfo := range listFn(context.Background(), bucket, opts) {
12703 if objInfo.Err != nil {
12704 logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err)
12705 return
12706 }
12707 if objInfo.Key != testObjects[objCursor].name {
12708 logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", err)
12709 return
12710 }
12711 if objInfo.StorageClass != testObjects[objCursor].storageClass {
12712 // Ignored as Gateways (Azure/GCS etc) wont return storage class
12713 ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info()
12714 }
12715 objCursor++
12716 }
12717
12718 if objCursor != len(testObjects) {
12719 logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New(""))
12720 return
12721 }
12722 }
12723
12724 testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true})
12725 testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true})
12726 testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true})
12727
12728 successLogger(testName, function, args, startTime).Info()
12729}
12730
12731// Test deleting multiple objects with object retention set in Governance mode
12732func testRemoveObjects() {
12733 // initialize logging params
12734 startTime := time.Now()
12735 testName := getFuncName()
12736 function := "RemoveObjects(bucketName, objectsCh, opts)"
12737 args := map[string]interface{}{
12738 "bucketName": "",
12739 "objectPrefix": "",
12740 "recursive": "true",
12741 }
12742 // Seed random based on current time.
12743 rand.Seed(time.Now().Unix())
12744
12745 // Instantiate new minio client object.
12746 c, err := minio.New(os.Getenv(serverEndpoint),
12747 &minio.Options{
12748 Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
12749 Secure: mustParseBool(os.Getenv(enableHTTPS)),
12750 })
12751 if err != nil {
12752 logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
12753 return
12754 }
12755
12756 // Enable tracing, write to stderr.
12757 // c.TraceOn(os.Stderr)
12758
12759 // Set user agent.
12760 c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
12761
12762 // Generate a new random bucket name.
12763 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
12764 args["bucketName"] = bucketName
12765 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
12766 args["objectName"] = objectName
12767
12768 // Make a new bucket.
12769 err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
12770 if err != nil {
12771 logError(testName, function, args, startTime, "", "MakeBucket failed", err)
12772 return
12773 }
12774
12775 bufSize := dataFileMap["datafile-129-MB"]
12776 reader := getDataReader("datafile-129-MB")
12777 defer reader.Close()
12778
12779 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
12780 if err != nil {
12781 logError(testName, function, args, startTime, "", "Error uploading object", err)
12782 return
12783 }
12784
12785 // Replace with smaller...
12786 bufSize = dataFileMap["datafile-10-kB"]
12787 reader = getDataReader("datafile-10-kB")
12788 defer reader.Close()
12789
12790 _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
12791 if err != nil {
12792 logError(testName, function, args, startTime, "", "Error uploading object", err)
12793 }
12794
12795 t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
12796 m := minio.RetentionMode(minio.Governance)
12797 opts := minio.PutObjectRetentionOptions{
12798 GovernanceBypass: false,
12799 RetainUntilDate: &t,
12800 Mode: &m,
12801 }
12802 err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
12803 if err != nil {
12804 logError(testName, function, args, startTime, "", "Error setting retention", err)
12805 return
12806 }
12807
12808 objectsCh := make(chan minio.ObjectInfo)
12809 // Send object names that are needed to be removed to objectsCh
12810 go func() {
12811 defer close(objectsCh)
12812 // List all objects from a bucket-name with a matching prefix.
12813 for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
12814 if object.Err != nil {
12815 logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
12816 return
12817 }
12818 objectsCh <- object
12819 }
12820 }()
12821
12822 for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
12823 // Error is expected here because Retention is set on the object
12824 // and RemoveObjects is called without Bypass Governance
12825 if rErr.Err == nil {
12826 logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
12827 return
12828 }
12829 }
12830
12831 objectsCh1 := make(chan minio.ObjectInfo)
12832
12833 // Send object names that are needed to be removed to objectsCh
12834 go func() {
12835 defer close(objectsCh1)
12836 // List all objects from a bucket-name with a matching prefix.
12837 for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
12838 if object.Err != nil {
12839 logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
12840 return
12841 }
12842 objectsCh1 <- object
12843 }
12844 }()
12845
12846 opts1 := minio.RemoveObjectsOptions{
12847 GovernanceBypass: true,
12848 }
12849
12850 for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) {
12851 // Error is not expected here because Retention is set on the object
12852 // and RemoveObjects is called with Bypass Governance
12853 logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err)
12854 return
12855 }
12856
12857 // Delete all objects and buckets
12858 if err = cleanupVersionedBucket(bucketName, c); err != nil {
12859 logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
12860 return
12861 }
12862
12863 successLogger(testName, function, args, startTime).Info()
12864}
12865
// mustParseBool converts str to a bool, treating any value that
// strconv.ParseBool rejects as false.
func mustParseBool(str string) bool {
	parsed, parseErr := strconv.ParseBool(str)
	return parseErr == nil && parsed
}
12874
// main configures JSON logging for the mint test framework and runs the
// functional test suite. In full mode every test runs (SSE-C tests only
// over TLS, SSE-S3/KMS tests only when KMS is enabled); otherwise only
// the two broad smoke tests run.
func main() {
	// Output to stdout instead of the default stderr
	log.SetOutput(os.Stdout)
	// create custom formatter
	mintFormatter := mintJSONFormatter{}
	// set custom formatter
	log.SetFormatter(&mintFormatter)
	// log Info or above -- success cases are Info level, failures are Fatal level
	log.SetLevel(log.InfoLevel)

	tls := mustParseBool(os.Getenv(enableHTTPS))
	kms := mustParseBool(os.Getenv(enableKMS))
	if os.Getenv(enableKMS) == "" {
		// Default to KMS tests.
		kms = true
	}

	// execute tests
	if isFullMode() {
		// Signature V2 client tests.
		testMakeBucketErrorV2()
		testGetObjectClosedTwiceV2()
		testFPutObjectV2()
		testMakeBucketRegionsV2()
		testGetObjectReadSeekFunctionalV2()
		testGetObjectReadAtFunctionalV2()
		testGetObjectRanges()
		testCopyObjectV2()
		testFunctionalV2()
		testComposeObjectErrorCasesV2()
		testCompose10KSourcesV2()
		testUserMetadataCopyingV2()
		testPutObjectWithChecksums()
		testPutMultipartObjectWithChecksums()
		testPutObject0ByteV2()
		testPutObjectNoLengthV2()
		testPutObjectsUnknownV2()
		testGetObjectContextV2()
		testFPutObjectContextV2()
		testFGetObjectContextV2()
		testPutObjectContextV2()
		testPutObjectWithVersioning()
		// Signature V4 client tests.
		testMakeBucketError()
		testMakeBucketRegions()
		testPutObjectWithMetadata()
		testPutObjectReadAt()
		testPutObjectStreaming()
		testGetObjectSeekEnd()
		testGetObjectClosedTwice()
		testGetObjectS3Zip()
		testRemoveMultipleObjects()
		testRemoveMultipleObjectsWithResult()
		testFPutObjectMultipart()
		testFPutObject()
		testGetObjectReadSeekFunctional()
		testGetObjectReadAtFunctional()
		testGetObjectReadAtWhenEOFWasReached()
		testPresignedPostPolicy()
		testCopyObject()
		testComposeObjectErrorCases()
		testCompose10KSources()
		testUserMetadataCopying()
		testBucketNotification()
		testFunctional()
		testGetObjectModified()
		testPutObjectUploadSeekedObject()
		testGetObjectContext()
		testFPutObjectContext()
		testFGetObjectContext()
		testGetObjectACLContext()
		testPutObjectContext()
		testStorageClassMetadataPutObject()
		testStorageClassInvalidMetadataPutObject()
		testStorageClassMetadataCopyObject()
		testPutObjectWithContentLanguage()
		testListObjects()
		testRemoveObjects()
		// Versioning tests.
		testListObjectVersions()
		testStatObjectWithVersioning()
		testGetObjectWithVersioning()
		testCopyObjectWithVersioning()
		testConcurrentCopyObjectWithVersioning()
		testComposeObjectWithVersioning()
		testRemoveObjectWithVersioning()
		testRemoveObjectsWithVersioning()
		testObjectTaggingWithVersioning()
		testTrailingChecksums()
		testPutObjectWithAutomaticChecksums()

		// SSE-C tests will only work over TLS connection.
		if tls {
			testSSECEncryptionPutGet()
			testSSECEncryptionFPut()
			testSSECEncryptedGetObjectReadAtFunctional()
			testSSECEncryptedGetObjectReadSeekFunctional()
			testEncryptedCopyObjectV2()
			testEncryptedSSECToSSECCopyObject()
			testEncryptedSSECToUnencryptedCopyObject()
			testUnencryptedToSSECCopyObject()
			testUnencryptedToUnencryptedCopyObject()
			testEncryptedEmptyObject()
			testDecryptedCopyObject()
			testSSECEncryptedToSSECCopyObjectPart()
			testSSECMultipartEncryptedToSSECCopyObjectPart()
			testSSECEncryptedToUnencryptedCopyPart()
			testUnencryptedToSSECCopyObjectPart()
			testUnencryptedToUnencryptedCopyPart()
			testEncryptedSSECToSSES3CopyObject()
			testEncryptedSSES3ToSSECCopyObject()
			testSSECEncryptedToSSES3CopyObjectPart()
			testSSES3EncryptedToSSECCopyObjectPart()
		}

		// KMS tests
		if kms {
			testSSES3EncryptionPutGet()
			testSSES3EncryptionFPut()
			testSSES3EncryptedGetObjectReadAtFunctional()
			testSSES3EncryptedGetObjectReadSeekFunctional()
			testEncryptedSSES3ToSSES3CopyObject()
			testEncryptedSSES3ToUnencryptedCopyObject()
			testUnencryptedToSSES3CopyObject()
			testUnencryptedToSSES3CopyObjectPart()
			testSSES3EncryptedToUnencryptedCopyPart()
			testSSES3EncryptedToSSES3CopyObjectPart()
		}
	} else {
		// Quick mode: only the two broad end-to-end smoke tests.
		testFunctional()
		testFunctionalV2()
	}
}
diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go
new file mode 100644
index 0000000..07bc7db
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go
@@ -0,0 +1,101 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "fmt"
22 "io"
23 "sync"
24)
25
// hookReader hooks additional reader in the source stream. It is
// useful for making progress bars. Second reader is appropriately
// notified about the exact number of bytes read from the primary
// source on each Read operation.
type hookReader struct {
	// mu guards source and hook across concurrent Read/Seek calls.
	mu     sync.RWMutex
	source io.Reader
	hook   io.Reader // optional; nil means no progress reporting
}
35
36// Seek implements io.Seeker. Seeks source first, and if necessary
37// seeks hook if Seek method is appropriately found.
38func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
39 hr.mu.Lock()
40 defer hr.mu.Unlock()
41
42 // Verify for source has embedded Seeker, use it.
43 sourceSeeker, ok := hr.source.(io.Seeker)
44 if ok {
45 n, err = sourceSeeker.Seek(offset, whence)
46 if err != nil {
47 return 0, err
48 }
49 }
50
51 if hr.hook != nil {
52 // Verify if hook has embedded Seeker, use it.
53 hookSeeker, ok := hr.hook.(io.Seeker)
54 if ok {
55 var m int64
56 m, err = hookSeeker.Seek(offset, whence)
57 if err != nil {
58 return 0, err
59 }
60 if n != m {
61 return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
62 }
63 }
64 }
65
66 return n, nil
67}
68
69// Read implements io.Reader. Always reads from the source, the return
70// value 'n' number of bytes are reported through the hook. Returns
71// error for all non io.EOF conditions.
72func (hr *hookReader) Read(b []byte) (n int, err error) {
73 hr.mu.RLock()
74 defer hr.mu.RUnlock()
75
76 n, err = hr.source.Read(b)
77 if err != nil && err != io.EOF {
78 return n, err
79 }
80 if hr.hook != nil {
81 // Progress the hook with the total read bytes from the source.
82 if _, herr := hr.hook.Read(b[:n]); herr != nil {
83 if herr != io.EOF {
84 return n, herr
85 }
86 }
87 }
88 return n, err
89}
90
91// newHook returns a io.ReadSeeker which implements hookReader that
92// reports the data read from the source to the hook.
93func newHook(source, hook io.Reader) io.Reader {
94 if hook == nil {
95 return &hookReader{source: source}
96 }
97 return &hookReader{
98 source: source,
99 hook: hook,
100 }
101}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
new file mode 100644
index 0000000..800c4a2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -0,0 +1,242 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "crypto/sha256"
23 "encoding/hex"
24 "encoding/xml"
25 "errors"
26 "io"
27 "net/http"
28 "net/url"
29 "strconv"
30 "strings"
31 "time"
32
33 "github.com/minio/minio-go/v7/pkg/signer"
34)
35
// AssumeRoleResponse contains the result of successful AssumeRole request.
type AssumeRoleResponse struct {
	XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`

	// Result carries the temporary credentials returned by STS.
	Result           AssumeRoleResult `xml:"AssumeRoleResult"`
	ResponseMetadata struct {
		// RequestID identifies the STS request for troubleshooting.
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}
45
// AssumeRoleResult - Contains the response to a successful AssumeRole
// request, including temporary credentials that can be used to make
// MinIO API requests.
type AssumeRoleResult struct {
	// The identifiers for the temporary security credentials that the operation
	// returns.
	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`

	// The temporary security credentials, which include an access key ID, a secret
	// access key, and a security (or session) token.
	//
	// Note: The size of the security token that STS APIs return is not fixed. We
	// strongly recommend that you make no assumptions about the maximum size. As
	// of this writing, the typical size is less than 4096 bytes, but that can vary.
	// Also, future updates to AWS might require larger sizes.
	Credentials struct {
		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
	} `xml:",omitempty"`

	// A percentage value that indicates the size of the policy in packed form.
	// The service rejects any policy with a packed size greater than 100 percent,
	// which means the policy exceeded the allowed space.
	PackedPolicySize int `xml:",omitempty"`
}
73
// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSAssumeRole struct {
	// Expiry provides expiration tracking (SetExpiration/IsExpired).
	Expiry

	// Required http Client to use when connecting to MinIO STS service.
	Client *http.Client

	// STS endpoint to fetch STS credentials.
	STSEndpoint string

	// various options for this request.
	Options STSAssumeRoleOptions
}
88
// STSAssumeRoleOptions collection of various input options
// to obtain AssumeRole credentials.
type STSAssumeRoleOptions struct {
	// Mandatory inputs.
	AccessKey string
	SecretKey string

	SessionToken string // Optional if the first request is made with temporary credentials.
	Policy       string // Optional to assign a policy to the assumed role

	Location        string // Optional commonly needed with AWS STS.
	DurationSeconds int    // Optional defaults to 1 hour.

	// Optional only valid if using with AWS STS
	RoleARN         string
	RoleSessionName string
	ExternalID      string
}
107
108// NewSTSAssumeRole returns a pointer to a new
109// Credentials object wrapping the STSAssumeRole.
110func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
111 if stsEndpoint == "" {
112 return nil, errors.New("STS endpoint cannot be empty")
113 }
114 if opts.AccessKey == "" || opts.SecretKey == "" {
115 return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
116 }
117 return New(&STSAssumeRole{
118 Client: &http.Client{
119 Transport: http.DefaultTransport,
120 },
121 STSEndpoint: stsEndpoint,
122 Options: opts,
123 }), nil
124}
125
// defaultDurationSeconds is the minimum credential lifetime (1 hour)
// requested from STS when the caller asks for less or nothing.
const defaultDurationSeconds = 3600
127
128// closeResponse close non nil response with any response Body.
129// convenient wrapper to drain any remaining data on response body.
130//
131// Subsequently this allows golang http RoundTripper
132// to re-use the same connection for future requests.
133func closeResponse(resp *http.Response) {
134 // Callers should close resp.Body when done reading from it.
135 // If resp.Body is not closed, the Client's underlying RoundTripper
136 // (typically Transport) may not be able to re-use a persistent TCP
137 // connection to the server for a subsequent "keep-alive" request.
138 if resp != nil && resp.Body != nil {
139 // Drain any remaining Body and then close the connection.
140 // Without this closing connection would disallow re-using
141 // the same connection for future uses.
142 // - http://stackoverflow.com/a/17961593/4465767
143 io.Copy(io.Discard, resp.Body)
144 resp.Body.Close()
145 }
146}
147
// getAssumeRoleCredentials performs a SigV4-signed STS AssumeRole POST
// request against endpoint and returns the decoded XML response. On a
// non-200 status the body is decoded as an STS ErrorResponse, falling
// back to the S3-style Error schema, and returned as the error.
func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) {
	// Build the form-encoded AssumeRole request parameters.
	v := url.Values{}
	v.Set("Action", "AssumeRole")
	v.Set("Version", STSVersion)
	if opts.RoleARN != "" {
		v.Set("RoleArn", opts.RoleARN)
	}
	if opts.RoleSessionName != "" {
		v.Set("RoleSessionName", opts.RoleSessionName)
	}
	// Lifetimes at or below the default are raised to the default (3600s).
	if opts.DurationSeconds > defaultDurationSeconds {
		v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds))
	} else {
		v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds))
	}
	if opts.Policy != "" {
		v.Set("Policy", opts.Policy)
	}
	if opts.ExternalID != "" {
		v.Set("ExternalId", opts.ExternalID)
	}

	u, err := url.Parse(endpoint)
	if err != nil {
		return AssumeRoleResponse{}, err
	}
	// STS requests always target the service root path.
	u.Path = "/"

	// Pre-hash the body: the signature requires X-Amz-Content-Sha256.
	postBody := strings.NewReader(v.Encode())
	hash := sha256.New()
	if _, err = io.Copy(hash, postBody); err != nil {
		return AssumeRoleResponse{}, err
	}
	// Rewind so the same reader can serve as the request body.
	postBody.Seek(0, 0)

	req, err := http.NewRequest(http.MethodPost, u.String(), postBody)
	if err != nil {
		return AssumeRoleResponse{}, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
	if opts.SessionToken != "" {
		req.Header.Set("X-Amz-Security-Token", opts.SessionToken)
	}
	req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)

	resp, err := clnt.Do(req)
	if err != nil {
		return AssumeRoleResponse{}, err
	}
	defer closeResponse(resp)
	if resp.StatusCode != http.StatusOK {
		// Buffer the body so it can be decoded against both error schemas.
		var errResp ErrorResponse
		buf, err := io.ReadAll(resp.Body)
		if err != nil {
			return AssumeRoleResponse{}, err
		}
		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
		if err != nil {
			// Fall back to the S3 error schema and copy its fields over.
			var s3Err Error
			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
				return AssumeRoleResponse{}, err
			}
			errResp.RequestID = s3Err.RequestID
			errResp.STSError.Code = s3Err.Code
			errResp.STSError.Message = s3Err.Message
		}
		return AssumeRoleResponse{}, errResp
	}

	a := AssumeRoleResponse{}
	if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil {
		return AssumeRoleResponse{}, err
	}
	return a, nil
}
224
225// Retrieve retrieves credentials from the MinIO service.
226// Error will be returned if the request fails.
227func (m *STSAssumeRole) Retrieve() (Value, error) {
228 a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
229 if err != nil {
230 return Value{}, err
231 }
232
233 // Expiry window is set to 10secs.
234 m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
235
236 return Value{
237 AccessKeyID: a.Result.Credentials.AccessKey,
238 SecretAccessKey: a.Result.Credentials.SecretKey,
239 SessionToken: a.Result.Credentials.SessionToken,
240 SignerType: SignatureV4,
241 }, nil
242}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
new file mode 100644
index 0000000..ddccfb1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
@@ -0,0 +1,88 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20// A Chain will search for a provider which returns credentials
21// and cache that provider until Retrieve is called again.
22//
23// The Chain provides a way of chaining multiple providers together
24// which will pick the first available using priority order of the
25// Providers in the list.
26//
27// If none of the Providers retrieve valid credentials Value, ChainProvider's
28// Retrieve() will return the no credentials value.
29//
30// If a Provider is found which returns valid credentials Value ChainProvider
31// will cache that Provider for all calls to IsExpired(), until Retrieve is
32// called again after IsExpired() is true.
33//
34// creds := credentials.NewChainCredentials(
35// []credentials.Provider{
36// &credentials.EnvAWSS3{},
37// &credentials.EnvMinio{},
38// })
39//
40// // Usage of ChainCredentials.
41// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
42// if err != nil {
43// log.Fatalln(err)
44// }
45type Chain struct {
46 Providers []Provider
47 curr Provider
48}
49
50// NewChainCredentials returns a pointer to a new Credentials object
51// wrapping a chain of providers.
52func NewChainCredentials(providers []Provider) *Credentials {
53 return New(&Chain{
54 Providers: append([]Provider{}, providers...),
55 })
56}
57
58// Retrieve returns the credentials value, returns no credentials(anonymous)
59// if no credentials provider returned any value.
60//
61// If a provider is found with credentials, it will be cached and any calls
62// to IsExpired() will return the expired state of the cached provider.
63func (c *Chain) Retrieve() (Value, error) {
64 for _, p := range c.Providers {
65 creds, _ := p.Retrieve()
66 // Always prioritize non-anonymous providers, if any.
67 if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
68 continue
69 }
70 c.curr = p
71 return creds, nil
72 }
73 // At this point we have exhausted all the providers and
74 // are left without any credentials return anonymous.
75 return Value{
76 SignerType: SignatureAnonymous,
77 }, nil
78}
79
80// IsExpired will returned the expired state of the currently cached provider
81// if there is one. If there is no current provider, true will be returned.
82func (c *Chain) IsExpired() bool {
83 if c.curr != nil {
84 return c.curr.IsExpired()
85 }
86
87 return true
88}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
new file mode 100644
index 0000000..d793c9e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
@@ -0,0 +1,17 @@
1{
2 "version": "8",
3 "hosts": {
4 "play": {
5 "url": "https://play.min.io",
6 "accessKey": "Q3AM3UQ867SPQQA43P2F",
7 "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
8 "api": "S3v2"
9 },
10 "s3": {
11 "url": "https://s3.amazonaws.com",
12 "accessKey": "accessKey",
13 "secretKey": "secret",
14 "api": "S3v4"
15 }
16 }
17} \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
new file mode 100644
index 0000000..af61049
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
@@ -0,0 +1,193 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "sync"
22 "time"
23)
24
const (
	// STSVersion is the STS API version string sent with STS requests.
	STSVersion = "2011-06-15"

	// defaultExpiryWindow is the fraction of a credential's remaining
	// lifetime that is kept when DefaultExpiryWindow is requested: the
	// recorded expiration is pulled in so a refresh fires once 80% of
	// the validity period has elapsed (see Expiry.SetExpiration).
	defaultExpiryWindow = 0.8
)
32
// A Value is the AWS credentials value for individual credential fields.
type Value struct {
	// AWS Access key ID
	AccessKeyID string

	// AWS Secret Access Key
	SecretAccessKey string

	// AWS Session Token
	SessionToken string

	// Signature Type (V4, V2, anonymous, ...) to use with these credentials.
	SignerType SignatureType
}
47
// A Provider is the interface for any component that supplies a credentials
// Value. A Provider is required to manage its own expired state and to
// define what "expired" means for the credentials it produces.
type Provider interface {
	// Retrieve returns the credentials value on success.
	// An error is returned if the value was not obtainable.
	Retrieve() (Value, error)

	// IsExpired reports whether the credentials are no longer valid and
	// need to be retrieved again.
	IsExpired() bool
}
60
61// A Expiry provides shared expiration logic to be used by credentials
62// providers to implement expiry functionality.
63//
64// The best method to use this struct is as an anonymous field within the
65// provider's struct.
66//
67// Example:
68//
69// type IAMCredentialProvider struct {
70// Expiry
71// ...
72// }
73type Expiry struct {
74 // The date/time when to expire on
75 expiration time.Time
76
77 // If set will be used by IsExpired to determine the current time.
78 // Defaults to time.Now if CurrentTime is not set.
79 CurrentTime func() time.Time
80}
81
82// SetExpiration sets the expiration IsExpired will check when called.
83//
84// If window is greater than 0 the expiration time will be reduced by the
85// window value.
86//
87// Using a window is helpful to trigger credentials to expire sooner than
88// the expiration time given to ensure no requests are made with expired
89// tokens.
90func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
91 if e.CurrentTime == nil {
92 e.CurrentTime = time.Now
93 }
94 cut := window
95 if cut < 0 {
96 expireIn := expiration.Sub(e.CurrentTime())
97 cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow))
98 }
99 e.expiration = expiration.Add(-cut)
100}
101
102// IsExpired returns if the credentials are expired.
103func (e *Expiry) IsExpired() bool {
104 if e.CurrentTime == nil {
105 e.CurrentTime = time.Now
106 }
107 return e.expiration.Before(e.CurrentTime())
108}
109
110// Credentials - A container for synchronous safe retrieval of credentials Value.
111// Credentials will cache the credentials value until they expire. Once the value
112// expires the next Get will attempt to retrieve valid credentials.
113//
114// Credentials is safe to use across multiple goroutines and will manage the
115// synchronous state so the Providers do not need to implement their own
116// synchronization.
117//
118// The first Credentials.Get() will always call Provider.Retrieve() to get the
119// first instance of the credentials Value. All calls to Get() after that
120// will return the cached credentials Value until IsExpired() returns true.
121type Credentials struct {
122 sync.Mutex
123
124 creds Value
125 forceRefresh bool
126 provider Provider
127}
128
129// New returns a pointer to a new Credentials with the provider set.
130func New(provider Provider) *Credentials {
131 return &Credentials{
132 provider: provider,
133 forceRefresh: true,
134 }
135}
136
137// Get returns the credentials value, or error if the credentials Value failed
138// to be retrieved.
139//
140// Will return the cached credentials Value if it has not expired. If the
141// credentials Value has expired the Provider's Retrieve() will be called
142// to refresh the credentials.
143//
144// If Credentials.Expire() was called the credentials Value will be force
145// expired, and the next call to Get() will cause them to be refreshed.
146func (c *Credentials) Get() (Value, error) {
147 if c == nil {
148 return Value{}, nil
149 }
150
151 c.Lock()
152 defer c.Unlock()
153
154 if c.isExpired() {
155 creds, err := c.provider.Retrieve()
156 if err != nil {
157 return Value{}, err
158 }
159 c.creds = creds
160 c.forceRefresh = false
161 }
162
163 return c.creds, nil
164}
165
166// Expire expires the credentials and forces them to be retrieved on the
167// next call to Get().
168//
169// This will override the Provider's expired state, and force Credentials
170// to call the Provider's Retrieve().
171func (c *Credentials) Expire() {
172 c.Lock()
173 defer c.Unlock()
174
175 c.forceRefresh = true
176}
177
178// IsExpired returns if the credentials are no longer valid, and need
179// to be refreshed.
180//
181// If the Credentials were forced to be expired with Expire() this will
182// reflect that override.
183func (c *Credentials) IsExpired() bool {
184 c.Lock()
185 defer c.Unlock()
186
187 return c.isExpired()
188}
189
190// isExpired helper method wrapping the definition of expired credentials.
191func (c *Credentials) isExpired() bool {
192 return c.forceRefresh || c.provider.IsExpired()
193}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
new file mode 100644
index 0000000..afbfad5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
@@ -0,0 +1,7 @@
1{
2 "Version": 1,
3 "SessionToken": "token",
4 "AccessKeyId": "accessKey",
5 "SecretAccessKey": "secret",
6 "Expiration": "9999-04-27T16:02:25.000Z"
7}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
new file mode 100644
index 0000000..e2dc1bf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
@@ -0,0 +1,15 @@
1[default]
2aws_access_key_id = accessKey
3aws_secret_access_key = secret
4aws_session_token = token
5
6[no_token]
7aws_access_key_id = accessKey
8aws_secret_access_key = secret
9
10[with_colon]
11aws_access_key_id: accessKey
12aws_secret_access_key: secret
13
14[with_process]
15credential_process = /bin/cat credentials.json
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
new file mode 100644
index 0000000..fbfb105
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
@@ -0,0 +1,60 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18// Package credentials provides credential retrieval and management
19// for S3 compatible object storage.
20//
21// By default the Credentials.Get() will cache the successful result of a
22// Provider's Retrieve() until Provider.IsExpired() returns true. At which
23// point Credentials will call Provider's Retrieve() to get new credential Value.
24//
25// The Provider is responsible for determining when credentials have expired.
26// It is also important to note that Credentials will always call Retrieve the
27// first time Credentials.Get() is called.
28//
29// Example of using the environment variable credentials.
30//
31// creds := NewFromEnv()
32// // Retrieve the credentials value
33// credValue, err := creds.Get()
34// if err != nil {
35// // handle error
36// }
37//
38// Example of forcing credentials to expire and be refreshed on the next Get().
39// This may be helpful to proactively expire credentials and refresh them sooner
40// than they would naturally expire on their own.
41//
42// creds := NewFromIAM("")
43// creds.Expire()
44// credsValue, err := creds.Get()
45// // New credentials will be retrieved instead of from cache.
46//
47// # Custom Provider
48//
49// Each Provider built into this package also provides a helper method to generate
50// a Credentials pointer setup with the provider. To use a custom Provider just
51// create a type which satisfies the Provider interface and pass it to the
52// NewCredentials method.
53//
54// type MyProvider struct{}
55// func (m *MyProvider) Retrieve() (Value, error) {...}
56// func (m *MyProvider) IsExpired() bool {...}
57//
58// creds := NewCredentials(&MyProvider{})
59// credValue, err := creds.Get()
60package credentials
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
new file mode 100644
index 0000000..b6e60d0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import "os"
21
22// A EnvAWS retrieves credentials from the environment variables of the
23// running process. EnvAWSironment credentials never expire.
24//
25// EnvAWSironment variables used:
26//
27// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
28// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
29// * Secret Token: AWS_SESSION_TOKEN.
30type EnvAWS struct {
31 retrieved bool
32}
33
34// NewEnvAWS returns a pointer to a new Credentials object
35// wrapping the environment variable provider.
36func NewEnvAWS() *Credentials {
37 return New(&EnvAWS{})
38}
39
40// Retrieve retrieves the keys from the environment.
41func (e *EnvAWS) Retrieve() (Value, error) {
42 e.retrieved = false
43
44 id := os.Getenv("AWS_ACCESS_KEY_ID")
45 if id == "" {
46 id = os.Getenv("AWS_ACCESS_KEY")
47 }
48
49 secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
50 if secret == "" {
51 secret = os.Getenv("AWS_SECRET_KEY")
52 }
53
54 signerType := SignatureV4
55 if id == "" || secret == "" {
56 signerType = SignatureAnonymous
57 }
58
59 e.retrieved = true
60 return Value{
61 AccessKeyID: id,
62 SecretAccessKey: secret,
63 SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
64 SignerType: signerType,
65 }, nil
66}
67
68// IsExpired returns if the credentials have been retrieved.
69func (e *EnvAWS) IsExpired() bool {
70 return !e.retrieved
71}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
new file mode 100644
index 0000000..5bfeab1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
@@ -0,0 +1,68 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import "os"
21
22// A EnvMinio retrieves credentials from the environment variables of the
23// running process. EnvMinioironment credentials never expire.
24//
25// Environment variables used:
26//
27// * Access Key ID: MINIO_ACCESS_KEY.
28// * Secret Access Key: MINIO_SECRET_KEY.
29// * Access Key ID: MINIO_ROOT_USER.
30// * Secret Access Key: MINIO_ROOT_PASSWORD.
31type EnvMinio struct {
32 retrieved bool
33}
34
35// NewEnvMinio returns a pointer to a new Credentials object
36// wrapping the environment variable provider.
37func NewEnvMinio() *Credentials {
38 return New(&EnvMinio{})
39}
40
41// Retrieve retrieves the keys from the environment.
42func (e *EnvMinio) Retrieve() (Value, error) {
43 e.retrieved = false
44
45 id := os.Getenv("MINIO_ROOT_USER")
46 secret := os.Getenv("MINIO_ROOT_PASSWORD")
47
48 signerType := SignatureV4
49 if id == "" || secret == "" {
50 id = os.Getenv("MINIO_ACCESS_KEY")
51 secret = os.Getenv("MINIO_SECRET_KEY")
52 if id == "" || secret == "" {
53 signerType = SignatureAnonymous
54 }
55 }
56
57 e.retrieved = true
58 return Value{
59 AccessKeyID: id,
60 SecretAccessKey: secret,
61 SignerType: signerType,
62 }, nil
63}
64
65// IsExpired returns if the credentials have been retrieved.
66func (e *EnvMinio) IsExpired() bool {
67 return !e.retrieved
68}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
new file mode 100644
index 0000000..07a9c2f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -0,0 +1,95 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2021 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "fmt"
24 "io"
25)
26
// ErrorResponse - the typed XML error document returned by the STS
// endpoint.
// ErrorResponse struct should be comparable since it is compared inside
// golang http API (https://github.com/golang/go/issues/29768)
type ErrorResponse struct {
	XMLName  xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"`
	STSError struct {
		Type    string `xml:"Type"`
		Code    string `xml:"Code"`
		Message string `xml:"Message"`
	} `xml:"Error"`
	RequestID string `xml:"RequestId"`
}
39
// Error - the typed S3-style error returned by API operations, used here
// as a fallback decode target when a response does not parse as an STS
// ErrorResponse.
type Error struct {
	XMLName    xml.Name `xml:"Error" json:"-"`
	Code       string
	Message    string
	BucketName string
	Key        string
	Resource   string
	RequestID  string `xml:"RequestId"`
	HostID     string `xml:"HostId"`

	// Region where the bucket is located. This header is returned
	// only in HEAD bucket and ListObjects response.
	Region string

	// Captures the server string returned in response header.
	Server string

	// Underlying HTTP status code for the returned error
	StatusCode int `xml:"-" json:"-"`
}
61
62// Error - Returns S3 error string.
63func (e Error) Error() string {
64 if e.Message == "" {
65 return fmt.Sprintf("Error response code %s.", e.Code)
66 }
67 return e.Message
68}
69
70// Error - Returns STS error string.
71func (e ErrorResponse) Error() string {
72 if e.STSError.Message == "" {
73 return fmt.Sprintf("Error response code %s.", e.STSError.Code)
74 }
75 return e.STSError.Message
76}
77
78// xmlDecoder provide decoded value in xml.
79func xmlDecoder(body io.Reader, v interface{}) error {
80 d := xml.NewDecoder(body)
81 return d.Decode(v)
82}
83
84// xmlDecodeAndBody reads the whole body up to 1MB and
85// tries to XML decode it into v.
86// The body that was read and any error from reading or decoding is returned.
87func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
88 // read the whole body (up to 1MB)
89 const maxBodyLength = 1 << 20
90 body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
91 if err != nil {
92 return nil, err
93 }
94 return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
95}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
new file mode 100644
index 0000000..5b07376
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,157 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "encoding/json"
22 "errors"
23 "os"
24 "os/exec"
25 "path/filepath"
26 "strings"
27 "time"
28
29 ini "gopkg.in/ini.v1"
30)
31
// externalProcessCredentials stores the output of a credential_process
// command: the JSON document the external command must print on stdout.
type externalProcessCredentials struct {
	Version         int
	SessionToken    string
	AccessKeyID     string `json:"AccessKeyId"`
	SecretAccessKey string
	Expiration      time.Time
}
40
// A FileAWSCredentials retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type FileAWSCredentials struct {
	Expiry

	// Path to the shared credentials file.
	//
	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
	// env value is empty will default to current user's home directory.
	// Linux/OSX: "$HOME/.aws/credentials"
	// Windows:   "%USERPROFILE%\.aws\credentials"
	Filename string

	// AWS Profile to extract credentials from the shared credentials file. If empty
	// will default to environment variable "AWS_PROFILE" or "default" if
	// environment variable is also not set.
	Profile string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}
64
65// NewFileAWSCredentials returns a pointer to a new Credentials object
66// wrapping the Profile file provider.
67func NewFileAWSCredentials(filename, profile string) *Credentials {
68 return New(&FileAWSCredentials{
69 Filename: filename,
70 Profile: profile,
71 })
72}
73
74// Retrieve reads and extracts the shared credentials from the current
75// users home directory.
76func (p *FileAWSCredentials) Retrieve() (Value, error) {
77 if p.Filename == "" {
78 p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
79 if p.Filename == "" {
80 homeDir, err := os.UserHomeDir()
81 if err != nil {
82 return Value{}, err
83 }
84 p.Filename = filepath.Join(homeDir, ".aws", "credentials")
85 }
86 }
87 if p.Profile == "" {
88 p.Profile = os.Getenv("AWS_PROFILE")
89 if p.Profile == "" {
90 p.Profile = "default"
91 }
92 }
93
94 p.retrieved = false
95
96 iniProfile, err := loadProfile(p.Filename, p.Profile)
97 if err != nil {
98 return Value{}, err
99 }
100
101 // Default to empty string if not found.
102 id := iniProfile.Key("aws_access_key_id")
103 // Default to empty string if not found.
104 secret := iniProfile.Key("aws_secret_access_key")
105 // Default to empty string if not found.
106 token := iniProfile.Key("aws_session_token")
107
108 // If credential_process is defined, obtain credentials by executing
109 // the external process
110 credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String())
111 if credentialProcess != "" {
112 args := strings.Fields(credentialProcess)
113 if len(args) <= 1 {
114 return Value{}, errors.New("invalid credential process args")
115 }
116 cmd := exec.Command(args[0], args[1:]...)
117 out, err := cmd.Output()
118 if err != nil {
119 return Value{}, err
120 }
121 var externalProcessCredentials externalProcessCredentials
122 err = json.Unmarshal([]byte(out), &externalProcessCredentials)
123 if err != nil {
124 return Value{}, err
125 }
126 p.retrieved = true
127 p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow)
128 return Value{
129 AccessKeyID: externalProcessCredentials.AccessKeyID,
130 SecretAccessKey: externalProcessCredentials.SecretAccessKey,
131 SessionToken: externalProcessCredentials.SessionToken,
132 SignerType: SignatureV4,
133 }, nil
134 }
135 p.retrieved = true
136 return Value{
137 AccessKeyID: id.String(),
138 SecretAccessKey: secret.String(),
139 SessionToken: token.String(),
140 SignerType: SignatureV4,
141 }, nil
142}
143
144// loadProfiles loads from the file pointed to by shared credentials filename for profile.
145// The credentials retrieved from the profile will be returned or error. Error will be
146// returned if it fails to read from the file, or the data is invalid.
147func loadProfile(filename, profile string) (*ini.Section, error) {
148 config, err := ini.Load(filename)
149 if err != nil {
150 return nil, err
151 }
152 iniProfile, err := config.GetSection(profile)
153 if err != nil {
154 return nil, err
155 }
156 return iniProfile, nil
157}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
new file mode 100644
index 0000000..eb77767
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -0,0 +1,139 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "os"
22 "path/filepath"
23 "runtime"
24
25 jsoniter "github.com/json-iterator/go"
26)
27
// A FileMinioClient retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Configuration file example: $HOME/.mc/config.json
type FileMinioClient struct {
	// Path to the shared credentials file.
	//
	// If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
	// env value is empty will default to current user's home directory.
	// Linux/OSX: "$HOME/.mc/config.json"
	// Windows:   "%USERPROFILE%\mc\config.json" (note: no leading dot)
	Filename string

	// MinIO Alias to extract credentials from the shared credentials file. If empty
	// will default to environment variable "MINIO_ALIAS" or "s3" if the
	// environment variable is also not set.
	Alias string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}
49
50// NewFileMinioClient returns a pointer to a new Credentials object
51// wrapping the Alias file provider.
52func NewFileMinioClient(filename, alias string) *Credentials {
53 return New(&FileMinioClient{
54 Filename: filename,
55 Alias: alias,
56 })
57}
58
59// Retrieve reads and extracts the shared credentials from the current
60// users home directory.
61func (p *FileMinioClient) Retrieve() (Value, error) {
62 if p.Filename == "" {
63 if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
64 p.Filename = value
65 } else {
66 homeDir, err := os.UserHomeDir()
67 if err != nil {
68 return Value{}, err
69 }
70 p.Filename = filepath.Join(homeDir, ".mc", "config.json")
71 if runtime.GOOS == "windows" {
72 p.Filename = filepath.Join(homeDir, "mc", "config.json")
73 }
74 }
75 }
76
77 if p.Alias == "" {
78 p.Alias = os.Getenv("MINIO_ALIAS")
79 if p.Alias == "" {
80 p.Alias = "s3"
81 }
82 }
83
84 p.retrieved = false
85
86 hostCfg, err := loadAlias(p.Filename, p.Alias)
87 if err != nil {
88 return Value{}, err
89 }
90
91 p.retrieved = true
92 return Value{
93 AccessKeyID: hostCfg.AccessKey,
94 SecretAccessKey: hostCfg.SecretKey,
95 SignerType: parseSignatureType(hostCfg.API),
96 }, nil
97}
98
// IsExpired reports whether the config-file credentials still need to be
// read; file-based credentials never expire once retrieved.
func (p *FileMinioClient) IsExpired() bool {
	return !p.retrieved
}
103
// hostConfig is the per-host (alias) entry of an mc config file: endpoint
// URL, key pair, and signature API version ("S3v2"/"S3v4").
type hostConfig struct {
	URL       string `json:"url"`
	AccessKey string `json:"accessKey"`
	SecretKey string `json:"secretKey"`
	API       string `json:"api"`
}

// config is the top-level mc config file document. Depending on the
// "version" field the host entries live under "hosts" (older) or
// "aliases" (version 10).
type config struct {
	Version string `json:"version"`
	Hosts   map[string]hostConfig `json:"hosts"`
	Aliases map[string]hostConfig `json:"aliases"`
}
118
119// loadAliass loads from the file pointed to by shared credentials filename for alias.
120// The credentials retrieved from the alias will be returned or error. Error will be
121// returned if it fails to read from the file.
122func loadAlias(filename, alias string) (hostConfig, error) {
123 cfg := &config{}
124 json := jsoniter.ConfigCompatibleWithStandardLibrary
125
126 configBytes, err := os.ReadFile(filename)
127 if err != nil {
128 return hostConfig{}, err
129 }
130 if err = json.Unmarshal(configBytes, cfg); err != nil {
131 return hostConfig{}, err
132 }
133
134 if cfg.Version == "10" {
135 return cfg.Aliases[alias], nil
136 }
137
138 return cfg.Hosts[alias], nil
139}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
new file mode 100644
index 0000000..c5153c4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -0,0 +1,433 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bufio"
22 "context"
23 "errors"
24 "fmt"
25 "io"
26 "net"
27 "net/http"
28 "net/url"
29 "os"
30 "path"
31 "strings"
32 "time"
33
34 jsoniter "github.com/json-iterator/go"
35)
36
// DefaultExpiryWindow - Default expiry window.
// ExpiryWindow will allow the credentials to trigger refreshing
// prior to the credentials actually expiring. This is beneficial
// so race conditions with expiring credentials do not cause
// request to fail unexpectedly due to ExpiredTokenException exceptions.
// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration.
// When used the tokens refresh will be triggered when 80% of the elapsed
// time until the actual expiration time is passed.
// The value -1 is a sentinel recognized by SetExpiration, not a duration.
const DefaultExpiryWindow = -1

// A IAM retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
//
// Every field below can alternatively be supplied through the matching
// standard AWS environment variable; the env var wins (see Retrieve).
type IAM struct {
	Expiry

	// Required http Client to use when connecting to IAM metadata service.
	Client *http.Client

	// Custom endpoint to fetch IAM role credentials.
	Endpoint string

	// Region configurable custom region for STS
	Region string

	// Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
	Container struct {
		AuthorizationToken     string
		CredentialsFullURI     string
		CredentialsRelativeURI string
	}

	// EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html
	EKSIdentity struct {
		TokenFile       string
		RoleARN         string
		RoleSessionName string
	}
}

// IAM Roles for Amazon EC2
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
const (
	DefaultIAMRoleEndpoint      = "http://169.254.169.254"
	DefaultECSRoleEndpoint      = "http://169.254.170.2"
	DefaultSTSRoleEndpoint      = "https://sts.amazonaws.com"
	DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
	// IMDSv2 session-token request/response plumbing (see fetchIMDSToken).
	TokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
	TokenPath             = "/latest/api/token"
	TokenTTL              = "21600"
	TokenRequestHeader    = "X-aws-ec2-metadata-token"
)
88
89// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
90func NewIAM(endpoint string) *Credentials {
91 return New(&IAM{
92 Client: &http.Client{
93 Transport: http.DefaultTransport,
94 },
95 Endpoint: endpoint,
96 })
97}
98
// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials.
func (m *IAM) Retrieve() (Value, error) {
	// Each setting prefers the standard AWS environment variable and
	// falls back to the value configured on the IAM struct.
	token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
	if token == "" {
		token = m.Container.AuthorizationToken
	}

	relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
	if relativeURI == "" {
		relativeURI = m.Container.CredentialsRelativeURI
	}

	fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
	if fullURI == "" {
		fullURI = m.Container.CredentialsFullURI
	}

	identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
	if identityFile == "" {
		identityFile = m.EKSIdentity.TokenFile
	}

	roleArn := os.Getenv("AWS_ROLE_ARN")
	if roleArn == "" {
		roleArn = m.EKSIdentity.RoleARN
	}

	roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME")
	if roleSessionName == "" {
		roleSessionName = m.EKSIdentity.RoleSessionName
	}

	region := os.Getenv("AWS_REGION")
	if region == "" {
		region = m.Region
	}

	var roleCreds ec2RoleCredRespBody
	var err error

	endpoint := m.Endpoint
	// Pick the credential source in priority order: EKS web identity,
	// ECS relative URI, container full URI, then plain EC2 metadata.
	switch {
	case identityFile != "":
		// Web identity: derive a regional STS endpoint (with the China
		// partition suffix when applicable) unless one was configured.
		if len(endpoint) == 0 {
			if region != "" {
				if strings.HasPrefix(region, "cn-") {
					endpoint = "https://sts." + region + ".amazonaws.com.cn"
				} else {
					endpoint = "https://sts." + region + ".amazonaws.com"
				}
			} else {
				endpoint = DefaultSTSRoleEndpoint
			}
		}

		creds := &STSWebIdentity{
			Client:      m.Client,
			STSEndpoint: endpoint,
			GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
				// The token file is re-read on every refresh so rotated
				// tokens are picked up.
				token, err := os.ReadFile(identityFile)
				if err != nil {
					return nil, err
				}

				return &WebIdentityToken{Token: string(token)}, nil
			},
			RoleARN:         roleArn,
			roleSessionName: roleSessionName,
		}

		// This case returns directly instead of falling through to the
		// shared epilogue because the result is a Value, not a
		// ec2RoleCredRespBody.
		stsWebIdentityCreds, err := creds.Retrieve()
		if err == nil {
			m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
		}
		return stsWebIdentityCreds, err

	case relativeURI != "":
		if len(endpoint) == 0 {
			endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
		}

		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)

	case fullURI != "":
		if len(endpoint) == 0 {
			endpoint = fullURI
			// A full URI taken from the environment must resolve to a
			// loopback host only; break with err set so the common
			// error path below reports it.
			var ok bool
			if ok, err = isLoopback(endpoint); !ok {
				if err == nil {
					err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
				}
				break
			}
		}

		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)

	default:
		// EC2 instance: walk the IMDS security-credentials path.
		roleCreds, err = getCredentials(m.Client, endpoint)
	}

	if err != nil {
		return Value{}, err
	}
	// Refresh ahead of expiry per the DefaultExpiryWindow (80%) policy.
	m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)

	return Value{
		AccessKeyID:     roleCreds.AccessKeyID,
		SecretAccessKey: roleCreds.SecretAccessKey,
		SessionToken:    roleCreds.Token,
		SignerType:      SignatureV4,
	}, nil
}
215
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses.
// Both the EC2 metadata service and the ECS task endpoint reply with this
// JSON shape; on failure Code/Message are populated instead of the
// credential fields (getCredentials checks Code == "Success").
type ec2RoleCredRespBody struct {
	// Success State
	Expiration      time.Time
	AccessKeyID     string
	SecretAccessKey string
	Token           string

	// Error state
	Code    string
	Message string

	// Unused params.
	LastUpdated time.Time
	Type        string
}
233
234// Get the final IAM role URL where the request will
235// be sent to fetch the rolling access credentials.
236// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
237func getIAMRoleURL(endpoint string) (*url.URL, error) {
238 u, err := url.Parse(endpoint)
239 if err != nil {
240 return nil, err
241 }
242 u.Path = DefaultIAMSecurityCredsPath
243 return u, nil
244}
245
246// listRoleNames lists of credential role names associated
247// with the current EC2 service. If there are no credentials,
248// or there is an error making or receiving the request.
249// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
250func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) {
251 req, err := http.NewRequest(http.MethodGet, u.String(), nil)
252 if err != nil {
253 return nil, err
254 }
255 if token != "" {
256 req.Header.Add(TokenRequestHeader, token)
257 }
258 resp, err := client.Do(req)
259 if err != nil {
260 return nil, err
261 }
262 defer resp.Body.Close()
263 if resp.StatusCode != http.StatusOK {
264 return nil, errors.New(resp.Status)
265 }
266
267 credsList := []string{}
268 s := bufio.NewScanner(resp.Body)
269 for s.Scan() {
270 credsList = append(credsList, s.Text())
271 }
272
273 if err := s.Err(); err != nil {
274 return nil, err
275 }
276
277 return credsList, nil
278}
279
280func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) {
281 req, err := http.NewRequest(http.MethodGet, endpoint, nil)
282 if err != nil {
283 return ec2RoleCredRespBody{}, err
284 }
285
286 if token != "" {
287 req.Header.Set("Authorization", token)
288 }
289
290 resp, err := client.Do(req)
291 if err != nil {
292 return ec2RoleCredRespBody{}, err
293 }
294 defer resp.Body.Close()
295 if resp.StatusCode != http.StatusOK {
296 return ec2RoleCredRespBody{}, errors.New(resp.Status)
297 }
298
299 respCreds := ec2RoleCredRespBody{}
300 if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
301 return ec2RoleCredRespBody{}, err
302 }
303
304 return respCreds, nil
305}
306
307func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
308 ctx, cancel := context.WithTimeout(context.Background(), time.Second)
309 defer cancel()
310
311 req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil)
312 if err != nil {
313 return "", err
314 }
315 req.Header.Add(TokenRequestTTLHeader, TokenTTL)
316 resp, err := client.Do(req)
317 if err != nil {
318 return "", err
319 }
320 defer resp.Body.Close()
321 data, err := io.ReadAll(resp.Body)
322 if err != nil {
323 return "", err
324 }
325 if resp.StatusCode != http.StatusOK {
326 return "", errors.New(resp.Status)
327 }
328 return string(data), nil
329}
330
// getCredentials - obtains the credentials from the IAM role name associated with
// the current EC2 service.
//
// If the credentials cannot be found, or there is an error
// reading the response an error will be returned.
func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
	if endpoint == "" {
		endpoint = DefaultIAMRoleEndpoint
	}

	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
	token, err := fetchIMDSToken(client, endpoint)
	if err != nil {
		// Return only errors for valid situations, if the IMDSv2 is not enabled
		// we will not be able to get the token, in such a situation we have
		// to rely on IMDSv1 behavior as a fallback, this check ensures that.
		// Refer https://github.com/minio/minio-go/issues/1866
		if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
			return ec2RoleCredRespBody{}, err
		}
	}

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	u, err := getIAMRoleURL(endpoint)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}

	// List role names attached to this instance; token may be empty here
	// (IMDSv1 fallback), in which case no token header is sent.
	roleNames, err := listRoleNames(client, u, token)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}

	if len(roleNames) == 0 {
		return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
	}

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	// - An instance profile can contain only one IAM role. This limit cannot be increased.
	roleName := roleNames[0]

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	// The following command retrieves the security credentials for an
	// IAM role named `s3access`.
	//
	// $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
	//
	u.Path = path.Join(u.Path, roleName)
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	if token != "" {
		req.Header.Add(TokenRequestHeader, token)
	}

	resp, err := client.Do(req)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return ec2RoleCredRespBody{}, errors.New(resp.Status)
	}

	respCreds := ec2RoleCredRespBody{}
	if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
		return ec2RoleCredRespBody{}, err
	}

	if respCreds.Code != "Success" {
		// If an error code was returned something failed requesting the role.
		return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
	}

	return respCreds, nil
}
409
// isLoopback reports whether the host of uri resolves exclusively to
// loopback addresses. An error is returned when the uri cannot be
// parsed, carries no host, or the host cannot be resolved.
func isLoopback(uri string) (bool, error) {
	u, err := url.Parse(uri)
	if err != nil {
		return false, err
	}

	host := u.Hostname()
	if len(host) == 0 {
		return false, fmt.Errorf("can't parse host from uri: %s", uri)
	}

	ips, err := net.LookupHost(host)
	if err != nil {
		return false, err
	}
	// An empty resolver answer must not count as loopback: the all-of
	// loop below is vacuously true over zero addresses, which would let
	// an unresolvable host pass the container-credentials URI check.
	if len(ips) == 0 {
		return false, nil
	}
	for _, ip := range ips {
		if !net.ParseIP(ip).IsLoopback() {
			return false, nil
		}
	}

	return true, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
new file mode 100644
index 0000000..b794333
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
@@ -0,0 +1,77 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import "strings"
21
// SignatureType is type of Authorization requested for a given HTTP request.
type SignatureType int

// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
const (
	// SignatureDefault is always set to v4.
	SignatureDefault SignatureType = iota
	SignatureV4
	SignatureV2
	SignatureV4Streaming
	SignatureAnonymous // Anonymous signature signifies, no signature.
)

// IsV2 reports whether s selects AWS signature version 2.
func (s SignatureType) IsV2() bool {
	return s == SignatureV2
}

// IsV4 reports whether s selects AWS signature version 4; the default
// signature counts as v4.
func (s SignatureType) IsV4() bool {
	return s == SignatureV4 || s == SignatureDefault
}

// IsStreamingV4 reports whether s selects streaming signature v4.
func (s SignatureType) IsStreamingV4() bool {
	return s == SignatureV4Streaming
}

// IsAnonymous reports whether s requests no signing at all.
func (s SignatureType) IsAnonymous() bool {
	return s == SignatureAnonymous
}

// String returns the human-readable name of the signature type. The
// returned strings round-trip through parseSignatureType, which matches
// them case-insensitively.
func (s SignatureType) String() string {
	switch {
	case s.IsV2():
		return "S3v2"
	case s.IsV4():
		return "S3v4"
	case s.IsStreamingV4():
		return "S3v4Streaming"
	default:
		return "Anonymous"
	}
}

// parseSignatureType maps a case-insensitive signature name back to its
// SignatureType; unrecognized names parse as SignatureAnonymous.
func parseSignatureType(str string) SignatureType {
	switch {
	case strings.EqualFold(str, "S3v4"):
		return SignatureV4
	case strings.EqualFold(str, "S3v2"):
		return SignatureV2
	case strings.EqualFold(str, "S3v4Streaming"):
		return SignatureV4Streaming
	}
	return SignatureAnonymous
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
new file mode 100644
index 0000000..7dde00b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
@@ -0,0 +1,67 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
// A Static is a set of credentials which are set programmatically,
// and will never expire.
// The embedded Value supplies the access key, secret key, session token
// and signer type directly.
type Static struct {
	Value
}
25
26// NewStaticV2 returns a pointer to a new Credentials object
27// wrapping a static credentials value provider, signature is
28// set to v2. If access and secret are not specified then
29// regardless of signature type set it Value will return
30// as anonymous.
31func NewStaticV2(id, secret, token string) *Credentials {
32 return NewStatic(id, secret, token, SignatureV2)
33}
34
35// NewStaticV4 is similar to NewStaticV2 with similar considerations.
36func NewStaticV4(id, secret, token string) *Credentials {
37 return NewStatic(id, secret, token, SignatureV4)
38}
39
40// NewStatic returns a pointer to a new Credentials object
41// wrapping a static credentials value provider.
42func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
43 return New(&Static{
44 Value: Value{
45 AccessKeyID: id,
46 SecretAccessKey: secret,
47 SessionToken: token,
48 SignerType: signerType,
49 },
50 })
51}
52
53// Retrieve returns the static credentials.
54func (s *Static) Retrieve() (Value, error) {
55 if s.AccessKeyID == "" || s.SecretAccessKey == "" {
56 // Anonymous is not an error
57 return Value{SignerType: SignatureAnonymous}, nil
58 }
59 return s.Value, nil
60}
61
62// IsExpired returns if the credentials are expired.
63//
64// For Static, the credentials never expired.
65func (s *Static) IsExpired() bool {
66 return false
67}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
new file mode 100644
index 0000000..9e92c1e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -0,0 +1,182 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "errors"
24 "fmt"
25 "io"
26 "net/http"
27 "net/url"
28 "strings"
29 "time"
30)
31
// AssumedRoleUser - The identifiers for the temporary security credentials that
// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
type AssumedRoleUser struct {
	Arn string
	// NOTE(review): AWS documents this XML element as "AssumedRoleId";
	// the tag here reads "AssumeRoleId" — confirm against actual server
	// output before changing.
	AssumedRoleID string `xml:"AssumeRoleId"`
}

// AssumeRoleWithClientGrantsResponse contains the result of successful AssumeRoleWithClientGrants request.
type AssumeRoleWithClientGrantsResponse struct {
	XMLName          xml.Name           `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"`
	Result           ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"`
	ResponseMetadata struct {
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}

// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
// request, including temporary credentials that can be used to make MinIO API requests.
type ClientGrantsResult struct {
	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
	Audience        string          `xml:",omitempty"`
	// Credentials holds the temporary key pair, session token and expiry
	// returned by the STS call.
	Credentials struct {
		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
	} `xml:",omitempty"`
	PackedPolicySize             int    `xml:",omitempty"`
	Provider                     string `xml:",omitempty"`
	SubjectFromClientGrantsToken string `xml:",omitempty"`
}

// ClientGrantsToken - client grants token with expiry.
type ClientGrantsToken struct {
	Token  string
	Expiry int // requested validity in seconds, sent as DurationSeconds
}

// A STSClientGrants retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSClientGrants struct {
	Expiry

	// Required http Client to use when connecting to MinIO STS service.
	Client *http.Client

	// MinIO endpoint to fetch STS credentials.
	STSEndpoint string

	// GetClientGrantsTokenExpiry function to retrieve tokens
	// from IDP This function should return two values one is
	// accessToken which is a self contained access token (JWT)
	// and second return value is the expiry associated with
	// this token. This is a customer provided function and
	// is mandatory.
	GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error)
}
89
90// NewSTSClientGrants returns a pointer to a new
91// Credentials object wrapping the STSClientGrants.
92func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
93 if stsEndpoint == "" {
94 return nil, errors.New("STS endpoint cannot be empty")
95 }
96 if getClientGrantsTokenExpiry == nil {
97 return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
98 }
99 return New(&STSClientGrants{
100 Client: &http.Client{
101 Transport: http.DefaultTransport,
102 },
103 STSEndpoint: stsEndpoint,
104 GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
105 }), nil
106}
107
// getClientGrantsCredentials calls the MinIO STS AssumeRoleWithClientGrants
// action at endpoint using the access token supplied by
// getClientGrantsTokenExpiry and returns the decoded XML response.
// Non-200 responses are decoded as an STS ErrorResponse; when that fails
// the buffered body is re-parsed as an S3-style Error and its fields are
// copied into the ErrorResponse before it is returned as the error.
func getClientGrantsCredentials(clnt *http.Client, endpoint string,
	getClientGrantsTokenExpiry func() (*ClientGrantsToken, error),
) (AssumeRoleWithClientGrantsResponse, error) {
	accessToken, err := getClientGrantsTokenExpiry()
	if err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}

	// Form-encoded STS query parameters.
	v := url.Values{}
	v.Set("Action", "AssumeRoleWithClientGrants")
	v.Set("Token", accessToken.Token)
	v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry))
	v.Set("Version", STSVersion)

	u, err := url.Parse(endpoint)
	if err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}

	req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
	if err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}

	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := clnt.Do(req)
	if err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		var errResp ErrorResponse
		// Buffer the body so it can be decoded twice: first as an STS
		// ErrorResponse, then (on failure) as an S3 Error.
		buf, err := io.ReadAll(resp.Body)
		if err != nil {
			return AssumeRoleWithClientGrantsResponse{}, err
		}
		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
		if err != nil {
			var s3Err Error
			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
				return AssumeRoleWithClientGrantsResponse{}, err
			}
			errResp.RequestID = s3Err.RequestID
			errResp.STSError.Code = s3Err.Code
			errResp.STSError.Message = s3Err.Message
		}
		return AssumeRoleWithClientGrantsResponse{}, errResp
	}

	a := AssumeRoleWithClientGrantsResponse{}
	if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
		return AssumeRoleWithClientGrantsResponse{}, err
	}
	return a, nil
}
164
165// Retrieve retrieves credentials from the MinIO service.
166// Error will be returned if the request fails.
167func (m *STSClientGrants) Retrieve() (Value, error) {
168 a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry)
169 if err != nil {
170 return Value{}, err
171 }
172
173 // Expiry window is set to 10secs.
174 m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
175
176 return Value{
177 AccessKeyID: a.Result.Credentials.AccessKey,
178 SecretAccessKey: a.Result.Credentials.SecretKey,
179 SessionToken: a.Result.Credentials.SessionToken,
180 SignerType: SignatureV4,
181 }, nil
182}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
new file mode 100644
index 0000000..e1f9ce4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
@@ -0,0 +1,146 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "encoding/xml"
22 "errors"
23 "fmt"
24 "net/http"
25 "net/url"
26 "time"
27)
28
// CustomTokenResult - Contains temporary creds and user metadata.
type CustomTokenResult struct {
	// Credentials holds the temporary key pair, session token and expiry
	// issued for the custom token.
	Credentials struct {
		AccessKey    string    `xml:"AccessKeyId"`
		SecretKey    string    `xml:"SecretAccessKey"`
		Expiration   time.Time `xml:"Expiration"`
		SessionToken string    `xml:"SessionToken"`
	} `xml:",omitempty"`

	AssumedUser string `xml:",omitempty"`
}

// AssumeRoleWithCustomTokenResponse contains the result of a successful
// AssumeRoleWithCustomToken request.
type AssumeRoleWithCustomTokenResponse struct {
	XMLName  xml.Name          `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCustomTokenResponse" json:"-"`
	Result   CustomTokenResult `xml:"AssumeRoleWithCustomTokenResult"`
	Metadata struct {
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}

// CustomTokenIdentity - satisfies the Provider interface, and retrieves
// credentials from MinIO using the AssumeRoleWithCustomToken STS API.
type CustomTokenIdentity struct {
	Expiry

	// Client is the HTTP client used for the STS request.
	Client *http.Client

	// MinIO server STS endpoint to fetch STS credentials.
	STSEndpoint string

	// The custom token to use with the request.
	Token string

	// RoleArn associated with the identity
	RoleArn string

	// RequestedExpiry is to set the validity of the generated credentials
	// (this value bounded by server).
	RequestedExpiry time.Duration
}
71
72// Retrieve - to satisfy Provider interface; fetches credentials from MinIO.
73func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
74 u, err := url.Parse(c.STSEndpoint)
75 if err != nil {
76 return value, err
77 }
78
79 v := url.Values{}
80 v.Set("Action", "AssumeRoleWithCustomToken")
81 v.Set("Version", STSVersion)
82 v.Set("RoleArn", c.RoleArn)
83 v.Set("Token", c.Token)
84 if c.RequestedExpiry != 0 {
85 v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds())))
86 }
87
88 u.RawQuery = v.Encode()
89
90 req, err := http.NewRequest(http.MethodPost, u.String(), nil)
91 if err != nil {
92 return value, err
93 }
94
95 resp, err := c.Client.Do(req)
96 if err != nil {
97 return value, err
98 }
99
100 defer resp.Body.Close()
101 if resp.StatusCode != http.StatusOK {
102 return value, errors.New(resp.Status)
103 }
104
105 r := AssumeRoleWithCustomTokenResponse{}
106 if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
107 return
108 }
109
110 cr := r.Result.Credentials
111 c.SetExpiration(cr.Expiration, DefaultExpiryWindow)
112 return Value{
113 AccessKeyID: cr.AccessKey,
114 SecretAccessKey: cr.SecretKey,
115 SessionToken: cr.SessionToken,
116 SignerType: SignatureV4,
117 }, nil
118}
119
120// NewCustomTokenCredentials - returns credentials using the
121// AssumeRoleWithCustomToken STS API.
122func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) {
123 c := CustomTokenIdentity{
124 Client: &http.Client{Transport: http.DefaultTransport},
125 STSEndpoint: stsEndpoint,
126 Token: token,
127 RoleArn: roleArn,
128 }
129 for _, optFunc := range optFuncs {
130 optFunc(&c)
131 }
132 return New(&c), nil
133}
134
135// CustomTokenOpt is a function type to configure the custom-token based
136// credentials using NewCustomTokenCredentials.
137type CustomTokenOpt func(*CustomTokenIdentity)
138
139// CustomTokenValidityOpt sets the validity duration of the requested
140// credentials. This value is ignored if the server enforces a lower validity
141// period.
142func CustomTokenValidityOpt(d time.Duration) CustomTokenOpt {
143 return func(c *CustomTokenIdentity) {
144 c.RequestedExpiry = d
145 }
146}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
new file mode 100644
index 0000000..ec5f3f0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
@@ -0,0 +1,189 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "fmt"
24 "io"
25 "net/http"
26 "net/url"
27 "strings"
28 "time"
29)
30
31// AssumeRoleWithLDAPResponse contains the result of successful
32// AssumeRoleWithLDAPIdentity request
33type AssumeRoleWithLDAPResponse struct {
34 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"`
35 Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
36 ResponseMetadata struct {
37 RequestID string `xml:"RequestId,omitempty"`
38 } `xml:"ResponseMetadata,omitempty"`
39}
40
41// LDAPIdentityResult - contains credentials for a successful
42// AssumeRoleWithLDAPIdentity request.
43type LDAPIdentityResult struct {
44 Credentials struct {
45 AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
46 SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
47 Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
48 SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
49 } `xml:",omitempty"`
50
51 SubjectFromToken string `xml:",omitempty"`
52}
53
54// LDAPIdentity retrieves credentials from MinIO
55type LDAPIdentity struct {
56 Expiry
57
58 // Required http Client to use when connecting to MinIO STS service.
59 Client *http.Client
60
61 // Exported STS endpoint to fetch STS credentials.
62 STSEndpoint string
63
64 // LDAP username/password used to fetch LDAP STS credentials.
65 LDAPUsername, LDAPPassword string
66
67 // Session policy to apply to the generated credentials. Leave empty to
68 // use the full access policy available to the user.
69 Policy string
70
71 // RequestedExpiry is the configured expiry duration for credentials
72 // requested from LDAP.
73 RequestedExpiry time.Duration
74}
75
76// NewLDAPIdentity returns new credentials object that uses LDAP
77// Identity.
78func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
79 l := LDAPIdentity{
80 Client: &http.Client{Transport: http.DefaultTransport},
81 STSEndpoint: stsEndpoint,
82 LDAPUsername: ldapUsername,
83 LDAPPassword: ldapPassword,
84 }
85 for _, optFunc := range optFuncs {
86 optFunc(&l)
87 }
88 return New(&l), nil
89}
90
91// LDAPIdentityOpt is a function type used to configured the LDAPIdentity
92// instance.
93type LDAPIdentityOpt func(*LDAPIdentity)
94
95// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
96func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
97 return func(k *LDAPIdentity) {
98 k.Policy = policy
99 }
100}
101
102// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
103func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
104 return func(k *LDAPIdentity) {
105 k.RequestedExpiry = d
106 }
107}
108
109// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses
110// LDAP Identity with a specified session policy. The `policy` parameter must be
111// a JSON string specifying the policy document.
112//
113// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
114func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
115 return New(&LDAPIdentity{
116 Client: &http.Client{Transport: http.DefaultTransport},
117 STSEndpoint: stsEndpoint,
118 LDAPUsername: ldapUsername,
119 LDAPPassword: ldapPassword,
120 Policy: policy,
121 }), nil
122}
123
124// Retrieve gets the credential by calling the MinIO STS API for
125// LDAP on the configured stsEndpoint.
126func (k *LDAPIdentity) Retrieve() (value Value, err error) {
127 u, err := url.Parse(k.STSEndpoint)
128 if err != nil {
129 return value, err
130 }
131
132 v := url.Values{}
133 v.Set("Action", "AssumeRoleWithLDAPIdentity")
134 v.Set("Version", STSVersion)
135 v.Set("LDAPUsername", k.LDAPUsername)
136 v.Set("LDAPPassword", k.LDAPPassword)
137 if k.Policy != "" {
138 v.Set("Policy", k.Policy)
139 }
140 if k.RequestedExpiry != 0 {
141 v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
142 }
143
144 req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
145 if err != nil {
146 return value, err
147 }
148
149 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
150
151 resp, err := k.Client.Do(req)
152 if err != nil {
153 return value, err
154 }
155
156 defer resp.Body.Close()
157 if resp.StatusCode != http.StatusOK {
158 var errResp ErrorResponse
159 buf, err := io.ReadAll(resp.Body)
160 if err != nil {
161 return value, err
162 }
163 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
164 if err != nil {
165 var s3Err Error
166 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
167 return value, err
168 }
169 errResp.RequestID = s3Err.RequestID
170 errResp.STSError.Code = s3Err.Code
171 errResp.STSError.Message = s3Err.Message
172 }
173 return value, errResp
174 }
175
176 r := AssumeRoleWithLDAPResponse{}
177 if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
178 return
179 }
180
181 cr := r.Result.Credentials
182 k.SetExpiration(cr.Expiration, DefaultExpiryWindow)
183 return Value{
184 AccessKeyID: cr.AccessKey,
185 SecretAccessKey: cr.SecretKey,
186 SessionToken: cr.SessionToken,
187 SignerType: SignatureV4,
188 }, nil
189}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
new file mode 100644
index 0000000..dee0a8c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -0,0 +1,211 @@
1// MinIO Go Library for Amazon S3 Compatible Cloud Storage
2// Copyright 2021 MinIO, Inc.
3//
4// Licensed under the Apache License, Version 2.0 (the "License");
5// you may not use this file except in compliance with the License.
6// You may obtain a copy of the License at
7//
8// http://www.apache.org/licenses/LICENSE-2.0
9//
10// Unless required by applicable law or agreed to in writing, software
11// distributed under the License is distributed on an "AS IS" BASIS,
12// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13// See the License for the specific language governing permissions and
14// limitations under the License.
15
16package credentials
17
18import (
19 "bytes"
20 "crypto/tls"
21 "encoding/xml"
22 "errors"
23 "io"
24 "net"
25 "net/http"
26 "net/url"
27 "strconv"
28 "time"
29)
30
31// CertificateIdentityOption is an optional AssumeRoleWithCertificate
32// parameter - e.g. a custom HTTP transport configuration or S3 credental
33// livetime.
34type CertificateIdentityOption func(*STSCertificateIdentity)
35
36// CertificateIdentityWithTransport returns a CertificateIdentityOption that
37// customizes the STSCertificateIdentity with the given http.RoundTripper.
38func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
39 return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
40}
41
42// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
43// customizes the STSCertificateIdentity with the given livetime.
44//
45// Fetched S3 credentials will have the given livetime if the STS server
46// allows such credentials.
47func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption {
48 return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime })
49}
50
51// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and
52// rotates those credentials once they expire.
53type STSCertificateIdentity struct {
54 Expiry
55
56 // STSEndpoint is the base URL endpoint of the STS API.
57 // For example, https://minio.local:9000
58 STSEndpoint string
59
60 // S3CredentialLivetime is the duration temp. S3 access
61 // credentials should be valid.
62 //
63 // It represents the access credential livetime requested
64 // by the client. The STS server may choose to issue
65 // temp. S3 credentials that have a different - usually
66 // shorter - livetime.
67 //
68 // The default livetime is one hour.
69 S3CredentialLivetime time.Duration
70
71 // Client is the HTTP client used to authenticate and fetch
72 // S3 credentials.
73 //
74 // A custom TLS client configuration can be specified by
75 // using a custom http.Transport:
76 // Client: http.Client {
77 // Transport: &http.Transport{
78 // TLSClientConfig: &tls.Config{},
79 // },
80 // }
81 Client http.Client
82}
83
84var _ Provider = (*STSWebIdentity)(nil) // compiler check
85
86// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
87// to the given STS endpoint with the given TLS certificate and retrieves and
88// rotates S3 credentials.
89func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
90 if endpoint == "" {
91 return nil, errors.New("STS endpoint cannot be empty")
92 }
93 if _, err := url.Parse(endpoint); err != nil {
94 return nil, err
95 }
96 identity := &STSCertificateIdentity{
97 STSEndpoint: endpoint,
98 Client: http.Client{
99 Transport: &http.Transport{
100 Proxy: http.ProxyFromEnvironment,
101 DialContext: (&net.Dialer{
102 Timeout: 30 * time.Second,
103 KeepAlive: 30 * time.Second,
104 }).DialContext,
105 ForceAttemptHTTP2: true,
106 MaxIdleConns: 100,
107 IdleConnTimeout: 90 * time.Second,
108 TLSHandshakeTimeout: 10 * time.Second,
109 ExpectContinueTimeout: 5 * time.Second,
110 TLSClientConfig: &tls.Config{
111 Certificates: []tls.Certificate{certificate},
112 },
113 },
114 },
115 }
116 for _, option := range options {
117 option(identity)
118 }
119 return New(identity), nil
120}
121
122// Retrieve fetches a new set of S3 credentials from the configured
123// STS API endpoint.
124func (i *STSCertificateIdentity) Retrieve() (Value, error) {
125 endpointURL, err := url.Parse(i.STSEndpoint)
126 if err != nil {
127 return Value{}, err
128 }
129 livetime := i.S3CredentialLivetime
130 if livetime == 0 {
131 livetime = 1 * time.Hour
132 }
133
134 queryValues := url.Values{}
135 queryValues.Set("Action", "AssumeRoleWithCertificate")
136 queryValues.Set("Version", STSVersion)
137 endpointURL.RawQuery = queryValues.Encode()
138
139 req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil)
140 if err != nil {
141 return Value{}, err
142 }
143 if req.Form == nil {
144 req.Form = url.Values{}
145 }
146 req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
147
148 resp, err := i.Client.Do(req)
149 if err != nil {
150 return Value{}, err
151 }
152 if resp.Body != nil {
153 defer resp.Body.Close()
154 }
155 if resp.StatusCode != http.StatusOK {
156 var errResp ErrorResponse
157 buf, err := io.ReadAll(resp.Body)
158 if err != nil {
159 return Value{}, err
160 }
161 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
162 if err != nil {
163 var s3Err Error
164 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
165 return Value{}, err
166 }
167 errResp.RequestID = s3Err.RequestID
168 errResp.STSError.Code = s3Err.Code
169 errResp.STSError.Message = s3Err.Message
170 }
171 return Value{}, errResp
172 }
173
174 const MaxSize = 10 * 1 << 20
175 var body io.Reader = resp.Body
176 if resp.ContentLength > 0 && resp.ContentLength < MaxSize {
177 body = io.LimitReader(body, resp.ContentLength)
178 } else {
179 body = io.LimitReader(body, MaxSize)
180 }
181
182 var response assumeRoleWithCertificateResponse
183 if err = xml.NewDecoder(body).Decode(&response); err != nil {
184 return Value{}, err
185 }
186 i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow)
187 return Value{
188 AccessKeyID: response.Result.Credentials.AccessKey,
189 SecretAccessKey: response.Result.Credentials.SecretKey,
190 SessionToken: response.Result.Credentials.SessionToken,
191 SignerType: SignatureDefault,
192 }, nil
193}
194
195// Expiration returns the expiration time of the current S3 credentials.
196func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration }
197
198type assumeRoleWithCertificateResponse struct {
199 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"`
200 Result struct {
201 Credentials struct {
202 AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
203 SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
204 Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
205 SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
206 } `xml:"Credentials" json:"credentials,omitempty"`
207 } `xml:"AssumeRoleWithCertificateResult"`
208 ResponseMetadata struct {
209 RequestID string `xml:"RequestId,omitempty"`
210 } `xml:"ResponseMetadata,omitempty"`
211}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
new file mode 100644
index 0000000..2e2af50
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -0,0 +1,205 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2019-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package credentials
19
20import (
21 "bytes"
22 "encoding/xml"
23 "errors"
24 "fmt"
25 "io"
26 "net/http"
27 "net/url"
28 "strconv"
29 "strings"
30 "time"
31)
32
33// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request.
34type AssumeRoleWithWebIdentityResponse struct {
35 XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"`
36 Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"`
37 ResponseMetadata struct {
38 RequestID string `xml:"RequestId,omitempty"`
39 } `xml:"ResponseMetadata,omitempty"`
40}
41
42// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity
43// request, including temporary credentials that can be used to make MinIO API requests.
44type WebIdentityResult struct {
45 AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
46 Audience string `xml:",omitempty"`
47 Credentials struct {
48 AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
49 SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
50 Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
51 SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
52 } `xml:",omitempty"`
53 PackedPolicySize int `xml:",omitempty"`
54 Provider string `xml:",omitempty"`
55 SubjectFromWebIdentityToken string `xml:",omitempty"`
56}
57
58// WebIdentityToken - web identity token with expiry.
59type WebIdentityToken struct {
60 Token string
61 AccessToken string
62 Expiry int
63}
64
65// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if
66// those credentials are expired.
67type STSWebIdentity struct {
68 Expiry
69
70 // Required http Client to use when connecting to MinIO STS service.
71 Client *http.Client
72
73 // Exported STS endpoint to fetch STS credentials.
74 STSEndpoint string
75
76 // Exported GetWebIDTokenExpiry function which returns ID
77 // tokens from IDP. This function should return two values
78 // one is ID token which is a self contained ID token (JWT)
79 // and second return value is the expiry associated with
80 // this token.
81 // This is a customer provided function and is mandatory.
82 GetWebIDTokenExpiry func() (*WebIdentityToken, error)
83
84 // RoleARN is the Amazon Resource Name (ARN) of the role that the caller is
85 // assuming.
86 RoleARN string
87
88 // roleSessionName is the identifier for the assumed role session.
89 roleSessionName string
90}
91
92// NewSTSWebIdentity returns a pointer to a new
93// Credentials object wrapping the STSWebIdentity.
94func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) {
95 if stsEndpoint == "" {
96 return nil, errors.New("STS endpoint cannot be empty")
97 }
98 if getWebIDTokenExpiry == nil {
99 return nil, errors.New("Web ID token and expiry retrieval function should be defined")
100 }
101 return New(&STSWebIdentity{
102 Client: &http.Client{
103 Transport: http.DefaultTransport,
104 },
105 STSEndpoint: stsEndpoint,
106 GetWebIDTokenExpiry: getWebIDTokenExpiry,
107 }), nil
108}
109
110func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
111 getWebIDTokenExpiry func() (*WebIdentityToken, error),
112) (AssumeRoleWithWebIdentityResponse, error) {
113 idToken, err := getWebIDTokenExpiry()
114 if err != nil {
115 return AssumeRoleWithWebIdentityResponse{}, err
116 }
117
118 v := url.Values{}
119 v.Set("Action", "AssumeRoleWithWebIdentity")
120 if len(roleARN) > 0 {
121 v.Set("RoleArn", roleARN)
122
123 if len(roleSessionName) == 0 {
124 roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10)
125 }
126 v.Set("RoleSessionName", roleSessionName)
127 }
128 v.Set("WebIdentityToken", idToken.Token)
129 if idToken.AccessToken != "" {
130 // Usually set when server is using extended userInfo endpoint.
131 v.Set("WebIdentityAccessToken", idToken.AccessToken)
132 }
133 if idToken.Expiry > 0 {
134 v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
135 }
136 v.Set("Version", STSVersion)
137
138 u, err := url.Parse(endpoint)
139 if err != nil {
140 return AssumeRoleWithWebIdentityResponse{}, err
141 }
142
143 req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
144 if err != nil {
145 return AssumeRoleWithWebIdentityResponse{}, err
146 }
147
148 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
149
150 resp, err := clnt.Do(req)
151 if err != nil {
152 return AssumeRoleWithWebIdentityResponse{}, err
153 }
154
155 defer resp.Body.Close()
156 if resp.StatusCode != http.StatusOK {
157 var errResp ErrorResponse
158 buf, err := io.ReadAll(resp.Body)
159 if err != nil {
160 return AssumeRoleWithWebIdentityResponse{}, err
161 }
162 _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
163 if err != nil {
164 var s3Err Error
165 if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
166 return AssumeRoleWithWebIdentityResponse{}, err
167 }
168 errResp.RequestID = s3Err.RequestID
169 errResp.STSError.Code = s3Err.Code
170 errResp.STSError.Message = s3Err.Message
171 }
172 return AssumeRoleWithWebIdentityResponse{}, errResp
173 }
174
175 a := AssumeRoleWithWebIdentityResponse{}
176 if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
177 return AssumeRoleWithWebIdentityResponse{}, err
178 }
179
180 return a, nil
181}
182
183// Retrieve retrieves credentials from the MinIO service.
184// Error will be returned if the request fails.
185func (m *STSWebIdentity) Retrieve() (Value, error) {
186 a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
187 if err != nil {
188 return Value{}, err
189 }
190
191 // Expiry window is set to 10secs.
192 m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
193
194 return Value{
195 AccessKeyID: a.Result.Credentials.AccessKey,
196 SecretAccessKey: a.Result.Credentials.SecretKey,
197 SessionToken: a.Result.Credentials.SessionToken,
198 SignerType: SignatureV4,
199 }, nil
200}
201
202// Expiration returns the expiration time of the credentials
203func (m *STSWebIdentity) Expiration() time.Time {
204 return m.expiration
205}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
new file mode 100644
index 0000000..6db26c0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
@@ -0,0 +1,24 @@
1//go:build !fips
2// +build !fips
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2022 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package encrypt
22
23// FIPS is true if 'fips' build tag was specified.
24const FIPS = false
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
new file mode 100644
index 0000000..6402582
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
@@ -0,0 +1,24 @@
1//go:build fips
2// +build fips
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2022 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package encrypt
22
23// FIPS is true if 'fips' build tag was specified.
24const FIPS = true
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
new file mode 100644
index 0000000..a7081c5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
@@ -0,0 +1,198 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2018 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package encrypt
19
20import (
21 "crypto/md5"
22 "encoding/base64"
23 "errors"
24 "net/http"
25
26 jsoniter "github.com/json-iterator/go"
27 "golang.org/x/crypto/argon2"
28)
29
30const (
31 // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
32 SseGenericHeader = "X-Amz-Server-Side-Encryption"
33
34 // SseKmsKeyID is the AWS SSE-KMS key id.
35 SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id"
36 // SseEncryptionContext is the AWS SSE-KMS Encryption Context data.
37 SseEncryptionContext = SseGenericHeader + "-Context"
38
39 // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
40 SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm"
41 // SseCustomerKey is the AWS SSE-C encryption key HTTP header key.
42 SseCustomerKey = SseGenericHeader + "-Customer-Key"
43 // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
44 SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5"
45
46 // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
47 SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
48 // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
49 SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
50 // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
51 SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
52)
53
54// PBKDF creates a SSE-C key from the provided password and salt.
55// PBKDF is a password-based key derivation function
56// which can be used to derive a high-entropy cryptographic
57// key from a low-entropy password and a salt.
58type PBKDF func(password, salt []byte) ServerSide
59
60// DefaultPBKDF is the default PBKDF. It uses Argon2id with the
61// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads).
62var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide {
63 sse := ssec{}
64 copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32))
65 return sse
66}
67
68// Type is the server-side-encryption method. It represents one of
69// the following encryption methods:
70// - SSE-C: server-side-encryption with customer provided keys
71// - KMS: server-side-encryption with managed keys
72// - S3: server-side-encryption using S3 storage encryption
73type Type string
74
75const (
76 // SSEC represents server-side-encryption with customer provided keys
77 SSEC Type = "SSE-C"
78 // KMS represents server-side-encryption with managed keys
79 KMS Type = "KMS"
80 // S3 represents server-side-encryption using S3 storage encryption
81 S3 Type = "S3"
82)
83
84// ServerSide is a form of S3 server-side-encryption.
85type ServerSide interface {
86 // Type returns the server-side-encryption method.
87 Type() Type
88
89 // Marshal adds encryption headers to the provided HTTP headers.
90 // It marks an HTTP request as server-side-encryption request
91 // and inserts the required data into the headers.
92 Marshal(h http.Header)
93}
94
95// NewSSE returns a server-side-encryption using S3 storage encryption.
96// Using SSE-S3 the server will encrypt the object with server-managed keys.
97func NewSSE() ServerSide { return s3{} }
98
99// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context.
100func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
101 if context == nil {
102 return kms{key: keyID, hasContext: false}, nil
103 }
104 json := jsoniter.ConfigCompatibleWithStandardLibrary
105 serializedContext, err := json.Marshal(context)
106 if err != nil {
107 return nil, err
108 }
109 return kms{key: keyID, context: serializedContext, hasContext: true}, nil
110}
111
112// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
113// The key must be 32 bytes long.
114func NewSSEC(key []byte) (ServerSide, error) {
115 if len(key) != 32 {
116 return nil, errors.New("encrypt: SSE-C key must be 256 bit long")
117 }
118 sse := ssec{}
119 copy(sse[:], key)
120 return sse, nil
121}
122
123// SSE transforms a SSE-C copy encryption into a SSE-C encryption.
124// It is the inverse of SSECopy(...).
125//
126// If the provided sse is no SSE-C copy encryption SSE returns
127// sse unmodified.
128func SSE(sse ServerSide) ServerSide {
129 if sse == nil || sse.Type() != SSEC {
130 return sse
131 }
132 if sse, ok := sse.(ssecCopy); ok {
133 return ssec(sse)
134 }
135 return sse
136}
137
138// SSECopy transforms a SSE-C encryption into a SSE-C copy
139// encryption. This is required for SSE-C key rotation or a SSE-C
140// copy where the source and the destination should be encrypted.
141//
142// If the provided sse is no SSE-C encryption SSECopy returns
143// sse unmodified.
144func SSECopy(sse ServerSide) ServerSide {
145 if sse == nil || sse.Type() != SSEC {
146 return sse
147 }
148 if sse, ok := sse.(ssec); ok {
149 return ssecCopy(sse)
150 }
151 return sse
152}
153
154type ssec [32]byte
155
156func (s ssec) Type() Type { return SSEC }
157
158func (s ssec) Marshal(h http.Header) {
159 keyMD5 := md5.Sum(s[:])
160 h.Set(SseCustomerAlgorithm, "AES256")
161 h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
162 h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
163}
164
165type ssecCopy [32]byte
166
167func (s ssecCopy) Type() Type { return SSEC }
168
169func (s ssecCopy) Marshal(h http.Header) {
170 keyMD5 := md5.Sum(s[:])
171 h.Set(SseCopyCustomerAlgorithm, "AES256")
172 h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
173 h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
174}
175
176type s3 struct{}
177
178func (s s3) Type() Type { return S3 }
179
180func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") }
181
182type kms struct {
183 key string
184 context []byte
185 hasContext bool
186}
187
188func (s kms) Type() Type { return KMS }
189
190func (s kms) Marshal(h http.Header) {
191 h.Set(SseGenericHeader, "aws:kms")
192 if s.key != "" {
193 h.Set(SseKmsKeyID, s.key)
194 }
195 if s.hasContext {
196 h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
197 }
198}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
new file mode 100644
index 0000000..c52f78c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -0,0 +1,491 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18// Package lifecycle contains all the lifecycle related data types and marshallers.
19package lifecycle
20
21import (
22 "encoding/json"
23 "encoding/xml"
24 "errors"
25 "time"
26)
27
// errMissingStorageClass is returned when unmarshalling a transition
// rule whose storage-class field is empty.
var errMissingStorageClass = errors.New("storage-class cannot be empty")
29
// AbortIncompleteMultipartUpload structure, not supported yet on MinIO
type AbortIncompleteMultipartUpload struct {
	XMLName             xml.Name       `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"`
	DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"`
}

// IsDaysNull returns true if days field is null
func (n AbortIncompleteMultipartUpload) IsDaysNull() bool {
	return n.DaysAfterInitiation == ExpirationDays(0)
}

// MarshalXML if days after initiation is set to non-zero value
func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if n.IsDaysNull() {
		// Nothing configured; omit the element entirely.
		return nil
	}
	// The wrapper type drops this MarshalXML method so EncodeElement
	// does not recurse back into it.
	type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload
	return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start)
}
49
// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire.
// Upon expiration, server permanently deletes the noncurrent object versions.
// Set this lifecycle configuration action on a bucket that has versioning enabled
// (or suspended) to request server delete noncurrent object versions at a
// specific period in the object's lifetime.
type NoncurrentVersionExpiration struct {
	XMLName                 xml.Name       `xml:"NoncurrentVersionExpiration" json:"-"`
	NoncurrentDays          ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
	NewerNoncurrentVersions int            `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
}

// MarshalXML if n is non-empty, i.e has a non-zero NoncurrentDays or NewerNoncurrentVersions.
func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if n.isNull() {
		// Neither field is set; omit the element entirely.
		return nil
	}
	// The wrapper type drops this MarshalXML method so EncodeElement
	// does not recurse back into it.
	type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
	return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start)
}

// IsDaysNull returns true if days field is null
func (n NoncurrentVersionExpiration) IsDaysNull() bool {
	return n.NoncurrentDays == ExpirationDays(0)
}

// isNull reports whether both NoncurrentDays and NewerNoncurrentVersions are unset.
func (n NoncurrentVersionExpiration) isNull() bool {
	return n.IsDaysNull() && n.NewerNoncurrentVersions == 0
}
78
// NoncurrentVersionTransition structure, set this action to request server to
// transition noncurrent object versions to different set storage classes
// at a specific period in the object's lifetime.
type NoncurrentVersionTransition struct {
	XMLName                 xml.Name       `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
	StorageClass            string         `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
	NoncurrentDays          ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"`
	NewerNoncurrentVersions int            `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
}

// IsDaysNull returns true if days field is null
func (n NoncurrentVersionTransition) IsDaysNull() bool {
	return n.NoncurrentDays == ExpirationDays(0)
}

// IsStorageClassEmpty returns true if storage class field is empty
func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool {
	return n.StorageClass == ""
}

// isNull treats an empty StorageClass as "no transition configured";
// the day counts are not consulted here.
func (n NoncurrentVersionTransition) isNull() bool {
	return n.StorageClass == ""
}

// UnmarshalJSON implements NoncurrentVersionTransition JSONify.
// It rejects input whose storage-class is empty with errMissingStorageClass.
func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error {
	// The local type drops UnmarshalJSON so json.Unmarshal does not
	// recurse back into this method.
	type noncurrentVersionTransition NoncurrentVersionTransition
	var nt noncurrentVersionTransition
	err := json.Unmarshal(b, &nt)
	if err != nil {
		return err
	}

	if nt.StorageClass == "" {
		return errMissingStorageClass
	}
	*n = NoncurrentVersionTransition(nt)
	return nil
}

// MarshalXML is extended to leave out
// <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if n.isNull() {
		return nil
	}
	// The wrapper type drops this MarshalXML method to avoid recursion.
	type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
	return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start)
}
128
// Tag structure key/value pair representing an object tag to apply lifecycle configuration
type Tag struct {
	XMLName xml.Name `xml:"Tag,omitempty" json:"-"`
	Key     string   `xml:"Key,omitempty" json:"Key,omitempty"`
	Value   string   `xml:"Value,omitempty" json:"Value,omitempty"`
}

// IsEmpty returns whether this tag is empty or not.
// A tag with an empty Key is considered empty regardless of Value.
func (tag Tag) IsEmpty() bool {
	return tag.Key == ""
}
140
// Transition structure - transition details of lifecycle configuration
type Transition struct {
	XMLName      xml.Name       `xml:"Transition" json:"-"`
	Date         ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
	StorageClass string         `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
	Days         ExpirationDays `xml:"Days" json:"Days"`
}

// UnmarshalJSON returns an error if storage-class is empty.
func (t *Transition) UnmarshalJSON(b []byte) error {
	// The local type drops UnmarshalJSON so json.Unmarshal does not
	// recurse back into this method.
	type transition Transition
	var tr transition
	err := json.Unmarshal(b, &tr)
	if err != nil {
		return err
	}

	if tr.StorageClass == "" {
		return errMissingStorageClass
	}
	*t = Transition(tr)
	return nil
}

// MarshalJSON customizes json encoding by omitting empty values
func (t Transition) MarshalJSON() ([]byte, error) {
	if t.IsNull() {
		// NOTE(review): returning (nil, nil) from a MarshalJSON makes
		// encoding/json fail if this value is ever encoded directly;
		// Rule.MarshalJSON omits null transitions, so this path is not
		// normally reached — confirm no other caller encodes a null
		// Transition.
		return nil, nil
	}
	// Pointer fields let us omit whichever of Date/Days is unset.
	type transition struct {
		Date         *ExpirationDate `json:"Date,omitempty"`
		StorageClass string          `json:"StorageClass,omitempty"`
		Days         *ExpirationDays `json:"Days"`
	}

	newt := transition{
		StorageClass: t.StorageClass,
	}

	// Date takes precedence; Days is emitted only when Date is unset.
	if !t.IsDateNull() {
		newt.Date = &t.Date
	} else {
		newt.Days = &t.Days
	}
	return json.Marshal(newt)
}

// IsDaysNull returns true if days field is null
func (t Transition) IsDaysNull() bool {
	return t.Days == ExpirationDays(0)
}

// IsDateNull returns true if date field is null
func (t Transition) IsDateNull() bool {
	return t.Date.Time.IsZero()
}

// IsNull returns true if no storage-class is set.
func (t Transition) IsNull() bool {
	return t.StorageClass == ""
}

// MarshalXML is transition is non null
func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
	if t.IsNull() {
		return nil
	}
	// The wrapper type drops this MarshalXML method to avoid recursion.
	type transitionWrapper Transition
	return en.EncodeElement(transitionWrapper(t), startElement)
}
211
// And And Rule for LifecycleTag, to be used in LifecycleRuleFilter
type And struct {
	XMLName               xml.Name `xml:"And" json:"-"`
	Prefix                string   `xml:"Prefix" json:"Prefix,omitempty"`
	Tags                  []Tag    `xml:"Tag" json:"Tags,omitempty"`
	ObjectSizeLessThan    int64    `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
	ObjectSizeGreaterThan int64    `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
}

// IsEmpty returns true if all of Tags, Prefix and the size bounds are unset.
func (a And) IsEmpty() bool {
	return len(a.Tags) == 0 && a.Prefix == "" &&
		a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0
}
226
// Filter will be used in selecting rule(s) for lifecycle configuration
type Filter struct {
	XMLName               xml.Name `xml:"Filter" json:"-"`
	And                   And      `xml:"And,omitempty" json:"And,omitempty"`
	Prefix                string   `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
	Tag                   Tag      `xml:"Tag,omitempty" json:"Tag,omitempty"`
	ObjectSizeLessThan    int64    `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
	ObjectSizeGreaterThan int64    `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
}

// IsNull returns true if all Filter fields are empty.
func (f Filter) IsNull() bool {
	return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" &&
		f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0
}

// MarshalJSON customizes json encoding by removing empty values.
func (f Filter) MarshalJSON() ([]byte, error) {
	// Pointer fields allow omitting And/Tag when they are empty.
	type filter struct {
		And                   *And   `json:"And,omitempty"`
		Prefix                string `json:"Prefix,omitempty"`
		Tag                   *Tag   `json:"Tag,omitempty"`
		ObjectSizeLessThan    int64  `json:"ObjectSizeLessThan,omitempty"`
		ObjectSizeGreaterThan int64  `json:"ObjectSizeGreaterThan,omitempty"`
	}

	newf := filter{
		Prefix: f.Prefix,
	}
	if !f.Tag.IsEmpty() {
		newf.Tag = &f.Tag
	}
	if !f.And.IsEmpty() {
		newf.And = &f.And
	}
	newf.ObjectSizeLessThan = f.ObjectSizeLessThan
	newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan
	return json.Marshal(newf)
}

// MarshalXML - produces the xml representation of the Filter struct
// only one of Prefix, And and Tag should be present in the output.
// Precedence: And > Tag > ObjectSizeLessThan > ObjectSizeGreaterThan > Prefix.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if err := e.EncodeToken(start); err != nil {
		return err
	}

	switch {
	case !f.And.IsEmpty():
		if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil {
			return err
		}
	case !f.Tag.IsEmpty():
		if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
			return err
		}
	default:
		// Size bounds are only emitted when neither And nor Tag is set,
		// and at most one of them is written.
		if f.ObjectSizeLessThan > 0 {
			if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil {
				return err
			}
			break
		}
		if f.ObjectSizeGreaterThan > 0 {
			if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil {
				return err
			}
			break
		}
		// Print empty Prefix field only when everything else is empty
		if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
			return err
		}
	}

	return e.EncodeToken(xml.EndElement{Name: start.Name})
}
304
305// ExpirationDays is a type alias to unmarshal Days in Expiration
306type ExpirationDays int
307
308// MarshalXML encodes number of days to expire if it is non-zero and
309// encodes empty string otherwise
310func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
311 if eDays == 0 {
312 return nil
313 }
314 return e.EncodeElement(int(eDays), startElement)
315}
316
317// ExpirationDate is a embedded type containing time.Time to unmarshal
318// Date in Expiration
319type ExpirationDate struct {
320 time.Time
321}
322
323// MarshalXML encodes expiration date if it is non-zero and encodes
324// empty string otherwise
325func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
326 if eDate.Time.IsZero() {
327 return nil
328 }
329 return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
330}
331
332// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
333type ExpireDeleteMarker ExpirationBoolean
334
335// IsEnabled returns true if the auto delete-marker expiration is enabled
336func (e ExpireDeleteMarker) IsEnabled() bool {
337 return bool(e)
338}
339
340// ExpirationBoolean represents an XML version of 'bool' type
341type ExpirationBoolean bool
342
343// MarshalXML encodes delete marker boolean into an XML form.
344func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
345 if !b {
346 return nil
347 }
348 type booleanWrapper ExpirationBoolean
349 return e.EncodeElement(booleanWrapper(b), startElement)
350}
351
352// IsEnabled returns true if the expiration boolean is enabled
353func (b ExpirationBoolean) IsEnabled() bool {
354 return bool(b)
355}
356
// Expiration structure - expiration details of lifecycle configuration
type Expiration struct {
	XMLName      xml.Name           `xml:"Expiration,omitempty" json:"-"`
	Date         ExpirationDate     `xml:"Date,omitempty" json:"Date,omitempty"`
	Days         ExpirationDays     `xml:"Days,omitempty" json:"Days,omitempty"`
	DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"`
	DeleteAll    ExpirationBoolean  `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"`
}

// MarshalJSON customizes json encoding by removing empty day/date specification.
func (e Expiration) MarshalJSON() ([]byte, error) {
	// Pointer fields let us omit whichever of Date/Days is unset.
	type expiration struct {
		Date         *ExpirationDate    `json:"Date,omitempty"`
		Days         *ExpirationDays    `json:"Days,omitempty"`
		DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"`
		DeleteAll    ExpirationBoolean  `json:"ExpiredObjectAllVersions,omitempty"`
	}

	newexp := expiration{
		DeleteMarker: e.DeleteMarker,
		DeleteAll:    e.DeleteAll,
	}
	if !e.IsDaysNull() {
		newexp.Days = &e.Days
	}
	if !e.IsDateNull() {
		newexp.Date = &e.Date
	}
	return json.Marshal(newexp)
}

// IsDaysNull returns true if days field is null
func (e Expiration) IsDaysNull() bool {
	return e.Days == ExpirationDays(0)
}

// IsDateNull returns true if date field is null
func (e Expiration) IsDateNull() bool {
	return e.Date.Time.IsZero()
}

// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled
func (e Expiration) IsDeleteMarkerExpirationEnabled() bool {
	return e.DeleteMarker.IsEnabled()
}

// IsNull returns true if both date and days fields are null
// and delete-marker expiration is disabled.
// NOTE(review): DeleteAll is not consulted here, so an Expiration with
// only DeleteAll set is considered null and omitted from XML — confirm
// this is intended.
func (e Expiration) IsNull() bool {
	return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled()
}

// MarshalXML is expiration is non null
func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
	if e.IsNull() {
		return nil
	}
	// The wrapper type drops this MarshalXML method to avoid recursion.
	type expirationWrapper Expiration
	return en.EncodeElement(expirationWrapper(e), startElement)
}
416
// MarshalJSON customizes json encoding by omitting empty values
func (r Rule) MarshalJSON() ([]byte, error) {
	// Mirror of Rule with pointer fields so that null sub-structures
	// can be omitted from the output entirely.
	type rule struct {
		AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
		Expiration                     *Expiration                     `json:"Expiration,omitempty"`
		ID                             string                          `json:"ID"`
		RuleFilter                     *Filter                         `json:"Filter,omitempty"`
		NoncurrentVersionExpiration    *NoncurrentVersionExpiration    `json:"NoncurrentVersionExpiration,omitempty"`
		NoncurrentVersionTransition    *NoncurrentVersionTransition    `json:"NoncurrentVersionTransition,omitempty"`
		Prefix                         string                          `json:"Prefix,omitempty"`
		Status                         string                          `json:"Status"`
		Transition                     *Transition                     `json:"Transition,omitempty"`
	}
	newr := rule{
		Prefix: r.Prefix,
		Status: r.Status,
		ID:     r.ID,
	}

	// Each optional sub-structure is referenced only when its own
	// null-check says it carries data.
	if !r.RuleFilter.IsNull() {
		newr.RuleFilter = &r.RuleFilter
	}
	if !r.AbortIncompleteMultipartUpload.IsDaysNull() {
		newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload
	}
	if !r.Expiration.IsNull() {
		newr.Expiration = &r.Expiration
	}
	if !r.Transition.IsNull() {
		newr.Transition = &r.Transition
	}
	if !r.NoncurrentVersionExpiration.isNull() {
		newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration
	}
	if !r.NoncurrentVersionTransition.isNull() {
		newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
	}

	return json.Marshal(newr)
}
457
// Rule represents a single rule in lifecycle configuration
type Rule struct {
	XMLName                        xml.Name                       `xml:"Rule,omitempty" json:"-"`
	AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"`
	Expiration                     Expiration                     `xml:"Expiration,omitempty" json:"Expiration,omitempty"`
	ID                             string                         `xml:"ID" json:"ID"`
	RuleFilter                     Filter                         `xml:"Filter,omitempty" json:"Filter,omitempty"`
	NoncurrentVersionExpiration    NoncurrentVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"`
	NoncurrentVersionTransition    NoncurrentVersionTransition    `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"`
	Prefix                         string                         `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
	Status                         string                         `xml:"Status" json:"Status"`
	Transition                     Transition                     `xml:"Transition,omitempty" json:"Transition,omitempty"`
}
471
472// Configuration is a collection of Rule objects.
473type Configuration struct {
474 XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"`
475 Rules []Rule `xml:"Rule"`
476}
477
478// Empty check if lifecycle configuration is empty
479func (c *Configuration) Empty() bool {
480 if c == nil {
481 return true
482 }
483 return len(c.Rules) == 0
484}
485
486// NewConfiguration initializes a fresh lifecycle configuration
487// for manipulation, such as setting and removing lifecycle rules
488// and filters.
489func NewConfiguration() *Configuration {
490 return &Configuration{}
491}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
new file mode 100644
index 0000000..126661a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
@@ -0,0 +1,78 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package notification
19
// identity represents the user id, this is a compliance field.
type identity struct {
	PrincipalID string `json:"principalId"`
}

// bucketMeta carries event bucket metadata.
type bucketMeta struct {
	Name          string   `json:"name"`
	OwnerIdentity identity `json:"ownerIdentity"`
	ARN           string   `json:"arn"`
}

// objectMeta carries event object metadata.
type objectMeta struct {
	Key          string            `json:"key"`
	Size         int64             `json:"size,omitempty"`
	ETag         string            `json:"eTag,omitempty"`
	ContentType  string            `json:"contentType,omitempty"`
	UserMetadata map[string]string `json:"userMetadata,omitempty"`
	VersionID    string            `json:"versionId,omitempty"`
	Sequencer    string            `json:"sequencer"`
}

// eventMeta carries event server specific metadata.
type eventMeta struct {
	SchemaVersion   string     `json:"s3SchemaVersion"`
	ConfigurationID string     `json:"configurationId"`
	Bucket          bucketMeta `json:"bucket"`
	Object          objectMeta `json:"object"`
}

// sourceInfo represents information on the client that
// triggered the event notification.
type sourceInfo struct {
	Host      string `json:"host"`
	Port      string `json:"port"`
	UserAgent string `json:"userAgent"`
}

// Event represents an Amazon an S3 bucket notification event.
type Event struct {
	EventVersion      string            `json:"eventVersion"`
	EventSource       string            `json:"eventSource"`
	AwsRegion         string            `json:"awsRegion"`
	EventTime         string            `json:"eventTime"`
	EventName         string            `json:"eventName"`
	UserIdentity      identity          `json:"userIdentity"`
	RequestParameters map[string]string `json:"requestParameters"`
	ResponseElements  map[string]string `json:"responseElements"`
	S3                eventMeta         `json:"s3"`
	Source            sourceInfo        `json:"source"`
}

// Info - represents the collection of notification events, additionally
// also reports errors if any while listening on bucket notifications.
type Info struct {
	Records []Event
	Err     error
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
new file mode 100644
index 0000000..a44799d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -0,0 +1,440 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package notification
19
20import (
21 "encoding/xml"
22 "errors"
23 "fmt"
24 "strings"
25
26 "github.com/minio/minio-go/v7/pkg/set"
27)
28
// EventType is a S3 notification event associated to the bucket notification configuration
type EventType string

// The role of all event types are described in :
//
//	http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
//
// The "*" suffixed values are wildcards matching every event of that family.
const (
	ObjectCreatedAll                                   EventType = "s3:ObjectCreated:*"
	ObjectCreatedPut                                   EventType = "s3:ObjectCreated:Put"
	ObjectCreatedPost                                  EventType = "s3:ObjectCreated:Post"
	ObjectCreatedCopy                                  EventType = "s3:ObjectCreated:Copy"
	ObjectCreatedDeleteTagging                         EventType = "s3:ObjectCreated:DeleteTagging"
	ObjectCreatedCompleteMultipartUpload               EventType = "s3:ObjectCreated:CompleteMultipartUpload"
	ObjectCreatedPutLegalHold                          EventType = "s3:ObjectCreated:PutLegalHold"
	ObjectCreatedPutRetention                          EventType = "s3:ObjectCreated:PutRetention"
	ObjectCreatedPutTagging                            EventType = "s3:ObjectCreated:PutTagging"
	ObjectAccessedGet                                  EventType = "s3:ObjectAccessed:Get"
	ObjectAccessedHead                                 EventType = "s3:ObjectAccessed:Head"
	ObjectAccessedGetRetention                         EventType = "s3:ObjectAccessed:GetRetention"
	ObjectAccessedGetLegalHold                         EventType = "s3:ObjectAccessed:GetLegalHold"
	ObjectAccessedAll                                  EventType = "s3:ObjectAccessed:*"
	ObjectRemovedAll                                   EventType = "s3:ObjectRemoved:*"
	ObjectRemovedDelete                                EventType = "s3:ObjectRemoved:Delete"
	ObjectRemovedDeleteMarkerCreated                   EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
	ObjectReducedRedundancyLostObject                  EventType = "s3:ReducedRedundancyLostObject"
	ObjectTransitionAll                                EventType = "s3:ObjectTransition:*"
	ObjectTransitionFailed                             EventType = "s3:ObjectTransition:Failed"
	ObjectTransitionComplete                           EventType = "s3:ObjectTransition:Complete"
	ObjectTransitionPost                               EventType = "s3:ObjectRestore:Post"
	ObjectTransitionCompleted                          EventType = "s3:ObjectRestore:Completed"
	ObjectReplicationAll                               EventType = "s3:Replication:*"
	ObjectReplicationOperationCompletedReplication     EventType = "s3:Replication:OperationCompletedReplication"
	ObjectReplicationOperationFailedReplication        EventType = "s3:Replication:OperationFailedReplication"
	ObjectReplicationOperationMissedThreshold          EventType = "s3:Replication:OperationMissedThreshold"
	ObjectReplicationOperationNotTracked               EventType = "s3:Replication:OperationNotTracked"
	ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold"
	ObjectScannerManyVersions                          EventType = "s3:Scanner:ManyVersions"
	ObjectScannerBigPrefix                             EventType = "s3:Scanner:BigPrefix"
	ObjectScannerAll                                   EventType = "s3:Scanner:*"
	BucketCreatedAll                                   EventType = "s3:BucketCreated:*"
	BucketRemovedAll                                   EventType = "s3:BucketRemoved:*"
)
71
// FilterRule - child of S3Key, a tag in the notification xml which
// carries suffix/prefix filters.
// Name is either "prefix" or "suffix" and Value is the match pattern.
type FilterRule struct {
	Name  string `xml:"Name"`
	Value string `xml:"Value"`
}

// S3Key - child of Filter, a tag in the notification xml which
// carries suffix/prefix filters
type S3Key struct {
	FilterRules []FilterRule `xml:"FilterRule,omitempty"`
}

// Filter - a tag in the notification xml structure which carries
// suffix/prefix filters
type Filter struct {
	S3Key S3Key `xml:"S3Key,omitempty"`
}
90
// Arn - holds ARN information that will be sent to the web service,
// ARN description can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
type Arn struct {
	Partition string
	Service   string
	Region    string
	AccountID string
	Resource  string
}

// NewArn creates new ARN based on the given partition, service, region, account id and resource
func NewArn(partition, service, region, accountID, resource string) Arn {
	var arn Arn
	arn.Partition = partition
	arn.Service = service
	arn.Region = region
	arn.AccountID = accountID
	arn.Resource = resource
	return arn
}

var (
	// ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn'
	ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'")
	// ErrInvalidArnFormat is returned when ARN string format is not valid
	ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:<partition>:<service>:<region>:<accountID>:<resource>'")
)

// NewArnFromString parses string representation of ARN into Arn object.
// Returns an error if the string format is incorrect.
func NewArnFromString(arn string) (Arn, error) {
	fields := strings.Split(arn, ":")
	switch {
	case len(fields) != 6:
		// An ARN has exactly six colon-separated fields.
		return Arn{}, ErrInvalidArnFormat
	case fields[0] != "arn":
		return Arn{}, ErrInvalidArnPrefix
	}

	return NewArn(fields[1], fields[2], fields[3], fields[4], fields[5]), nil
}

// String returns the string format of the ARN
func (arn Arn) String() string {
	return strings.Join([]string{"arn", arn.Partition, arn.Service, arn.Region, arn.AccountID, arn.Resource}, ":")
}
137
// Config - represents one single notification configuration
// such as topic, queue or lambda configuration.
type Config struct {
	ID     string      `xml:"Id,omitempty"`
	Arn    Arn         `xml:"-"`
	Events []EventType `xml:"Event"`
	Filter *Filter     `xml:"Filter,omitempty"`
}

// NewConfig creates one notification config and sets the given ARN
func NewConfig(arn Arn) Config {
	return Config{Arn: arn, Filter: &Filter{}}
}
151
// AddEvents appends the given event(s) to the current notification config.
// Duplicates are not filtered out.
func (t *Config) AddEvents(events ...EventType) {
	t.Events = append(t.Events, events...)
}
156
157// AddFilterSuffix sets the suffix configuration to the current notification config
158func (t *Config) AddFilterSuffix(suffix string) {
159 if t.Filter == nil {
160 t.Filter = &Filter{}
161 }
162 newFilterRule := FilterRule{Name: "suffix", Value: suffix}
163 // Replace any suffix rule if existing and add to the list otherwise
164 for index := range t.Filter.S3Key.FilterRules {
165 if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
166 t.Filter.S3Key.FilterRules[index] = newFilterRule
167 return
168 }
169 }
170 t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
171}
172
173// AddFilterPrefix sets the prefix configuration to the current notification config
174func (t *Config) AddFilterPrefix(prefix string) {
175 if t.Filter == nil {
176 t.Filter = &Filter{}
177 }
178 newFilterRule := FilterRule{Name: "prefix", Value: prefix}
179 // Replace any prefix rule if existing and add to the list otherwise
180 for index := range t.Filter.S3Key.FilterRules {
181 if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
182 t.Filter.S3Key.FilterRules[index] = newFilterRule
183 return
184 }
185 }
186 t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
187}
188
189// EqualEventTypeList tells whether a and b contain the same events
190func EqualEventTypeList(a, b []EventType) bool {
191 if len(a) != len(b) {
192 return false
193 }
194 setA := set.NewStringSet()
195 for _, i := range a {
196 setA.Add(string(i))
197 }
198
199 setB := set.NewStringSet()
200 for _, i := range b {
201 setB.Add(string(i))
202 }
203
204 return setA.Difference(setB).IsEmpty()
205}
206
207// EqualFilterRuleList tells whether a and b contain the same filters
208func EqualFilterRuleList(a, b []FilterRule) bool {
209 if len(a) != len(b) {
210 return false
211 }
212
213 setA := set.NewStringSet()
214 for _, i := range a {
215 setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
216 }
217
218 setB := set.NewStringSet()
219 for _, i := range b {
220 setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
221 }
222
223 return setA.Difference(setB).IsEmpty()
224}
225
226// Equal returns whether this `Config` is equal to another defined by the passed parameters
227func (t *Config) Equal(events []EventType, prefix, suffix string) bool {
228 if t == nil {
229 return false
230 }
231
232 // Compare events
233 passEvents := EqualEventTypeList(t.Events, events)
234
235 // Compare filters
236 var newFilterRules []FilterRule
237 if prefix != "" {
238 newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix})
239 }
240 if suffix != "" {
241 newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix})
242 }
243
244 var currentFilterRules []FilterRule
245 if t.Filter != nil {
246 currentFilterRules = t.Filter.S3Key.FilterRules
247 }
248
249 passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules)
250 return passEvents && passFilters
251}
252
// TopicConfig carries one single topic notification configuration
type TopicConfig struct {
	Config
	Topic string `xml:"Topic"`
}

// QueueConfig carries one single queue notification configuration
type QueueConfig struct {
	Config
	Queue string `xml:"Queue"`
}

// LambdaConfig carries one single cloudfunction notification configuration
type LambdaConfig struct {
	Config
	Lambda string `xml:"CloudFunction"`
}

// Configuration - the struct that represents the whole XML to be sent to the web service
type Configuration struct {
	XMLName       xml.Name       `xml:"NotificationConfiguration"`
	LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
	TopicConfigs  []TopicConfig  `xml:"TopicConfiguration"`
	QueueConfigs  []QueueConfig  `xml:"QueueConfiguration"`
}
278
// AddTopic adds a given topic config to the general bucket notification config.
// It returns false (and does not add) only when an existing config for the
// same topic and filter already registers one of the new config's events.
func (b *Configuration) AddTopic(topicConfig Config) bool {
	newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()}
	for _, n := range b.TopicConfigs {
		// If new config matches existing one
		// NOTE(review): Filter is compared by pointer identity, not by
		// content — equal-but-distinct filters are treated as different.
		// Confirm this is intended.
		if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {

			existingConfig := set.NewStringSet()
			for _, v := range n.Events {
				existingConfig.Add(string(v))
			}

			newConfig := set.NewStringSet()
			for _, v := range topicConfig.Events {
				newConfig.Add(string(v))
			}

			// Any overlap in events means the new config duplicates an
			// existing registration.
			if !newConfig.Intersection(existingConfig).IsEmpty() {
				return false
			}
		}
	}
	b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
	return true
}
304
305// AddQueue adds a given queue config to the general bucket notification config
306func (b *Configuration) AddQueue(queueConfig Config) bool {
307 newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()}
308 for _, n := range b.QueueConfigs {
309 if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {
310
311 existingConfig := set.NewStringSet()
312 for _, v := range n.Events {
313 existingConfig.Add(string(v))
314 }
315
316 newConfig := set.NewStringSet()
317 for _, v := range queueConfig.Events {
318 newConfig.Add(string(v))
319 }
320
321 if !newConfig.Intersection(existingConfig).IsEmpty() {
322 return false
323 }
324 }
325 }
326 b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
327 return true
328}
329
330// AddLambda adds a given lambda config to the general bucket notification config
331func (b *Configuration) AddLambda(lambdaConfig Config) bool {
332 newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
333 for _, n := range b.LambdaConfigs {
334 if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {
335
336 existingConfig := set.NewStringSet()
337 for _, v := range n.Events {
338 existingConfig.Add(string(v))
339 }
340
341 newConfig := set.NewStringSet()
342 for _, v := range lambdaConfig.Events {
343 newConfig.Add(string(v))
344 }
345
346 if !newConfig.Intersection(existingConfig).IsEmpty() {
347 return false
348 }
349 }
350 }
351 b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
352 return true
353}
354
355// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
356func (b *Configuration) RemoveTopicByArn(arn Arn) {
357 var topics []TopicConfig
358 for _, topic := range b.TopicConfigs {
359 if topic.Topic != arn.String() {
360 topics = append(topics, topic)
361 }
362 }
363 b.TopicConfigs = topics
364}
365
// ErrNoConfigMatch is returned by the RemoveXByArnEventsPrefixSuffix helpers
// when no notification configuration (sqs, sns, lambda) matches the requested
// ARN, events, prefix and suffix during a delete.
var ErrNoConfigMatch = errors.New("no notification configuration matched")
368
369// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix
370func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
371 removeIndex := -1
372 for i, v := range b.TopicConfigs {
373 // if it matches events and filters, mark the index for deletion
374 if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) {
375 removeIndex = i
376 break // since we have at most one matching config
377 }
378 }
379 if removeIndex >= 0 {
380 b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...)
381 return nil
382 }
383 return ErrNoConfigMatch
384}
385
386// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
387func (b *Configuration) RemoveQueueByArn(arn Arn) {
388 var queues []QueueConfig
389 for _, queue := range b.QueueConfigs {
390 if queue.Queue != arn.String() {
391 queues = append(queues, queue)
392 }
393 }
394 b.QueueConfigs = queues
395}
396
397// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that match the exact specified ARN, events, prefix and suffix
398func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
399 removeIndex := -1
400 for i, v := range b.QueueConfigs {
401 // if it matches events and filters, mark the index for deletion
402 if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) {
403 removeIndex = i
404 break // since we have at most one matching config
405 }
406 }
407 if removeIndex >= 0 {
408 b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...)
409 return nil
410 }
411 return ErrNoConfigMatch
412}
413
414// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
415func (b *Configuration) RemoveLambdaByArn(arn Arn) {
416 var lambdas []LambdaConfig
417 for _, lambda := range b.LambdaConfigs {
418 if lambda.Lambda != arn.String() {
419 lambdas = append(lambdas, lambda)
420 }
421 }
422 b.LambdaConfigs = lambdas
423}
424
425// RemoveLambdaByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix
426func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
427 removeIndex := -1
428 for i, v := range b.LambdaConfigs {
429 // if it matches events and filters, mark the index for deletion
430 if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) {
431 removeIndex = i
432 break // since we have at most one matching config
433 }
434 }
435 if removeIndex >= 0 {
436 b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...)
437 return nil
438 }
439 return ErrNoConfigMatch
440}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
new file mode 100644
index 0000000..0abbf6e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -0,0 +1,971 @@
1/*
2 * MinIO Client (C) 2020 MinIO, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package replication
18
19import (
20 "bytes"
21 "encoding/xml"
22 "fmt"
23 "math"
24 "strconv"
25 "strings"
26 "time"
27 "unicode/utf8"
28
29 "github.com/rs/xid"
30)
31
// errInvalidFilter is returned when a replication Filter combines elements
// that are mutually exclusive (Prefix, Tag, And) — see Filter.Validate.
var errInvalidFilter = fmt.Errorf("invalid filter")

// OptionType specifies operation to be performed on config
type OptionType string

const (
	// AddOption specifies addition of rule to config
	AddOption OptionType = "Add"
	// SetOption specifies modification of existing rule to config
	SetOption OptionType = "Set"

	// RemoveOption specifies rule options are for removing a rule
	RemoveOption OptionType = "Remove"
	// ImportOption is for getting current config
	ImportOption OptionType = "Import"
)

// Options represents options to set a replication configuration rule.
// The string fields mirror CLI-style input; AddRule/EditRule parse and
// validate them ("enable"/"disable" toggles, integer Priority, etc.).
type Options struct {
	Op                      OptionType
	RoleArn                 string
	ID                      string // rule ID; AddRule generates one when empty
	Prefix                  string
	RuleStatus              string // "enable" or "disable"
	Priority                string // parsed with strconv.Atoi
	TagString               string // ampersand-separated k=v pairs, parsed by Tags()
	StorageClass            string
	DestBucket              string // destination bucket, expected in ARN format
	IsTagSet                bool
	IsSCSet                 bool
	ReplicateDeletes        string // replicate versioned deletes
	ReplicateDeleteMarkers  string // replicate soft deletes
	ReplicaSync             string // replicate replica metadata modifications
	ExistingObjectReplicate string
}
67
68// Tags returns a slice of tags for a rule
69func (opts Options) Tags() ([]Tag, error) {
70 var tagList []Tag
71 tagTokens := strings.Split(opts.TagString, "&")
72 for _, tok := range tagTokens {
73 if tok == "" {
74 break
75 }
76 kv := strings.SplitN(tok, "=", 2)
77 if len(kv) != 2 {
78 return []Tag{}, fmt.Errorf("tags should be entered as comma separated k=v pairs")
79 }
80 tagList = append(tagList, Tag{
81 Key: kv[0],
82 Value: kv[1],
83 })
84 }
85 return tagList, nil
86}
87
// Config - replication configuration specified in
// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
type Config struct {
	XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"`
	Rules   []Rule   `xml:"Rule" json:"Rules"`
	// Role is the legacy replication RoleArn; AddRule/EditRule migrate it
	// into each rule's Destination.Bucket for non-AWS configurations.
	Role string `xml:"Role" json:"Role"`
}

// Empty returns true if config is not set (i.e. it holds no rules).
func (c *Config) Empty() bool {
	return len(c.Rules) == 0
}
100
// AddRule adds a new rule to existing replication config. It returns an error
// when a rule with the same ID or the same priority already exists.
// (NOTE(review): the previous comment claimed a same-ID rule is replaced; the
// code below explicitly rejects duplicate IDs instead.)
func (c *Config) AddRule(opts Options) error {
	// Priority is mandatory and must parse as an integer.
	priority, err := strconv.Atoi(opts.Priority)
	if err != nil {
		return err
	}
	var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite
	if opts.RoleArn != "" {
		// An ARN has exactly six ":"-separated components.
		tokens := strings.Split(opts.RoleArn, ":")
		if len(tokens) != 6 {
			return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn)
		}
		switch {
		// Legacy MinIO role ARN is only accepted on an empty config.
		case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0:
			c.Role = opts.RoleArn
			compatSw = true
		case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"):
			c.Role = opts.RoleArn
		default:
			return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn)
		}
	}

	var status Status
	// toggle rule status for edit option
	switch opts.RuleStatus {
	case "enable":
		status = Enabled
	case "disable":
		status = Disabled
	default:
		return fmt.Errorf("rule state should be either [enable|disable]")
	}

	tags, err := opts.Tags()
	if err != nil {
		return err
	}
	andVal := And{
		Tags: tags,
	}
	filter := Filter{Prefix: opts.Prefix}
	// only a single tag is set.
	if opts.Prefix == "" && len(tags) == 1 {
		filter.Tag = tags[0]
	}
	// both prefix and tag are present — fold them into the <And> element,
	// which is the only place S3 allows multiple filter criteria.
	if len(andVal.Tags) > 1 || opts.Prefix != "" {
		filter.And = andVal
		filter.And.Prefix = opts.Prefix
		filter.Prefix = ""
		filter.Tag = Tag{}
	}
	// Generate a unique rule ID when the caller did not supply one.
	if opts.ID == "" {
		opts.ID = xid.New().String()
	}

	destBucket := opts.DestBucket
	// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
	if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 {
		// In compat mode a bare bucket name is promoted to ARN form.
		if len(btokens) == 1 && compatSw {
			destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
		} else {
			return fmt.Errorf("destination bucket needs to be in Arn format")
		}
	}
	dmStatus := Disabled
	if opts.ReplicateDeleteMarkers != "" {
		switch opts.ReplicateDeleteMarkers {
		case "enable":
			dmStatus = Enabled
		case "disable":
			dmStatus = Disabled
		default:
			return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable")
		}
	}

	vDeleteStatus := Disabled
	if opts.ReplicateDeletes != "" {
		switch opts.ReplicateDeletes {
		case "enable":
			vDeleteStatus = Enabled
		case "disable":
			vDeleteStatus = Disabled
		default:
			return fmt.Errorf("ReplicateDeletes should be either enable|disable")
		}
	}
	var replicaSync Status
	// replica sync is by default Enabled, unless specified.
	switch opts.ReplicaSync {
	case "enable", "":
		replicaSync = Enabled
	case "disable":
		replicaSync = Disabled
	default:
		return fmt.Errorf("replica metadata sync should be either [enable|disable]")
	}

	var existingStatus Status
	if opts.ExistingObjectReplicate != "" {
		switch opts.ExistingObjectReplicate {
		// The "" case is unreachable here (guarded by the outer != ""),
		// kept as-is for fidelity.
		case "enable":
			existingStatus = Enabled
		case "disable", "":
			existingStatus = Disabled
		default:
			return fmt.Errorf("existingObjectReplicate should be either enable|disable")
		}
	}
	newRule := Rule{
		ID:       opts.ID,
		Priority: priority,
		Status:   status,
		Filter:   filter,
		Destination: Destination{
			Bucket:       destBucket,
			StorageClass: opts.StorageClass,
		},
		DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus},
		DeleteReplication:       DeleteReplication{Status: vDeleteStatus},
		// MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow
		// automatic failover as the expectation in this case is that replica and source should be identical.
		// However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html
		SourceSelectionCriteria: SourceSelectionCriteria{
			ReplicaModifications: ReplicaModifications{
				Status: replicaSync,
			},
		},
		// By default disable existing object replication unless selected
		ExistingObjectReplication: ExistingObjectReplication{
			Status: existingStatus,
		},
	}

	// validate rule after overlaying priority for pre-existing rule being disabled.
	if err := newRule.Validate(); err != nil {
		return err
	}
	// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration
	if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw {
		for i := range c.Rules {
			c.Rules[i].Destination.Bucket = c.Role
		}
		c.Role = ""
	}

	// Reject duplicates: both priority and ID must be unique across rules.
	for _, rule := range c.Rules {
		if rule.Priority == newRule.Priority {
			return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
		}
		if rule.ID == newRule.ID {
			return fmt.Errorf("a rule exists with this ID")
		}
	}

	c.Rules = append(c.Rules, newRule)
	return nil
}
262
// EditRule modifies an existing rule in replication config. The rule is looked
// up by opts.ID (mandatory); only the option fields that are set are applied
// on top of the existing rule, which is then re-validated before being stored.
func (c *Config) EditRule(opts Options) error {
	if opts.ID == "" {
		return fmt.Errorf("rule ID missing")
	}
	// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS.
	if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 {
		for i := range c.Rules {
			c.Rules[i].Destination.Bucket = c.Role
		}
		c.Role = ""
	}

	// Locate the rule to edit; newRule starts as a copy of it.
	rIdx := -1
	var newRule Rule
	for i, rule := range c.Rules {
		if rule.ID == opts.ID {
			rIdx = i
			newRule = rule
			break
		}
	}
	if rIdx < 0 {
		return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID)
	}
	prefixChg := opts.Prefix != newRule.Prefix()
	if opts.IsTagSet || prefixChg {
		prefix := newRule.Prefix()
		// Net effect: prefix always tracks opts.Prefix (the guard only
		// skips a self-assignment when they already match).
		if prefix != opts.Prefix {
			prefix = opts.Prefix
		}
		// Start from the rule's current tags (single Tag or the And list) …
		tags := []Tag{newRule.Filter.Tag}
		if len(newRule.Filter.And.Tags) != 0 {
			tags = newRule.Filter.And.Tags
		}
		var err error
		// … unless the caller supplied a new tag string.
		if opts.IsTagSet {
			tags, err = opts.Tags()
			if err != nil {
				return err
			}
		}
		andVal := And{
			Tags: tags,
		}

		// Rebuild the filter with the same placement rules as AddRule.
		filter := Filter{Prefix: prefix}
		// only a single tag is set.
		if prefix == "" && len(tags) == 1 {
			filter.Tag = tags[0]
		}
		// both prefix and tag are present
		if len(andVal.Tags) > 1 || prefix != "" {
			filter.And = andVal
			filter.And.Prefix = prefix
			filter.Prefix = ""
			filter.Tag = Tag{}
		}
		newRule.Filter = filter
	}

	// toggle rule status for edit option
	if opts.RuleStatus != "" {
		switch opts.RuleStatus {
		case "enable":
			newRule.Status = Enabled
		case "disable":
			newRule.Status = Disabled
		default:
			return fmt.Errorf("rule state should be either [enable|disable]")
		}
	}
	// set DeleteMarkerReplication rule status for edit option
	if opts.ReplicateDeleteMarkers != "" {
		switch opts.ReplicateDeleteMarkers {
		case "enable":
			newRule.DeleteMarkerReplication.Status = Enabled
		case "disable":
			newRule.DeleteMarkerReplication.Status = Disabled
		default:
			return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]")
		}
	}

	// set DeleteReplication rule status for edit option. This is a MinIO specific
	// option to replicate versioned deletes
	if opts.ReplicateDeletes != "" {
		switch opts.ReplicateDeletes {
		case "enable":
			newRule.DeleteReplication.Status = Enabled
		case "disable":
			newRule.DeleteReplication.Status = Disabled
		default:
			return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]")
		}
	}

	if opts.ReplicaSync != "" {
		switch opts.ReplicaSync {
		// The "" case is unreachable (guarded by the outer != ""), kept as-is.
		case "enable", "":
			newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled
		case "disable":
			newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled
		default:
			return fmt.Errorf("replica metadata sync should be either [enable|disable]")
		}
	}

	if opts.ExistingObjectReplicate != "" {
		switch opts.ExistingObjectReplicate {
		case "enable":
			newRule.ExistingObjectReplication.Status = Enabled
		case "disable":
			newRule.ExistingObjectReplication.Status = Disabled
		default:
			return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]")
		}
	}
	if opts.IsSCSet {
		newRule.Destination.StorageClass = opts.StorageClass
	}
	if opts.Priority != "" {
		priority, err := strconv.Atoi(opts.Priority)
		if err != nil {
			return err
		}
		newRule.Priority = priority
	}
	if opts.DestBucket != "" {
		destBucket := opts.DestBucket
		// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
		if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 {
			return fmt.Errorf("destination bucket needs to be in Arn format")
		}
		newRule.Destination.Bucket = destBucket
	}
	// validate rule
	if err := newRule.Validate(); err != nil {
		return err
	}
	// ensure priority and destination bucket restrictions are not violated
	for idx, rule := range c.Rules {
		if rule.Priority == newRule.Priority && rIdx != idx {
			return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
		}
		// NOTE(review): this matches the edited rule itself (same ID) and
		// rejects a destination-bucket change for it — confirm intent.
		if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID {
			return fmt.Errorf("invalid destination bucket for this rule")
		}
	}

	c.Rules[rIdx] = newRule
	return nil
}
416
417// RemoveRule removes a rule from replication config.
418func (c *Config) RemoveRule(opts Options) error {
419 var newRules []Rule
420 ruleFound := false
421 for _, rule := range c.Rules {
422 if rule.ID != opts.ID {
423 newRules = append(newRules, rule)
424 continue
425 }
426 ruleFound = true
427 }
428 if !ruleFound {
429 return fmt.Errorf("Rule with ID %s not found", opts.ID)
430 }
431 if len(newRules) == 0 {
432 return fmt.Errorf("replication configuration should have at least one rule")
433 }
434 c.Rules = newRules
435 return nil
436}
437
// Rule - a rule for replication configuration.
type Rule struct {
	XMLName xml.Name `xml:"Rule" json:"-"`
	// ID uniquely identifies the rule within a Config (max 255 chars).
	ID string `xml:"ID,omitempty"`
	// Status is Enabled or Disabled.
	Status Status `xml:"Status"`
	// Priority must be unique across the rules of a Config.
	Priority                int                     `xml:"Priority"`
	DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"`
	// DeleteReplication (versioned deletes) is a MinIO-specific extension.
	DeleteReplication DeleteReplication `xml:"DeleteReplication"`
	Destination       Destination       `xml:"Destination"`
	// Filter selects the objects the rule applies to (prefix and/or tags).
	Filter                    Filter                    `xml:"Filter" json:"Filter"`
	SourceSelectionCriteria   SourceSelectionCriteria   `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
	ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"`
}
451
452// Validate validates the rule for correctness
453func (r Rule) Validate() error {
454 if err := r.validateID(); err != nil {
455 return err
456 }
457 if err := r.validateStatus(); err != nil {
458 return err
459 }
460 if err := r.validateFilter(); err != nil {
461 return err
462 }
463
464 if r.Priority < 0 && r.Status == Enabled {
465 return fmt.Errorf("priority must be set for the rule")
466 }
467
468 if err := r.validateStatus(); err != nil {
469 return err
470 }
471 return r.ExistingObjectReplication.Validate()
472}
473
474// validateID - checks if ID is valid or not.
475func (r Rule) validateID() error {
476 // cannot be longer than 255 characters
477 if len(r.ID) > 255 {
478 return fmt.Errorf("ID must be less than 255 characters")
479 }
480 return nil
481}
482
483// validateStatus - checks if status is valid or not.
484func (r Rule) validateStatus() error {
485 // Status can't be empty
486 if len(r.Status) == 0 {
487 return fmt.Errorf("status cannot be empty")
488 }
489
490 // Status must be one of Enabled or Disabled
491 if r.Status != Enabled && r.Status != Disabled {
492 return fmt.Errorf("status must be set to either Enabled or Disabled")
493 }
494 return nil
495}
496
497func (r Rule) validateFilter() error {
498 return r.Filter.Validate()
499}
500
501// Prefix - a rule can either have prefix under <filter></filter> or under
502// <filter><and></and></filter>. This method returns the prefix from the
503// location where it is available
504func (r Rule) Prefix() string {
505 if r.Filter.Prefix != "" {
506 return r.Filter.Prefix
507 }
508 return r.Filter.And.Prefix
509}
510
511// Tags - a rule can either have tag under <filter></filter> or under
512// <filter><and></and></filter>. This method returns all the tags from the
513// rule in the format tag1=value1&tag2=value2
514func (r Rule) Tags() string {
515 ts := []Tag{r.Filter.Tag}
516 if len(r.Filter.And.Tags) != 0 {
517 ts = r.Filter.And.Tags
518 }
519
520 var buf bytes.Buffer
521 for _, t := range ts {
522 if buf.Len() > 0 {
523 buf.WriteString("&")
524 }
525 buf.WriteString(t.String())
526 }
527 return buf.String()
528}
529
530// Filter - a filter for a replication configuration Rule.
531type Filter struct {
532 XMLName xml.Name `xml:"Filter" json:"-"`
533 Prefix string `json:"Prefix,omitempty"`
534 And And `xml:"And,omitempty" json:"And,omitempty"`
535 Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
536}
537
538// Validate - validates the filter element
539func (f Filter) Validate() error {
540 // A Filter must have exactly one of Prefix, Tag, or And specified.
541 if !f.And.isEmpty() {
542 if f.Prefix != "" {
543 return errInvalidFilter
544 }
545 if !f.Tag.IsEmpty() {
546 return errInvalidFilter
547 }
548 }
549 if f.Prefix != "" {
550 if !f.Tag.IsEmpty() {
551 return errInvalidFilter
552 }
553 }
554 if !f.Tag.IsEmpty() {
555 if err := f.Tag.Validate(); err != nil {
556 return err
557 }
558 }
559 return nil
560}
561
// Tag - a tag for a replication configuration Rule filter.
type Tag struct {
	XMLName xml.Name `json:"-"`
	Key     string   `xml:"Key,omitempty" json:"Key,omitempty"`
	Value   string   `xml:"Value,omitempty" json:"Value,omitempty"`
}

// String renders the tag as "key=value"; an empty tag renders as "".
func (tag Tag) String() string {
	if tag.IsEmpty() {
		return ""
	}
	return tag.Key + "=" + tag.Value
}

// IsEmpty reports whether the tag carries no key.
func (tag Tag) IsEmpty() bool {
	return tag.Key == ""
}

// Validate checks the tag limits: a non-empty key of at most 128 runes and
// a value of at most 256 runes.
func (tag Tag) Validate() error {
	switch {
	case tag.Key == "" || utf8.RuneCountInString(tag.Key) > 128:
		return fmt.Errorf("invalid Tag Key")
	case utf8.RuneCountInString(tag.Value) > 256:
		return fmt.Errorf("invalid Tag Value")
	default:
		return nil
	}
}
592
593// Destination - destination in ReplicationConfiguration.
594type Destination struct {
595 XMLName xml.Name `xml:"Destination" json:"-"`
596 Bucket string `xml:"Bucket" json:"Bucket"`
597 StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
598}
599
600// And - a tag to combine a prefix and multiple tags for replication configuration rule.
601type And struct {
602 XMLName xml.Name `xml:"And,omitempty" json:"-"`
603 Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
604 Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
605}
606
607// isEmpty returns true if Tags field is null
608func (a And) isEmpty() bool {
609 return len(a.Tags) == 0 && a.Prefix == ""
610}
611
// Status represents Enabled/Disabled status
type Status string

// Supported status types
const (
	Enabled  Status = "Enabled"
	Disabled Status = "Disabled"
)

// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
type DeleteMarkerReplication struct {
	Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
}

// IsEmpty reports whether no delete-marker replication status has been set.
func (d DeleteMarkerReplication) IsEmpty() bool {
	return d.Status == ""
}

// DeleteReplication - whether versioned deletes are replicated - this
// is a MinIO specific extension
type DeleteReplication struct {
	Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
}

// IsEmpty reports whether no versioned-delete replication status has been set.
func (d DeleteReplication) IsEmpty() bool {
	return d.Status == ""
}

// ReplicaModifications specifies if replica modification sync is enabled
type ReplicaModifications struct {
	Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default
}

// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration.
type SourceSelectionCriteria struct {
	ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"`
}

// IsValid - checks whether SourceSelectionCriteria is valid or not.
func (s SourceSelectionCriteria) IsValid() bool {
	switch s.ReplicaModifications.Status {
	case Enabled, Disabled:
		return true
	default:
		return false
	}
}

// Validate source selection criteria: the zero value is acceptable; anything
// else must carry a valid ReplicaModifications status.
func (s SourceSelectionCriteria) Validate() error {
	if s == (SourceSelectionCriteria{}) {
		return nil
	}
	if !s.IsValid() {
		return fmt.Errorf("invalid ReplicaModification status")
	}
	return nil
}

// ExistingObjectReplication - whether existing object replication is enabled
type ExistingObjectReplication struct {
	Status Status `xml:"Status"` // should be set to "Disabled" by default
}

// IsEmpty reports whether no existing-object replication status has been set.
// (The previous comment was copy-pasted from DeleteMarkerReplication.)
func (e ExistingObjectReplication) IsEmpty() bool {
	return e.Status == ""
}

// Validate accepts an unset status and otherwise requires Enabled or Disabled.
func (e ExistingObjectReplication) Validate() error {
	switch e.Status {
	case "", Enabled, Disabled:
		return nil
	default:
		return fmt.Errorf("invalid ExistingObjectReplication status")
	}
}
688
// TargetMetrics represents inline replication metrics
// such as pending, failed and completed bytes in total for a bucket remote target
type TargetMetrics struct {
	// Completed count
	ReplicatedCount uint64 `json:"replicationCount,omitempty"`
	// Completed size in bytes
	ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
	// Bandwidth limit in bytes/sec for this target
	// NOTE(review): JSON key says "limitInBits" while the field name and this
	// comment say bytes — confirm the intended unit with producers/consumers.
	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
	// Current bandwidth used in bytes/sec for this target
	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
	// errors seen in replication in last minute, hour and total
	Failed TimedErrStats `json:"failed,omitempty"`
	// Deprecated fields (kept so the serialized JSON shape stays stable)
	// Pending size in bytes
	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
	// Total Replica size in bytes
	ReplicaSize uint64 `json:"replicaSize,omitempty"`
	// Failed size in bytes
	FailedSize uint64 `json:"failedReplicationSize,omitempty"`
	// Total number of pending operations including metadata updates
	PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
	// Total number of failed operations including metadata updates
	FailedCount uint64 `json:"failedReplicationCount,omitempty"`
}
714
// Metrics represents inline replication metrics for a bucket, aggregated
// across its remote targets; Stats maps a target (keyed by string — presumably
// the target ARN, verify against callers) to its per-target metrics.
type Metrics struct {
	Stats map[string]TargetMetrics
	// Completed size in bytes across targets
	ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
	// Total Replica size in bytes across targets
	ReplicaSize uint64 `json:"replicaSize,omitempty"`
	// Total Replica counts
	ReplicaCount int64 `json:"replicaCount,omitempty"`
	// Total Replicated count
	ReplicatedCount int64 `json:"replicationCount,omitempty"`
	// errors seen in replication in last minute, hour and total
	Errors TimedErrStats `json:"failed,omitempty"`
	// Total number of entries that are queued for replication
	QStats InQueueMetric `json:"queued"`
	// Deprecated fields (kept so the serialized JSON shape stays stable)
	// Total Pending size in bytes across targets
	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
	// Failed size in bytes across targets
	FailedSize uint64 `json:"failedReplicationSize,omitempty"`
	// Total number of pending operations including metadata updates across targets
	PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
	// Total number of failed operations including metadata updates across targets
	FailedCount uint64 `json:"failedReplicationCount,omitempty"`
}
740
// RStat - has count and bytes for replication metrics
type RStat struct {
	Count float64 `json:"count"`
	Bytes int64   `json:"bytes"`
}

// Add returns the element-wise sum of the two RStat values.
func (r RStat) Add(r1 RStat) RStat {
	// r is a value receiver, so mutating it here only touches the copy.
	r.Count += r1.Count
	r.Bytes += r1.Bytes
	return r
}

// TimedErrStats holds error stats for a time period
type TimedErrStats struct {
	LastMinute RStat `json:"lastMinute"`
	LastHour   RStat `json:"lastHour"`
	Totals     RStat `json:"totals"`
}

// Add returns the window-wise sum of the two TimedErrStats values.
func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats {
	// te is a value receiver, so mutating it here only touches the copy.
	te.LastMinute = te.LastMinute.Add(o.LastMinute)
	te.LastHour = te.LastHour.Add(o.LastHour)
	te.Totals = te.Totals.Add(o.Totals)
	return te
}
770
// ResyncTargetsInfo provides replication target information to resync replicated data.
type ResyncTargetsInfo struct {
	Targets []ResyncTarget `json:"target,omitempty"`
}

// ResyncTarget provides the replica resources and resetID to initiate resync replication.
type ResyncTarget struct {
	Arn     string `json:"arn"`
	ResetID string `json:"resetid"`
	// StartTime/EndTime bound the resync run; zero when not started/finished.
	StartTime time.Time `json:"startTime,omitempty"`
	EndTime   time.Time `json:"endTime,omitempty"`
	// Status of resync operation
	ResyncStatus string `json:"resyncStatus,omitempty"`
	// Completed size in bytes
	ReplicatedSize int64 `json:"completedReplicationSize,omitempty"`
	// Failed size in bytes
	FailedSize int64 `json:"failedReplicationSize,omitempty"`
	// Total number of failed operations
	FailedCount int64 `json:"failedReplicationCount,omitempty"`
	// Total number of completed operations
	ReplicatedCount int64 `json:"replicationCount,omitempty"`
	// Last bucket/object replicated.
	Bucket string `json:"bucket,omitempty"`
	Object string `json:"object,omitempty"`
}
796
// XferStats holds transfer rate info for uploads/sec
type XferStats struct {
	AvgRate  float64 `json:"avgRate"`
	PeakRate float64 `json:"peakRate"`
	CurrRate float64 `json:"currRate"`
}

// Merge accumulates the rates of o into x in place.
func (x *XferStats) Merge(o XferStats) {
	x.AvgRate += o.AvgRate
	x.PeakRate += o.PeakRate
	x.CurrRate += o.CurrRate
}

// QStat holds count and bytes for objects in replication queue
type QStat struct {
	Count float64 `json:"count"`
	Bytes float64 `json:"bytes"`
}

// Add accumulates q1 into q in place.
func (q *QStat) Add(q1 QStat) {
	q.Count, q.Bytes = q.Count+q1.Count, q.Bytes+q1.Bytes
}

// InQueueMetric holds stats for objects in replication queue
type InQueueMetric struct {
	Curr QStat `json:"curr" msg:"cq"`
	Avg  QStat `json:"avg" msg:"aq"`
	Max  QStat `json:"peak" msg:"pq"`
}
829
// MetricName name of replication metric.
type MetricName string

const (
	// Large is a metric name for large objects >=128MiB
	Large MetricName = "Large"
	// Small is a metric name for objects <128MiB size
	Small MetricName = "Small"
	// Total is a metric name for total objects
	Total MetricName = "Total"
)
841
// WorkerStat has stats on number of replication workers.
type WorkerStat struct {
	Curr int32   `json:"curr"` // currently active workers
	Avg  float32 `json:"avg"`  // average worker count
	Max  int32   `json:"max"`  // peak worker count
}
848
// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
// and number of entries that failed replication after 3 retries.
type ReplMRFStats struct {
	LastFailedCount uint64 `json:"failedCount_last5min"`
	// Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
	TotalDroppedCount uint64 `json:"droppedCount_since_uptime"`
	// Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
	TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"`
}
858
// ReplQNodeStats holds stats for a single node in the replication queue.
type ReplQNodeStats struct {
	NodeName string     `json:"nodeName"`
	Uptime   int64      `json:"uptime"`
	Workers  WorkerStat `json:"activeWorkers"`

	// XferStats is the node-wide transfer summary; TgtXferStats is
	// keyed by target ARN, then by metric name.
	XferStats    map[MetricName]XferStats            `json:"transferSummary"`
	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`

	QStats   InQueueMetric `json:"queueStats"`
	MRFStats ReplMRFStats  `json:"mrfStats"`
}
871
// ReplQueueStats holds stats for replication queue across nodes.
type ReplQueueStats struct {
	Nodes []ReplQNodeStats `json:"nodes"`
}
876
877// Workers returns number of workers across all nodes
878func (q ReplQueueStats) Workers() (tot WorkerStat) {
879 for _, node := range q.Nodes {
880 tot.Avg += node.Workers.Avg
881 tot.Curr += node.Workers.Curr
882 if tot.Max < node.Workers.Max {
883 tot.Max = node.Workers.Max
884 }
885 }
886 if len(q.Nodes) > 0 {
887 tot.Avg /= float32(len(q.Nodes))
888 tot.Curr /= int32(len(q.Nodes))
889 }
890 return tot
891}
892
// qStatSummary returns cluster level stats for objects in replication queue.
func (q ReplQueueStats) qStatSummary() InQueueMetric {
	m := InQueueMetric{}
	for _, v := range q.Nodes {
		m.Avg.Add(v.QStats.Avg)
		m.Curr.Add(v.QStats.Curr)
		// NOTE(review): when a node's peak count exceeds the running
		// peak, that node's peak entry is ADDED to m.Max rather than
		// replacing it — confirm accumulation (not max-selection) is
		// the intended cluster-peak semantics.
		if m.Max.Count < v.QStats.Max.Count {
			m.Max.Add(v.QStats.Max)
		}
	}
	return m
}
905
// ReplQStats holds cluster-level (aggregated) stats for objects in the
// replication queue.
type ReplQStats struct {
	Uptime  int64      `json:"uptime"`
	Workers WorkerStat `json:"workers"`

	// XferStats is keyed by metric name; TgtXferStats is keyed by
	// target ARN, then metric name.
	XferStats    map[MetricName]XferStats            `json:"xferStats"`
	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`

	QStats   InQueueMetric `json:"qStats"`
	MRFStats ReplMRFStats  `json:"mrfStats"`
}
917
918// QStats returns cluster level stats for objects in replication queue
919func (q ReplQueueStats) QStats() (r ReplQStats) {
920 r.QStats = q.qStatSummary()
921 r.XferStats = make(map[MetricName]XferStats)
922 r.TgtXferStats = make(map[string]map[MetricName]XferStats)
923 r.Workers = q.Workers()
924
925 for _, node := range q.Nodes {
926 for arn := range node.TgtXferStats {
927 xmap, ok := node.TgtXferStats[arn]
928 if !ok {
929 xmap = make(map[MetricName]XferStats)
930 }
931 for m, v := range xmap {
932 st, ok := r.XferStats[m]
933 if !ok {
934 st = XferStats{}
935 }
936 st.AvgRate += v.AvgRate
937 st.CurrRate += v.CurrRate
938 st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
939 if _, ok := r.TgtXferStats[arn]; !ok {
940 r.TgtXferStats[arn] = make(map[MetricName]XferStats)
941 }
942 r.TgtXferStats[arn][m] = st
943 }
944 }
945 for k, v := range node.XferStats {
946 st, ok := r.XferStats[k]
947 if !ok {
948 st = XferStats{}
949 }
950 st.AvgRate += v.AvgRate
951 st.CurrRate += v.CurrRate
952 st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
953 r.XferStats[k] = st
954 }
955 r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount
956 r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount
957 r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes
958 r.Uptime += node.Uptime
959 }
960 if len(q.Nodes) > 0 {
961 r.Uptime /= int64(len(q.Nodes)) // average uptime
962 }
963 return
964}
965
// MetricsV2 represents replication metrics for a bucket.
type MetricsV2 struct {
	Uptime       int64          `json:"uptime"`
	CurrentStats Metrics        `json:"currStats"`
	QueueStats   ReplQueueStats `json:"queueStats"`
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
new file mode 100644
index 0000000..056e78a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -0,0 +1,411 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package s3utils
19
20import (
21 "bytes"
22 "encoding/hex"
23 "errors"
24 "net"
25 "net/url"
26 "regexp"
27 "sort"
28 "strings"
29 "unicode/utf8"
30)
31
// sentinelURL is the zero-value url.URL; an endpoint equal to it is
// treated as unset/invalid by the helpers below.
var sentinelURL = url.URL{}
34
// IsValidDomain validates if input string is a valid domain name.
// See RFC 1035, RFC 3696. The check is deliberately non-exhaustive;
// names that slip through are rejected later by the server.
func IsValidDomain(host string) bool {
	host = strings.TrimSpace(host)
	if len(host) == 0 || len(host) > 255 {
		return false
	}
	// A hostname may not begin or end with '-' or '_'.
	for _, ch := range []string{"-", "_"} {
		if strings.HasPrefix(host, ch) || strings.HasSuffix(host, ch) {
			return false
		}
	}
	// A hostname may not begin with '.'.
	if strings.HasPrefix(host, ".") {
		return false
	}
	// Reject obviously non-hostname characters.
	return !strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/")
}
62
// IsValidIP reports whether ip parses as an IPv4 or IPv6 address.
func IsValidIP(ip string) bool {
	parsed := net.ParseIP(ip)
	return parsed != nil
}
67
68// IsVirtualHostSupported - verifies if bucketName can be part of
69// virtual host. Currently only Amazon S3 and Google Cloud Storage
70// would support this.
71func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
72 if endpointURL == sentinelURL {
73 return false
74 }
75 // bucketName can be valid but '.' in the hostname will fail SSL
76 // certificate validation. So do not use host-style for such buckets.
77 if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
78 return false
79 }
80 // Return true for all other cases
81 return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL)
82}
83
// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
//
// NOTE(review): the '.' characters in these patterns are unescaped and
// therefore match any character, not just a literal dot — confirm this
// looseness is acceptable before tightening.

// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`)

// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`)

// amazonS3HostFIPS - regular expression used to determine if an arg is s3 FIPS host.
var amazonS3HostFIPS = regexp.MustCompile(`^s3-fips.(.*?).amazonaws.com$`)

// amazonS3HostFIPSDualStack - regular expression used to determine if an arg is s3 FIPS host dualstack.
var amazonS3HostFIPSDualStack = regexp.MustCompile(`^s3-fips.dualstack.(.*?).amazonaws.com$`)

// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`)

// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`)

// amazonS3ChinaHostDualStack - regular expression used to determine if the arg is s3 china host dualstack.
var amazonS3ChinaHostDualStack = regexp.MustCompile(`^s3.dualstack.(cn.*?).amazonaws.com.cn$`)

// elbAmazonRegex - regular expression used to determine if the arg is an elb host.
var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`)

// elbAmazonCnRegex - regular expression used to determine if the arg is an elb host in china.
var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`)

// amazonS3HostPrivateLink - regular expression used to determine if an arg is s3 host in AWS PrivateLink interface endpoints style
var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`)
115
116// GetRegionFromURL - returns a region from url host.
117func GetRegionFromURL(endpointURL url.URL) string {
118 if endpointURL == sentinelURL {
119 return ""
120 }
121 if endpointURL.Host == "s3-external-1.amazonaws.com" {
122 return ""
123 }
124
125 // if elb's are used we cannot calculate which region it may be, just return empty.
126 if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
127 return ""
128 }
129
130 // We check for FIPS dualstack matching first to avoid the non-greedy
131 // regex for FIPS non-dualstack matching a dualstack URL
132 parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
133 if len(parts) > 1 {
134 return parts[1]
135 }
136
137 parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
138 if len(parts) > 1 {
139 return parts[1]
140 }
141
142 parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
143 if len(parts) > 1 {
144 return parts[1]
145 }
146
147 parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
148 if len(parts) > 1 {
149 return parts[1]
150 }
151
152 parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
153 if len(parts) > 1 {
154 return parts[1]
155 }
156
157 parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
158 if len(parts) > 1 {
159 return parts[1]
160 }
161
162 parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
163 if len(parts) > 1 {
164 return parts[1]
165 }
166
167 parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
168 if len(parts) > 1 {
169 return parts[1]
170 }
171
172 return ""
173}
174
175// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint.
176func IsAliyunOSSEndpoint(endpointURL url.URL) bool {
177 return strings.HasSuffix(endpointURL.Host, "aliyuncs.com")
178}
179
180// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
181func IsAmazonEndpoint(endpointURL url.URL) bool {
182 if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
183 return true
184 }
185 return GetRegionFromURL(endpointURL) != ""
186}
187
188// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
189func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
190 if endpointURL == sentinelURL {
191 return false
192 }
193 return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
194 endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" ||
195 IsAmazonFIPSGovCloudEndpoint(endpointURL))
196}
197
198// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud.
199func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
200 if endpointURL == sentinelURL {
201 return false
202 }
203 return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-")
204}
205
206// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
207// See https://aws.amazon.com/compliance/fips.
208func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
209 if endpointURL == sentinelURL {
210 return false
211 }
212 return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com")
213}
214
215// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint
216// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html.
217func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool {
218 if endpointURL == sentinelURL {
219 return false
220 }
221 return amazonS3HostPrivateLink.MatchString(endpointURL.Host)
222}
223
224// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
225func IsGoogleEndpoint(endpointURL url.URL) bool {
226 if endpointURL == sentinelURL {
227 return false
228 }
229 return endpointURL.Host == "storage.googleapis.com"
230}
231
// percentEncodeSlash replaces every "/" with "%2F". Input is expected
// to already be ASCII output of EncodePath.
func percentEncodeSlash(s string) string {
	const encodedSlash = "%2F"
	return strings.ReplaceAll(s, "/", encodedSlash)
}
236
237// QueryEncode - encodes query values in their URL encoded form. In
238// addition to the percent encoding performed by urlEncodePath() used
239// here, it also percent encodes '/' (forward slash)
240func QueryEncode(v url.Values) string {
241 if v == nil {
242 return ""
243 }
244 var buf bytes.Buffer
245 keys := make([]string, 0, len(v))
246 for k := range v {
247 keys = append(keys, k)
248 }
249 sort.Strings(keys)
250 for _, k := range keys {
251 vs := v[k]
252 prefix := percentEncodeSlash(EncodePath(k)) + "="
253 for _, v := range vs {
254 if buf.Len() > 0 {
255 buf.WriteByte('&')
256 }
257 buf.WriteString(prefix)
258 buf.WriteString(percentEncodeSlash(EncodePath(v)))
259 }
260 }
261 return buf.String()
262}
263
// TagDecode - decodes a canonical tag string ("k1=v1&k2=v2") into a
// map of key to path-unescaped value. Returns an empty map when the
// input is empty or any pair lacks an '=' separator.
//
// Fixes over the previous revision: the unreachable len(kvs)==0 branch
// is removed (SplitN never returns an empty slice here), and a value
// that fails unescaping no longer leaves an empty-string entry in the
// map before being skipped.
func TagDecode(ctag string) map[string]string {
	if ctag == "" {
		return map[string]string{}
	}
	tags := strings.Split(ctag, "&")
	tagMap := make(map[string]string, len(tags))
	for _, tag := range tags {
		kvs := strings.SplitN(tag, "=", 2)
		if len(kvs) != 2 {
			// Malformed pair: reject the whole tag string.
			return map[string]string{}
		}
		v, err := url.PathUnescape(kvs[1])
		if err != nil {
			// Skip values that fail unescaping; keep the rest.
			continue
		}
		tagMap[kvs[0]] = v
	}
	return tagMap
}
287
288// TagEncode - encodes tag values in their URL encoded form. In
289// addition to the percent encoding performed by urlEncodePath() used
290// here, it also percent encodes '/' (forward slash)
291func TagEncode(tags map[string]string) string {
292 if tags == nil {
293 return ""
294 }
295 values := url.Values{}
296 for k, v := range tags {
297 values[k] = []string{v}
298 }
299 return QueryEncode(values)
300}
301
// reservedObjectNames matches paths consisting solely of unreserved
// characters, which need no encoding at all.
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")

// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
// non english characters cannot be parsed due to the nature in which url.Encode() is written
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func EncodePath(pathName string) string {
	if reservedObjectNames.MatchString(pathName) {
		return pathName
	}
	var sb strings.Builder
	for _, r := range pathName {
		switch {
		case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', '0' <= r && r <= '9':
			// RFC 3986 §2.3 unreserved characters pass through.
			sb.WriteRune(r)
		case r == '-' || r == '_' || r == '.' || r == '~' || r == '/':
			// Unreserved marks plus the path separator pass through.
			sb.WriteRune(r)
		default:
			n := utf8.RuneLen(r)
			if n < 0 {
				// Invalid UTF-8: return the input unchanged, as-is.
				return pathName
			}
			scratch := make([]byte, n)
			utf8.EncodeRune(scratch, r)
			for _, b := range scratch {
				sb.WriteString("%" + strings.ToUpper(hex.EncodeToString([]byte{b})))
			}
		}
	}
	return sb.String()
}
342
// We support '.' with bucket names but we fallback to using path
// style requests instead for such buckets.
var (
	validBucketName       = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
	validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
	ipAddress             = regexp.MustCompile(`^(\d+\.){3}\d+$`)
)

// checkBucketNameCommon is the shared checker for both the strict and
// the relaxed bucket-name validation entry points.
func checkBucketNameCommon(bucketName string, strict bool) error {
	switch {
	case strings.TrimSpace(bucketName) == "":
		return errors.New("Bucket name cannot be empty")
	case len(bucketName) < 3:
		return errors.New("Bucket name cannot be shorter than 3 characters")
	case len(bucketName) > 63:
		return errors.New("Bucket name cannot be longer than 63 characters")
	case ipAddress.MatchString(bucketName):
		return errors.New("Bucket name cannot be an ip address")
	case strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-."):
		return errors.New("Bucket name contains invalid characters")
	}
	pattern := validBucketName
	if strict {
		pattern = validBucketNameStrict
	}
	if !pattern.MatchString(bucketName) {
		return errors.New("Bucket name contains invalid characters")
	}
	return nil
}
379
// CheckValidBucketName - checks if we have a valid input bucket name.
// Uses the relaxed rules (mixed case, '_' and ':' permitted); see
// CheckValidBucketNameStrict for the S3-strict variant.
func CheckValidBucketName(bucketName string) (err error) {
	return checkBucketNameCommon(bucketName, false)
}
384
// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
// This is a stricter version (lowercase letters, digits, '.' and '-' only).
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func CheckValidBucketNameStrict(bucketName string) (err error) {
	return checkBucketNameCommon(bucketName, true)
}
391
// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func CheckValidObjectNamePrefix(objectName string) error {
	switch {
	case len(objectName) > 1024:
		return errors.New("Object name cannot be longer than 1024 characters")
	case !utf8.ValidString(objectName):
		return errors.New("Object name with non UTF-8 strings are not supported")
	}
	return nil
}
403
404// CheckValidObjectName - checks if we have a valid input object name.
405// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
406func CheckValidObjectName(objectName string) error {
407 if strings.TrimSpace(objectName) == "" {
408 return errors.New("Object name cannot be empty")
409 }
410 return CheckValidObjectNamePrefix(objectName)
411}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
new file mode 100644
index 0000000..c35e58e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
@@ -0,0 +1,200 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package set
19
20import (
21 "fmt"
22 "sort"
23
24 jsoniter "github.com/json-iterator/go"
25)
26
// StringSet - uses map as set of strings; the empty struct value keeps
// per-entry storage at zero.
type StringSet map[string]struct{}

// json is a drop-in replacement for encoding/json backed by jsoniter.
var json = jsoniter.ConfigCompatibleWithStandardLibrary
31
32// ToSlice - returns StringSet as string slice.
33func (set StringSet) ToSlice() []string {
34 keys := make([]string, 0, len(set))
35 for k := range set {
36 keys = append(keys, k)
37 }
38 sort.Strings(keys)
39 return keys
40}
41
// IsEmpty - returns whether the set is empty or not.
func (set StringSet) IsEmpty() bool {
	return len(set) == 0
}
46
// Add - adds string to the set; adding an existing member is a no-op.
func (set StringSet) Add(s string) {
	set[s] = struct{}{}
}
51
// Remove - removes string in the set. It does nothing if string does not exist in the set.
func (set StringSet) Remove(s string) {
	delete(set, s)
}
56
// Contains - checks if string is in the set.
func (set StringSet) Contains(s string) bool {
	_, ok := set[s]
	return ok
}
62
63// FuncMatch - returns new set containing each value who passes match function.
64// A 'matchFn' should accept element in a set as first argument and
65// 'matchString' as second argument. The function can do any logic to
66// compare both the arguments and should return true to accept element in
67// a set to include in output set else the element is ignored.
68func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
69 nset := NewStringSet()
70 for k := range set {
71 if matchFn(k, matchString) {
72 nset.Add(k)
73 }
74 }
75 return nset
76}
77
78// ApplyFunc - returns new set containing each value processed by 'applyFn'.
79// A 'applyFn' should accept element in a set as a argument and return
80// a processed string. The function can do any logic to return a processed
81// string.
82func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
83 nset := NewStringSet()
84 for k := range set {
85 nset.Add(applyFn(k))
86 }
87 return nset
88}
89
90// Equals - checks whether given set is equal to current set or not.
91func (set StringSet) Equals(sset StringSet) bool {
92 // If length of set is not equal to length of given set, the
93 // set is not equal to given set.
94 if len(set) != len(sset) {
95 return false
96 }
97
98 // As both sets are equal in length, check each elements are equal.
99 for k := range set {
100 if _, ok := sset[k]; !ok {
101 return false
102 }
103 }
104
105 return true
106}
107
108// Intersection - returns the intersection with given set as new set.
109func (set StringSet) Intersection(sset StringSet) StringSet {
110 nset := NewStringSet()
111 for k := range set {
112 if _, ok := sset[k]; ok {
113 nset.Add(k)
114 }
115 }
116
117 return nset
118}
119
120// Difference - returns the difference with given set as new set.
121func (set StringSet) Difference(sset StringSet) StringSet {
122 nset := NewStringSet()
123 for k := range set {
124 if _, ok := sset[k]; !ok {
125 nset.Add(k)
126 }
127 }
128
129 return nset
130}
131
132// Union - returns the union with given set as new set.
133func (set StringSet) Union(sset StringSet) StringSet {
134 nset := NewStringSet()
135 for k := range set {
136 nset.Add(k)
137 }
138
139 for k := range sset {
140 nset.Add(k)
141 }
142
143 return nset
144}
145
// MarshalJSON - converts the set to JSON data, encoded as a sorted
// string array (via ToSlice).
func (set StringSet) MarshalJSON() ([]byte, error) {
	return json.Marshal(set.ToSlice())
}
150
151// UnmarshalJSON - parses JSON data and creates new set with it.
152// If 'data' contains JSON string array, the set contains each string.
153// If 'data' contains JSON string, the set contains the string as one element.
154// If 'data' contains Other JSON types, JSON parse error is returned.
155func (set *StringSet) UnmarshalJSON(data []byte) error {
156 sl := []string{}
157 var err error
158 if err = json.Unmarshal(data, &sl); err == nil {
159 *set = make(StringSet)
160 for _, s := range sl {
161 set.Add(s)
162 }
163 } else {
164 var s string
165 if err = json.Unmarshal(data, &s); err == nil {
166 *set = make(StringSet)
167 set.Add(s)
168 }
169 }
170
171 return err
172}
173
// String - returns printable string of the set, formatted as the
// sorted slice of members (e.g. "[a b c]").
func (set StringSet) String() string {
	return fmt.Sprintf("%s", set.ToSlice())
}
178
// NewStringSet - creates new empty string set.
func NewStringSet() StringSet {
	return make(StringSet)
}
183
184// CreateStringSet - creates new string set with given string values.
185func CreateStringSet(sl ...string) StringSet {
186 set := make(StringSet)
187 for _, k := range sl {
188 set.Add(k)
189 }
190 return set
191}
192
193// CopyStringSet - returns copy of given set.
194func CopyStringSet(set StringSet) StringSet {
195 nset := NewStringSet()
196 for k, v := range set {
197 nset[k] = v
198 }
199 return nset
200}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
new file mode 100644
index 0000000..77540e2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
@@ -0,0 +1,224 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "fmt"
23 "io"
24 "net/http"
25 "strconv"
26 "strings"
27 "time"
28)
29
30// getUnsignedChunkLength - calculates the length of chunk metadata
31func getUnsignedChunkLength(chunkDataSize int64) int64 {
32 return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
33 crlfLen +
34 chunkDataSize +
35 crlfLen
36}
37
38// getUSStreamLength - calculates the length of the overall stream (data + metadata)
39func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
40 if dataLen <= 0 {
41 return 0
42 }
43
44 chunksCount := int64(dataLen / chunkSize)
45 remainingBytes := int64(dataLen % chunkSize)
46 streamLen := int64(0)
47 streamLen += chunksCount * getUnsignedChunkLength(chunkSize)
48 if remainingBytes > 0 {
49 streamLen += getUnsignedChunkLength(remainingBytes)
50 }
51 streamLen += getUnsignedChunkLength(0)
52 if len(trailers) > 0 {
53 for name, placeholder := range trailers {
54 if len(placeholder) > 0 {
55 streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
56 }
57 }
58 streamLen += crlfLen
59 }
60
61 return streamLen
62}
63
// prepareUSStreamingRequest - prepares a request with the headers
// needed for an unsigned aws-chunked upload: transfer encoding,
// optional session token, X-Amz-Date, and the full on-wire content
// length (chunk framing included).
func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
	req.TransferEncoding = []string{"aws-chunked"}
	if sessionToken != "" {
		req.Header.Set("X-Amz-Security-Token", sessionToken)
	}

	req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
	// Set content length with streaming signature for each chunk included.
	req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
}
76
// StreamingUSReader implements chunked upload signature as a reader on
// top of req.Body's ReaderCloser: chunk header;data;... repeated.
type StreamingUSReader struct {
	contentLen     int64         // Content-Length from req header
	baseReadCloser io.ReadCloser // underlying io.Reader
	bytesRead      int64         // bytes read from underlying io.Reader
	buf            bytes.Buffer  // holds signed chunk
	chunkBuf       []byte        // holds raw data read from req Body
	chunkBufLen    int           // no. of bytes read so far into chunkBuf
	done           bool          // done reading the underlying reader to EOF
	chunkNum       int           // 1-based index of the chunk being built
	totalChunks    int           // expected chunk count incl. terminating chunk
	lastChunkSize  int           // size of the final (partial) data chunk
	trailer        http.Header   // trailers emitted after the last chunk
}
92
93// writeChunk - signs a chunk read from s.baseReader of chunkLen size.
94func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) {
95 s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n")
96
97 // Write chunk data into streaming buffer
98 s.buf.Write(s.chunkBuf[:chunkLen])
99
100 // Write the chunk trailer.
101 if addCrLf {
102 s.buf.Write([]byte("\r\n"))
103 }
104
105 // Reset chunkBufLen for next chunk read.
106 s.chunkBufLen = 0
107 s.chunkNum++
108}
109
// addTrailer - writes the trailer section ("name:value\n" per header,
// lowercased names, first value only) followed by the final CRLF CRLF
// into the output buffer.
func (s *StreamingUSReader) addTrailer(h http.Header) {
	// s.chunkBuf is borrowed as scratch space and restored to its
	// original length afterwards so later reads see a full-size buffer.
	olen := len(s.chunkBuf)
	s.chunkBuf = s.chunkBuf[:0]
	for k, v := range h {
		s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
	}

	s.buf.Write(s.chunkBuf)
	s.buf.WriteString("\r\n\r\n")

	// Reset chunkBufLen for next chunk read.
	s.chunkBuf = s.chunkBuf[:olen]
	s.chunkBufLen = 0
	s.chunkNum++
}
127
128// StreamingUnsignedV4 - provides chunked upload
129func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request {
130 // Set headers needed for streaming signature.
131 prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)
132
133 if req.Body == nil {
134 req.Body = io.NopCloser(bytes.NewReader([]byte("")))
135 }
136
137 stReader := &StreamingUSReader{
138 baseReadCloser: req.Body,
139 chunkBuf: make([]byte, payloadChunkSize),
140 contentLen: dataLen,
141 chunkNum: 1,
142 totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
143 lastChunkSize: int(dataLen % payloadChunkSize),
144 }
145 if len(req.Trailer) > 0 {
146 stReader.trailer = req.Trailer
147 // Remove...
148 req.Trailer = nil
149 }
150
151 req.Body = stReader
152
153 return req
154}
155
// Read - this method performs chunk upload framing providing an
// io.Reader interface. It refills s.buf with the next framed chunk
// whenever s.buf holds fewer bytes than requested, and serves the
// caller from s.buf.
func (s *StreamingUSReader) Read(buf []byte) (int, error) {
	switch {
	// After the last chunk is read from underlying reader, we
	// never re-fill s.buf.
	case s.done:

	// s.buf will be (re-)filled with next chunk when has lesser
	// bytes than asked for.
	case s.buf.Len() < len(buf):
		s.chunkBufLen = 0
		for {
			n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
			// Usually we validate `err` first, but in this case
			// we are validating n > 0 for the following reasons.
			//
			// 1. n > 0, err is one of io.EOF, nil (near end of stream)
			// A Reader returning a non-zero number of bytes at the end
			// of the input stream may return either err == EOF or err == nil
			//
			// 2. n == 0, err is io.EOF (actual end of stream)
			//
			// Callers should always process the n > 0 bytes returned
			// before considering the error err.
			if n1 > 0 {
				s.chunkBufLen += n1
				s.bytesRead += int64(n1)

				// Frame a chunk once the buffer is full, or once the
				// final partial chunk has been fully read.
				if s.chunkBufLen == payloadChunkSize ||
					(s.chunkNum == s.totalChunks-1 &&
						s.chunkBufLen == s.lastChunkSize) {
					// Frame the chunk and write it to s.buf.
					s.writeChunk(s.chunkBufLen, true)
					break
				}
			}
			if err != nil {
				if err == io.EOF {
					// No more data left in baseReader - last chunk.
					// Done reading the last chunk from baseReader.
					s.done = true

					// bytes read from baseReader different than
					// content length provided.
					if s.bytesRead != s.contentLen {
						return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
					}

					// Write the terminating zero-length chunk; its
					// closing CRLF is omitted when trailers follow.
					s.writeChunk(0, len(s.trailer) == 0)
					if len(s.trailer) > 0 {
						// Trailer must be set now.
						s.addTrailer(s.trailer)
					}
					break
				}
				return 0, err
			}

		}
	}
	return s.buf.Read(buf)
}
220
// Close - this method makes underlying io.ReadCloser's Close method available.
func (s *StreamingUSReader) Close() error {
	return s.baseReadCloser.Close()
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
new file mode 100644
index 0000000..1c2f1dc
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
@@ -0,0 +1,403 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "encoding/hex"
23 "fmt"
24 "io"
25 "net/http"
26 "strconv"
27 "strings"
28 "time"
29
30 md5simd "github.com/minio/md5-simd"
31)
32
// Reference for constants used below -
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
const (
	streamingSignAlgorithm        = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
	streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
	streamingPayloadHdr           = "AWS4-HMAC-SHA256-PAYLOAD"
	streamingTrailerHdr           = "AWS4-HMAC-SHA256-TRAILER"
	// emptySHA256 is the hex SHA-256 digest of the empty string.
	emptySHA256      = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	payloadChunkSize = 64 * 1024 // 64 KiB of payload per signed chunk
	chunkSigConstLen = 17        // ";chunk-signature="
	signatureStrLen  = 64        // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
	crlfLen          = 2         // CRLF
	trailerKVSeparator = ":"
	trailerSignature   = "x-amz-trailer-signature"
)
48
// Request headers to be ignored while calculating seed signature for
// a request. These either change in transit or are set after signing.
var ignoredStreamingHeaders = map[string]bool{
	"Authorization": true,
	"User-Agent":    true,
	"Content-Type":  true,
}
56
57// getSignedChunkLength - calculates the length of chunk metadata
58func getSignedChunkLength(chunkDataSize int64) int64 {
59 return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
60 chunkSigConstLen +
61 signatureStrLen +
62 crlfLen +
63 chunkDataSize +
64 crlfLen
65}
66
67// getStreamLength - calculates the length of the overall stream (data + metadata)
68func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
69 if dataLen <= 0 {
70 return 0
71 }
72
73 chunksCount := int64(dataLen / chunkSize)
74 remainingBytes := int64(dataLen % chunkSize)
75 streamLen := int64(0)
76 streamLen += chunksCount * getSignedChunkLength(chunkSize)
77 if remainingBytes > 0 {
78 streamLen += getSignedChunkLength(remainingBytes)
79 }
80 streamLen += getSignedChunkLength(0)
81 if len(trailers) > 0 {
82 for name, placeholder := range trailers {
83 if len(placeholder) > 0 {
84 streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
85 }
86 }
87 streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen
88 }
89
90 return streamLen
91}
92
93// buildChunkStringToSign - returns the string to sign given chunk data
94// and previous signature.
95func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
96 stringToSignParts := []string{
97 streamingPayloadHdr,
98 t.Format(iso8601DateFormat),
99 getScope(region, t, ServiceTypeS3),
100 previousSig,
101 emptySHA256,
102 chunkChecksum,
103 }
104
105 return strings.Join(stringToSignParts, "\n")
106}
107
108// buildTrailerChunkStringToSign - returns the string to sign given chunk data
109// and previous signature.
110func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
111 stringToSignParts := []string{
112 streamingTrailerHdr,
113 t.Format(iso8601DateFormat),
114 getScope(region, t, ServiceTypeS3),
115 previousSig,
116 chunkChecksum,
117 }
118
119 return strings.Join(stringToSignParts, "\n")
120}
121
122// prepareStreamingRequest - prepares a request with appropriate
123// headers before computing the seed signature.
124func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
125 // Set x-amz-content-sha256 header.
126 if len(req.Trailer) == 0 {
127 req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
128 } else {
129 req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm)
130 for k := range req.Trailer {
131 req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
132 }
133 req.TransferEncoding = []string{"aws-chunked"}
134 }
135
136 if sessionToken != "" {
137 req.Header.Set("X-Amz-Security-Token", sessionToken)
138 }
139
140 req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
141 // Set content length with streaming signature for each chunk included.
142 req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
143 req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
144}
145
// buildChunkHeader - returns the chunk header.
// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n
func buildChunkHeader(chunkLen int64, signature string) []byte {
	var b bytes.Buffer
	b.WriteString(strconv.FormatInt(chunkLen, 16))
	b.WriteString(";chunk-signature=")
	b.WriteString(signature)
	b.WriteString("\r\n")
	return b.Bytes()
}
151
152// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
153func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
154 previousSignature, secretAccessKey string,
155) string {
156 chunkStringToSign := buildChunkStringToSign(reqTime, region,
157 previousSignature, chunkCheckSum)
158 signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
159 return getSignature(signingKey, chunkStringToSign)
160}
161
162// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
163func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
164 previousSignature, secretAccessKey string,
165) string {
166 chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
167 previousSignature, chunkChecksum)
168 signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
169 return getSignature(signingKey, chunkStringToSign)
170}
171
172// getSeedSignature - returns the seed signature for a given request.
173func (s *StreamingReader) setSeedSignature(req *http.Request) {
174 // Get canonical request
175 canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req))
176
177 // Get string to sign from canonical request.
178 stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3)
179
180 signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3)
181
182 // Calculate signature.
183 s.seedSignature = getSignature(signingKey, stringToSign)
184}
185
// StreamingReader implements chunked upload signature as a reader on
// top of req.Body's ReaderCloser chunk header;data;... repeat
type StreamingReader struct {
	accessKeyID     string
	secretAccessKey string
	sessionToken    string
	region          string
	prevSignature   string        // signature of the previous chunk (seed signature for the first)
	seedSignature   string        // signature of the request headers; starts the chunk chain
	contentLen      int64         // Content-Length from req header
	baseReadCloser  io.ReadCloser // underlying io.Reader
	bytesRead       int64         // bytes read from underlying io.Reader
	buf             bytes.Buffer  // holds signed chunk
	chunkBuf        []byte        // holds raw data read from req Body
	chunkBufLen     int           // no. of bytes read so far into chunkBuf
	done            bool          // done reading the underlying reader to EOF
	reqTime         time.Time     // timestamp used for all signatures
	chunkNum        int           // 1-based index of the chunk being built
	totalChunks     int           // data chunks plus the final zero-length chunk
	lastChunkSize   int           // payload size of the final data chunk
	trailer         http.Header   // trailer headers to sign and emit after data
	sh256           md5simd.Hasher // SHA-256 hasher reused for every chunk checksum
}
209
210// signChunk - signs a chunk read from s.baseReader of chunkLen size.
211func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
212 // Compute chunk signature for next header
213 s.sh256.Reset()
214 s.sh256.Write(s.chunkBuf[:chunkLen])
215 chunckChecksum := hex.EncodeToString(s.sh256.Sum(nil))
216
217 signature := buildChunkSignature(chunckChecksum, s.reqTime,
218 s.region, s.prevSignature, s.secretAccessKey)
219
220 // For next chunk signature computation
221 s.prevSignature = signature
222
223 // Write chunk header into streaming buffer
224 chunkHdr := buildChunkHeader(int64(chunkLen), signature)
225 s.buf.Write(chunkHdr)
226
227 // Write chunk data into streaming buffer
228 s.buf.Write(s.chunkBuf[:chunkLen])
229
230 // Write the chunk trailer.
231 if addCrLf {
232 s.buf.Write([]byte("\r\n"))
233 }
234
235 // Reset chunkBufLen for next chunk read.
236 s.chunkBufLen = 0
237 s.chunkNum++
238}
239
// addSignedTrailer - adds a trailer with the provided headers,
// then signs a chunk and adds it to output.
func (s *StreamingReader) addSignedTrailer(h http.Header) {
	// Reuse chunkBuf as scratch space for the serialized trailer bytes,
	// remembering its original length so it can be restored afterwards.
	olen := len(s.chunkBuf)
	s.chunkBuf = s.chunkBuf[:0]
	// Serialize each trailer as "lowercase-name:value\n" (first value only).
	for k, v := range h {
		s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
	}

	s.sh256.Reset()
	s.sh256.Write(s.chunkBuf)
	chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
	// Compute chunk signature
	signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
		s.region, s.prevSignature, s.secretAccessKey)

	// For next chunk signature computation
	s.prevSignature = signature

	// Trailer block: serialized headers, then the trailer signature line,
	// terminated by a blank line.
	s.buf.Write(s.chunkBuf)
	s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n")

	// Reset chunkBufLen for next chunk read.
	s.chunkBuf = s.chunkBuf[:olen]
	s.chunkBufLen = 0
	s.chunkNum++
}
267
268// setStreamingAuthHeader - builds and sets authorization header value
269// for streaming signature.
270func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
271 credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3)
272 authParts := []string{
273 signV4Algorithm + " Credential=" + credential,
274 "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
275 "Signature=" + s.seedSignature,
276 }
277
278 // Set authorization header.
279 auth := strings.Join(authParts, ",")
280 req.Header.Set("Authorization", auth)
281}
282
283// StreamingSignV4 - provides chunked upload signatureV4 support by
284// implementing io.Reader.
285func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
286 region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
287) *http.Request {
288 // Set headers needed for streaming signature.
289 prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
290
291 if req.Body == nil {
292 req.Body = io.NopCloser(bytes.NewReader([]byte("")))
293 }
294
295 stReader := &StreamingReader{
296 baseReadCloser: req.Body,
297 accessKeyID: accessKeyID,
298 secretAccessKey: secretAccessKey,
299 sessionToken: sessionToken,
300 region: region,
301 reqTime: reqTime,
302 chunkBuf: make([]byte, payloadChunkSize),
303 contentLen: dataLen,
304 chunkNum: 1,
305 totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
306 lastChunkSize: int(dataLen % payloadChunkSize),
307 sh256: sh256,
308 }
309 if len(req.Trailer) > 0 {
310 stReader.trailer = req.Trailer
311 // Remove...
312 req.Trailer = nil
313 }
314
315 // Add the request headers required for chunk upload signing.
316
317 // Compute the seed signature.
318 stReader.setSeedSignature(req)
319
320 // Set the authorization header with the seed signature.
321 stReader.setStreamingAuthHeader(req)
322
323 // Set seed signature as prevSignature for subsequent
324 // streaming signing process.
325 stReader.prevSignature = stReader.seedSignature
326 req.Body = stReader
327
328 return req
329}
330
// Read - this method performs chunk upload signature providing a
// io.Reader interface.
//
// Data is always served out of s.buf, which holds already-signed
// chunks; s.buf is only refilled from the base reader when it has
// fewer bytes than the caller asked for.
func (s *StreamingReader) Read(buf []byte) (int, error) {
	switch {
	// After the last chunk is read from underlying reader, we
	// never re-fill s.buf.
	case s.done:

	// s.buf will be (re-)filled with next chunk when has lesser
	// bytes than asked for.
	case s.buf.Len() < len(buf):
		s.chunkBufLen = 0
		for {
			n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
			// Usually we validate `err` first, but in this case
			// we are validating n > 0 for the following reasons.
			//
			// 1. n > 0, err is one of io.EOF, nil (near end of stream)
			// A Reader returning a non-zero number of bytes at the end
			// of the input stream may return either err == EOF or err == nil
			//
			// 2. n == 0, err is io.EOF (actual end of stream)
			//
			// Callers should always process the n > 0 bytes returned
			// before considering the error err.
			if n1 > 0 {
				s.chunkBufLen += n1
				s.bytesRead += int64(n1)

				// A chunk is complete when chunkBuf is full, or when
				// this is the final (short) data chunk of the stream.
				if s.chunkBufLen == payloadChunkSize ||
					(s.chunkNum == s.totalChunks-1 &&
						s.chunkBufLen == s.lastChunkSize) {
					// Sign the chunk and write it to s.buf.
					s.signChunk(s.chunkBufLen, true)
					break
				}
			}
			if err != nil {
				if err == io.EOF {
					// No more data left in baseReader - last chunk.
					// Done reading the last chunk from baseReader.
					s.done = true

					// bytes read from baseReader different than
					// content length provided: the signed stream would be
					// invalid, so fail the read.
					if s.bytesRead != s.contentLen {
						return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
					}

					// Sign the final zero-length chunk; its trailing CRLF
					// is omitted when a signed trailer follows instead.
					s.signChunk(0, len(s.trailer) == 0)
					if len(s.trailer) > 0 {
						// Trailer must be set now.
						s.addSignedTrailer(s.trailer)
					}
					break
				}
				return 0, err
			}

		}
	}
	return s.buf.Read(buf)
}
395
396// Close - this method makes underlying io.ReadCloser's Close method available.
397func (s *StreamingReader) Close() error {
398 if s.sh256 != nil {
399 s.sh256.Close()
400 s.sh256 = nil
401 }
402 return s.baseReadCloser.Close()
403}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
new file mode 100644
index 0000000..fa4f8c9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
@@ -0,0 +1,319 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "crypto/hmac"
23 "crypto/sha1"
24 "encoding/base64"
25 "fmt"
26 "net/http"
27 "net/url"
28 "sort"
29 "strconv"
30 "strings"
31 "time"
32
33 "github.com/minio/minio-go/v7/pkg/s3utils"
34)
35
// Signature and API related constants.
const (
	// signV2Algorithm is the scheme name used in V2 Authorization headers.
	signV2Algorithm = "AWS"
)
40
41// Encode input URL path to URL encoded path.
42func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
43 if virtualHost {
44 reqHost := getHostAddr(req)
45 dotPos := strings.Index(reqHost, ".")
46 if dotPos > -1 {
47 bucketName := reqHost[:dotPos]
48 path = "/" + bucketName
49 path += req.URL.Path
50 path = s3utils.EncodePath(path)
51 return
52 }
53 }
54 path = s3utils.EncodePath(req.URL.Path)
55 return
56}
57
58// PreSignV2 - presign the request in following style.
59// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
60func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
61 // Presign is not needed for anonymous credentials.
62 if accessKeyID == "" || secretAccessKey == "" {
63 return &req
64 }
65
66 d := time.Now().UTC()
67 // Find epoch expires when the request will expire.
68 epochExpires := d.Unix() + expires
69
70 // Add expires header if not present.
71 if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
72 req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
73 }
74
75 // Get presigned string to sign.
76 stringToSign := preStringToSignV2(req, virtualHost)
77 hm := hmac.New(sha1.New, []byte(secretAccessKey))
78 hm.Write([]byte(stringToSign))
79
80 // Calculate signature.
81 signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
82
83 query := req.URL.Query()
84 // Handle specially for Google Cloud Storage.
85 if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
86 query.Set("GoogleAccessId", accessKeyID)
87 } else {
88 query.Set("AWSAccessKeyId", accessKeyID)
89 }
90
91 // Fill in Expires for presigned query.
92 query.Set("Expires", strconv.FormatInt(epochExpires, 10))
93
94 // Encode query and save.
95 req.URL.RawQuery = s3utils.QueryEncode(query)
96
97 // Save signature finally.
98 req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
99
100 // Return.
101 return &req
102}
103
// PostPresignSignatureV2 - presigned signature for PostPolicy
// request. Returns base64(HMAC-SHA1(secret, policyBase64)).
func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
	mac := hmac.New(sha1.New, []byte(secretAccessKey))
	mac.Write([]byte(policyBase64))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}
112
113// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
114// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
115//
116// StringToSign = HTTP-Verb + "\n" +
117// Content-Md5 + "\n" +
118// Content-Type + "\n" +
119// Date + "\n" +
120// CanonicalizedProtocolHeaders +
121// CanonicalizedResource;
122//
123// CanonicalizedResource = [ "/" + Bucket ] +
124// <HTTP-Request-URI, from the protocol name up to the query string> +
125// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
126//
127// CanonicalizedProtocolHeaders = <described below>
128
129// SignV2 sign the request before Do() (AWS Signature Version 2).
130func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
131 // Signature calculation is not needed for anonymous credentials.
132 if accessKeyID == "" || secretAccessKey == "" {
133 return &req
134 }
135
136 // Initial time.
137 d := time.Now().UTC()
138
139 // Add date if not present.
140 if date := req.Header.Get("Date"); date == "" {
141 req.Header.Set("Date", d.Format(http.TimeFormat))
142 }
143
144 // Calculate HMAC for secretAccessKey.
145 stringToSign := stringToSignV2(req, virtualHost)
146 hm := hmac.New(sha1.New, []byte(secretAccessKey))
147 hm.Write([]byte(stringToSign))
148
149 // Prepare auth header.
150 authHeader := new(bytes.Buffer)
151 authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
152 encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
153 encoder.Write(hm.Sum(nil))
154 encoder.Close()
155
156 // Set Authorization header.
157 req.Header.Set("Authorization", authHeader.String())
158
159 return &req
160}
161
162// From the Amazon docs:
163//
164// StringToSign = HTTP-Verb + "\n" +
165//
166// Content-Md5 + "\n" +
167// Content-Type + "\n" +
168// Expires + "\n" +
169// CanonicalizedProtocolHeaders +
170// CanonicalizedResource;
171func preStringToSignV2(req http.Request, virtualHost bool) string {
172 buf := new(bytes.Buffer)
173 // Write standard headers.
174 writePreSignV2Headers(buf, req)
175 // Write canonicalized protocol headers if any.
176 writeCanonicalizedHeaders(buf, req)
177 // Write canonicalized Query resources if any.
178 writeCanonicalizedResource(buf, req, virtualHost)
179 return buf.String()
180}
181
182// writePreSignV2Headers - write preSign v2 required headers.
183func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
184 buf.WriteString(req.Method + "\n")
185 buf.WriteString(req.Header.Get("Content-Md5") + "\n")
186 buf.WriteString(req.Header.Get("Content-Type") + "\n")
187 buf.WriteString(req.Header.Get("Expires") + "\n")
188}
189
190// From the Amazon docs:
191//
192// StringToSign = HTTP-Verb + "\n" +
193//
194// Content-Md5 + "\n" +
195// Content-Type + "\n" +
196// Date + "\n" +
197// CanonicalizedProtocolHeaders +
198// CanonicalizedResource;
199func stringToSignV2(req http.Request, virtualHost bool) string {
200 buf := new(bytes.Buffer)
201 // Write standard headers.
202 writeSignV2Headers(buf, req)
203 // Write canonicalized protocol headers if any.
204 writeCanonicalizedHeaders(buf, req)
205 // Write canonicalized Query resources if any.
206 writeCanonicalizedResource(buf, req, virtualHost)
207 return buf.String()
208}
209
210// writeSignV2Headers - write signV2 required headers.
211func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
212 buf.WriteString(req.Method + "\n")
213 buf.WriteString(req.Header.Get("Content-Md5") + "\n")
214 buf.WriteString(req.Header.Get("Content-Type") + "\n")
215 buf.WriteString(req.Header.Get("Date") + "\n")
216}
217
218// writeCanonicalizedHeaders - write canonicalized headers.
219func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
220 var protoHeaders []string
221 vals := make(map[string][]string)
222 for k, vv := range req.Header {
223 // All the AMZ headers should be lowercase
224 lk := strings.ToLower(k)
225 if strings.HasPrefix(lk, "x-amz") {
226 protoHeaders = append(protoHeaders, lk)
227 vals[lk] = vv
228 }
229 }
230 sort.Strings(protoHeaders)
231 for _, k := range protoHeaders {
232 buf.WriteString(k)
233 buf.WriteByte(':')
234 for idx, v := range vals[k] {
235 if idx > 0 {
236 buf.WriteByte(',')
237 }
238 buf.WriteString(v)
239 }
240 buf.WriteByte('\n')
241 }
242}
243
// AWS S3 Signature V2 calculation rule is give here:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign

// Whitelist resource list that will be used in query string for signature-V2 calculation.
// Only query parameters named here are folded into the canonicalized
// resource; all other parameters are excluded from the V2 signature.
//
// This list should be kept alphabetically sorted, do not hastily edit.
var resourceList = []string{
	"acl",
	"cors",
	"delete",
	"encryption",
	"legal-hold",
	"lifecycle",
	"location",
	"logging",
	"notification",
	"partNumber",
	"policy",
	"replication",
	"requestPayment",
	"response-cache-control",
	"response-content-disposition",
	"response-content-encoding",
	"response-content-language",
	"response-content-type",
	"response-expires",
	"retention",
	"select",
	"select-type",
	"tagging",
	"torrent",
	"uploadId",
	"uploads",
	"versionId",
	"versioning",
	"versions",
	"website",
}
282
283// From the Amazon docs:
284//
285// CanonicalizedResource = [ "/" + Bucket ] +
286//
287// <HTTP-Request-URI, from the protocol name up to the query string> +
288// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
289func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
290 // Save request URL.
291 requestURL := req.URL
292 // Get encoded URL path.
293 buf.WriteString(encodeURL2Path(&req, virtualHost))
294 if requestURL.RawQuery != "" {
295 var n int
296 vals, _ := url.ParseQuery(requestURL.RawQuery)
297 // Verify if any sub resource queries are present, if yes
298 // canonicallize them.
299 for _, resource := range resourceList {
300 if vv, ok := vals[resource]; ok && len(vv) > 0 {
301 n++
302 // First element
303 switch n {
304 case 1:
305 buf.WriteByte('?')
306 // The rest
307 default:
308 buf.WriteByte('&')
309 }
310 buf.WriteString(resource)
311 // Request parameters
312 if len(vv[0]) > 0 {
313 buf.WriteByte('=')
314 buf.WriteString(vv[0])
315 }
316 }
317 }
318 }
319}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
new file mode 100644
index 0000000..ffd2514
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
@@ -0,0 +1,351 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "bytes"
22 "encoding/hex"
23 "net/http"
24 "sort"
25 "strconv"
26 "strings"
27 "time"
28
29 "github.com/minio/minio-go/v7/pkg/s3utils"
30)
31
// Signature and API related constants.
const (
	signV4Algorithm   = "AWS4-HMAC-SHA256"
	iso8601DateFormat = "20060102T150405Z" // X-Amz-Date timestamp layout
	yyyymmdd          = "20060102"         // credential-scope date layout
)

// Different service types
const (
	ServiceTypeS3  = "s3"
	ServiceTypeSTS = "sts"
)
44
// Excerpts from @lsegal -
// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
//
// * User-Agent
// This is ignored from signing because signing this causes problems with generating pre-signed
// URLs (that are executed by other agents) or when customers pass requests through proxies, which
// may modify the user-agent.
//
// * Authorization
// Is skipped for obvious reasons.
//
// * Accept-Encoding
// Some S3 servers like Hitachi Content Platform do not honor this header for signature
// calculation.
var v4IgnoredHeaders = map[string]bool{
	"Accept-Encoding": true,
	"Authorization":   true,
	"User-Agent":      true,
}
64
65// getSigningKey hmac seed to calculate final signature.
66func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte {
67 date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
68 location := sumHMAC(date, []byte(loc))
69 service := sumHMAC(location, []byte(serviceType))
70 signingKey := sumHMAC(service, []byte("aws4_request"))
71 return signingKey
72}
73
74// getSignature final signature in hexadecimal form.
75func getSignature(signingKey []byte, stringToSign string) string {
76 return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
77}
78
79// getScope generate a string of a specific date, an AWS region, and a
80// service.
81func getScope(location string, t time.Time, serviceType string) string {
82 scope := strings.Join([]string{
83 t.Format(yyyymmdd),
84 location,
85 serviceType,
86 "aws4_request",
87 }, "/")
88 return scope
89}
90
91// GetCredential generate a credential string.
92func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string {
93 scope := getScope(location, t, serviceType)
94 return accessKeyID + "/" + scope
95}
96
97// getHashedPayload get the hexadecimal value of the SHA256 hash of
98// the request payload.
99func getHashedPayload(req http.Request) string {
100 hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
101 if hashedPayload == "" {
102 // Presign does not have a payload, use S3 recommended value.
103 hashedPayload = unsignedPayload
104 }
105 return hashedPayload
106}
107
108// getCanonicalHeaders generate a list of request headers for
109// signature.
110func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
111 var headers []string
112 vals := make(map[string][]string)
113 for k, vv := range req.Header {
114 if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
115 continue // ignored header
116 }
117 headers = append(headers, strings.ToLower(k))
118 vals[strings.ToLower(k)] = vv
119 }
120 if !headerExists("host", headers) {
121 headers = append(headers, "host")
122 }
123 sort.Strings(headers)
124
125 var buf bytes.Buffer
126 // Save all the headers in canonical form <header>:<value> newline
127 // separated for each header.
128 for _, k := range headers {
129 buf.WriteString(k)
130 buf.WriteByte(':')
131 switch {
132 case k == "host":
133 buf.WriteString(getHostAddr(&req))
134 buf.WriteByte('\n')
135 default:
136 for idx, v := range vals[k] {
137 if idx > 0 {
138 buf.WriteByte(',')
139 }
140 buf.WriteString(signV4TrimAll(v))
141 }
142 buf.WriteByte('\n')
143 }
144 }
145 return buf.String()
146}
147
148func headerExists(key string, headers []string) bool {
149 for _, k := range headers {
150 if k == key {
151 return true
152 }
153 }
154 return false
155}
156
157// getSignedHeaders generate all signed request headers.
158// i.e lexically sorted, semicolon-separated list of lowercase
159// request header names.
160func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
161 var headers []string
162 for k := range req.Header {
163 if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
164 continue // Ignored header found continue.
165 }
166 headers = append(headers, strings.ToLower(k))
167 }
168 if !headerExists("host", headers) {
169 headers = append(headers, "host")
170 }
171 sort.Strings(headers)
172 return strings.Join(headers, ";")
173}
174
175// getCanonicalRequest generate a canonical request of style.
176//
177// canonicalRequest =
178//
179// <HTTPMethod>\n
180// <CanonicalURI>\n
181// <CanonicalQueryString>\n
182// <CanonicalHeaders>\n
183// <SignedHeaders>\n
184// <HashedPayload>
185func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
186 req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
187 canonicalRequest := strings.Join([]string{
188 req.Method,
189 s3utils.EncodePath(req.URL.Path),
190 req.URL.RawQuery,
191 getCanonicalHeaders(req, ignoredHeaders),
192 getSignedHeaders(req, ignoredHeaders),
193 hashedPayload,
194 }, "\n")
195 return canonicalRequest
196}
197
198// getStringToSign a string based on selected query values.
199func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
200 stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
201 stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
202 stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest)))
203 return stringToSign
204}
205
// PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
//
// The signature parameters are carried in the URL query string rather
// than in headers; expires is the validity period in seconds. The
// request is taken by value so the caller's copy is not mutated.
func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
	// Presign is not needed for anonymous credentials.
	if accessKeyID == "" || secretAccessKey == "" {
		return &req
	}

	// Initial time.
	t := time.Now().UTC()

	// Get credential string.
	credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)

	// Get all signed headers.
	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)

	// Set URL query.
	query := req.URL.Query()
	query.Set("X-Amz-Algorithm", signV4Algorithm)
	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
	query.Set("X-Amz-SignedHeaders", signedHeaders)
	query.Set("X-Amz-Credential", credential)
	// Set session token if available.
	if sessionToken != "" {
		query.Set("X-Amz-Security-Token", sessionToken)
	}
	// The encoded query must be in place before the canonical request
	// is computed below, since the canonical request covers the query.
	req.URL.RawQuery = query.Encode()

	// Get canonical request.
	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))

	// Get string to sign from canonical request.
	stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)

	// Get hmac signing key.
	signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)

	// Calculate signature.
	signature := getSignature(signingKey, stringToSign)

	// Add signature to RawQuery. Appended after signing so the
	// signature itself is not part of the signed query string.
	req.URL.RawQuery += "&X-Amz-Signature=" + signature

	return &req
}
253
254// PostPresignSignatureV4 - presigned signature for PostPolicy
255// requests.
256func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
257 // Get signining key.
258 signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
259 // Calculate signature.
260 signature := getSignature(signingkey, policyBase64)
261 return signature
262}
263
// SignV4STS - signature v4 for STS request.
// STS requests are signed without a session token and without
// trailing headers.
func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
	return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil)
}
268
// Internal function called for different service types.
//
// Signs the request by setting an Authorization header. When trailer
// headers are supplied, aws-chunked metadata headers are added before
// signing (so they are covered by the signature) and the request is
// handed off to StreamingUnsignedV4 at the end.
func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request {
	// Signature calculation is not needed for anonymous credentials.
	if accessKeyID == "" || secretAccessKey == "" {
		return &req
	}

	// Initial time.
	t := time.Now().UTC()

	// Set x-amz-date.
	req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))

	// Set session token if available.
	if sessionToken != "" {
		req.Header.Set("X-Amz-Security-Token", sessionToken)
	}

	// Announce trailing headers and chunked content encoding before the
	// signed-header set is computed below.
	if len(trailer) > 0 {
		for k := range trailer {
			req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
		}

		req.Header.Set("Content-Encoding", "aws-chunked")
		req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
	}

	// Capture the payload hash before the header is possibly deleted.
	hashedPayload := getHashedPayload(req)
	if serviceType == ServiceTypeSTS {
		// Content sha256 header is not sent with the request
		// but it is expected to have sha256 of payload for signature
		// in STS service type request.
		req.Header.Del("X-Amz-Content-Sha256")
	}

	// Get canonical request.
	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload)

	// Get string to sign from canonical request.
	stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType)

	// Get hmac signing key.
	signingKey := getSigningKey(secretAccessKey, location, t, serviceType)

	// Get credential string.
	credential := GetCredential(accessKeyID, location, t, serviceType)

	// Get all signed headers.
	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)

	// Calculate signature.
	signature := getSignature(signingKey, stringToSign)

	// If regular request, construct the final authorization header.
	parts := []string{
		signV4Algorithm + " Credential=" + credential,
		"SignedHeaders=" + signedHeaders,
		"Signature=" + signature,
	}

	// Set authorization header.
	auth := strings.Join(parts, ", ")
	req.Header.Set("Authorization", auth)

	if len(trailer) > 0 {
		// Use custom chunked encoding.
		req.Trailer = trailer
		return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
	}
	return &req
}
340
// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
// This is the plain S3 variant without trailing headers.
func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil)
}
346
// SignV4Trailer sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
// Like SignV4, but declares the given trailing headers so the request
// is sent with aws-chunked encoding.
func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request {
	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer)
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
new file mode 100644
index 0000000..87c9939
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
@@ -0,0 +1,62 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package signer
19
20import (
21 "crypto/hmac"
22 "crypto/sha256"
23 "net/http"
24 "strings"
25)
26
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// the payload hash is not computed (e.g. streaming uploads).
const unsignedPayload = "UNSIGNED-PAYLOAD"

// sum256 calculate sha256 sum for an input byte array.
func sum256(data []byte) []byte {
	// sha256.Sum256 is the one-shot form; it avoids allocating a
	// hash.Hash as sha256.New + Write + Sum does.
	sum := sha256.Sum256(data)
	return sum[:]
}
36
// sumHMAC computes the HMAC-SHA256 of data under the given key.
func sumHMAC(key, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data)
	return mac.Sum(nil)
}
43
44// getHostAddr returns host header if available, otherwise returns host from URL
45func getHostAddr(req *http.Request) string {
46 host := req.Header.Get("host")
47 if host != "" && req.Host != host {
48 return host
49 }
50 if req.Host != "" {
51 return req.Host
52 }
53 return req.URL.Host
54}
55
// signV4TrimAll mirrors the AWS SigV4 Trimall() rule: strip leading
// and trailing whitespace and collapse interior whitespace runs to a
// single space, following
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
func signV4TrimAll(input string) string {
	// strings.Fields splits on runs of unicode whitespace, which both
	// trims the ends and collapses interior runs in a single pass.
	fields := strings.Fields(input)
	return strings.Join(fields, " ")
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
new file mode 100644
index 0000000..b5fb956
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
@@ -0,0 +1,66 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package sse
19
20import "encoding/xml"
21
// ApplySSEByDefault defines default encryption configuration, KMS or SSE. To activate
// KMS, SSEAlgorithm needs to be set to "aws:kms".
// Minio currently does not support KMS.
type ApplySSEByDefault struct {
	KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` // only used with "aws:kms"
	SSEAlgorithm   string `xml:"SSEAlgorithm"`             // e.g. "AES256" or "aws:kms"
}

// Rule layer encapsulates default encryption configuration
type Rule struct {
	Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
}

// Configuration is the default encryption configuration structure
type Configuration struct {
	XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
	Rules   []Rule   `xml:"Rule"`
}
40
41// NewConfigurationSSES3 initializes a new SSE-S3 configuration
42func NewConfigurationSSES3() *Configuration {
43 return &Configuration{
44 Rules: []Rule{
45 {
46 Apply: ApplySSEByDefault{
47 SSEAlgorithm: "AES256",
48 },
49 },
50 },
51 }
52}
53
54// NewConfigurationSSEKMS initializes a new SSE-KMS configuration
55func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration {
56 return &Configuration{
57 Rules: []Rule{
58 {
59 Apply: ApplySSEByDefault{
60 KmsMasterKeyID: kmsMasterKey,
61 SSEAlgorithm: "aws:kms",
62 },
63 },
64 },
65 }
66}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
new file mode 100644
index 0000000..7a84a6f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -0,0 +1,413 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2020-2022 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package tags
19
20import (
21 "encoding/xml"
22 "io"
23 "net/url"
24 "regexp"
25 "sort"
26 "strings"
27 "unicode/utf8"
28)
29
// Error contains tag specific error.
type Error interface {
	error
	Code() string
}

// errTag is the concrete Error implementation used by this package.
type errTag struct {
	code    string // machine-readable code, e.g. "InvalidTag"
	message string // human-readable description
}

// Code contains error code.
func (err errTag) Code() string {
	return err.code
}

// Error contains error message.
func (err errTag) Error() string {
	return err.message
}

// Sentinel validation errors returned by this package.
var (
	errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"}
	errTooManyTags       = &errTag{"BadRequest", "Tags cannot be more than 50"}
	errInvalidTagKey     = &errTag{"InvalidTag", "The TagKey you have provided is invalid"}
	errInvalidTagValue   = &errTag{"InvalidTag", "The TagValue you have provided is invalid"}
	errDuplicateTagKey   = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"}
)

// Tag comes with limitation as per
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
const (
	maxKeyLength      = 128 // length limits are measured in runes (see checkKey)
	maxValueLength    = 256
	maxObjectTagCount = 10 // per-object tag limit
	maxTagCount       = 50 // per-bucket tag limit
)

// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
// borrowed from this article and also testing various ASCII characters following regex
// is supported by AWS S3 for both tags and values.
var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
73
74func checkKey(key string) error {
75 if len(key) == 0 {
76 return errInvalidTagKey
77 }
78
79 if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) {
80 return errInvalidTagKey
81 }
82
83 return nil
84}
85
86func checkValue(value string) error {
87 if value != "" {
88 if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) {
89 return errInvalidTagValue
90 }
91 }
92
93 return nil
94}
95
// Tag denotes key and value.
type Tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

// String returns the tag in "key=value" form (no escaping applied).
func (tag Tag) String() string {
	return tag.Key + "=" + tag.Value
}

// IsEmpty returns whether this tag is empty or not.
// A tag with an empty key counts as empty regardless of its value.
func (tag Tag) IsEmpty() bool {
	return tag.Key == ""
}

// Validate checks this tag: the key via checkKey and the value via
// checkValue.
func (tag Tag) Validate() error {
	if err := checkKey(tag.Key); err != nil {
		return err
	}

	return checkValue(tag.Value)
}
119
120// MarshalXML encodes to XML data.
121func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
122 if err := tag.Validate(); err != nil {
123 return err
124 }
125
126 type subTag Tag // to avoid recursively calling MarshalXML()
127 return e.EncodeElement(subTag(tag), start)
128}
129
130// UnmarshalXML decodes XML data to tag.
131func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
132 type subTag Tag // to avoid recursively calling UnmarshalXML()
133 var st subTag
134 if err := d.DecodeElement(&st, &start); err != nil {
135 return err
136 }
137
138 if err := Tag(st).Validate(); err != nil {
139 return err
140 }
141
142 *tag = Tag(st)
143 return nil
144}
145
// tagSet represents list of unique tags.
type tagSet struct {
	tagMap   map[string]string // key -> value; uniqueness enforced by the map
	isObject bool              // true: object-tag limits apply; false: bucket-tag limits
}

// String encodes the tags as an HTTP query string ("k1=v1&k2=v2"),
// with keys sorted and both keys and values query-escaped.
func (tags tagSet) String() string {
	if len(tags.tagMap) == 0 {
		return ""
	}
	var buf strings.Builder
	keys := make([]string, 0, len(tags.tagMap))
	for k := range tags.tagMap {
		keys = append(keys, k)
	}
	// Sort for deterministic output (map iteration order is random).
	sort.Strings(keys)
	for _, k := range keys {
		keyEscaped := url.QueryEscape(k)
		valueEscaped := url.QueryEscape(tags.tagMap[k])
		if buf.Len() > 0 {
			buf.WriteByte('&')
		}
		buf.WriteString(keyEscaped)
		buf.WriteByte('=')
		buf.WriteString(valueEscaped)
	}
	return buf.String()
}
174
// remove deletes the tag with the given key; missing keys are a no-op.
func (tags *tagSet) remove(key string) {
	delete(tags.tagMap, key)
}

// set validates and stores a key/value pair. When failOnExist is true,
// an already-present key is rejected with errDuplicateTagKey. Note the
// duplicate check runs before key/value validation, so a duplicate of
// an invalid key still reports errDuplicateTagKey.
func (tags *tagSet) set(key, value string, failOnExist bool) error {
	if failOnExist {
		if _, found := tags.tagMap[key]; found {
			return errDuplicateTagKey
		}
	}

	if err := checkKey(key); err != nil {
		return err
	}

	if err := checkValue(value); err != nil {
		return err
	}

	// Enforce the count limit appropriate to the tag-set kind.
	if tags.isObject {
		if len(tags.tagMap) == maxObjectTagCount {
			return errTooManyObjectTags
		}
	} else if len(tags.tagMap) == maxTagCount {
		return errTooManyTags
	}

	tags.tagMap[key] = value
	return nil
}
205
// count returns the number of tags currently stored.
func (tags tagSet) count() int {
	return len(tags.tagMap)
}

// toMap returns a shallow copy of the underlying map, so callers
// cannot mutate the tag set through the returned value.
func (tags tagSet) toMap() map[string]string {
	m := make(map[string]string, len(tags.tagMap))
	for key, value := range tags.tagMap {
		m[key] = value
	}
	return m
}
217
218// MarshalXML encodes to XML data.
219func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
220 tagList := struct {
221 Tags []Tag `xml:"Tag"`
222 }{}
223
224 tagList.Tags = make([]Tag, 0, len(tags.tagMap))
225 for key, value := range tags.tagMap {
226 tagList.Tags = append(tagList.Tags, Tag{key, value})
227 }
228
229 return e.EncodeElement(tagList, start)
230}
231
// UnmarshalXML decodes XML data to tag list.
// Individual tags are validated by Tag.UnmarshalXML; this method
// additionally enforces the count limit and key uniqueness.
func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	tagList := struct {
		Tags []Tag `xml:"Tag"`
	}{}

	if err := d.DecodeElement(&tagList, &start); err != nil {
		return err
	}

	// Reject lists exceeding the object/bucket tag count limit.
	if tags.isObject {
		if len(tagList.Tags) > maxObjectTagCount {
			return errTooManyObjectTags
		}
	} else if len(tagList.Tags) > maxTagCount {
		return errTooManyTags
	}

	// Build the map, rejecting duplicate keys.
	m := make(map[string]string, len(tagList.Tags))
	for _, tag := range tagList.Tags {
		if _, found := m[tag.Key]; found {
			return errDuplicateTagKey
		}

		m[tag.Key] = tag.Value
	}

	// Replace the map only after full validation succeeds.
	tags.tagMap = m
	return nil
}
262
// tagging is the XML document wrapper for a tag set.
type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  *tagSet  `xml:"TagSet"`
}

// Tags is list of tags of XML request/response as per
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody
type Tags tagging

// String encodes the tags as an HTTP query string.
func (tags Tags) String() string {
	return tags.TagSet.String()
}

// Remove removes a tag by its key.
func (tags *Tags) Remove(key string) {
	tags.TagSet.remove(key)
}

// Set sets new tag.
// An existing key is overwritten; key and value are validated.
func (tags *Tags) Set(key, value string) error {
	return tags.TagSet.set(key, value, false)
}

// Count - return number of tags accounted for
func (tags Tags) Count() int {
	return tags.TagSet.count()
}

// ToMap returns copy of tags.
func (tags Tags) ToMap() map[string]string {
	return tags.TagSet.toMap()
}
295
// MapToObjectTags converts an input map of key and value into
// *Tags data structure with validation (object limits: at most 10 tags).
func MapToObjectTags(tagMap map[string]string) (*Tags, error) {
	return NewTags(tagMap, true)
}

// MapToBucketTags converts an input map of key and value into
// *Tags data structure with validation (bucket limits: at most 50 tags).
func MapToBucketTags(tagMap map[string]string) (*Tags, error) {
	return NewTags(tagMap, false)
}
307
308// NewTags creates Tags from tagMap, If isObject is set, it validates for object tags.
309func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) {
310 tagging := &Tags{
311 TagSet: &tagSet{
312 tagMap: make(map[string]string),
313 isObject: isObject,
314 },
315 }
316
317 for key, value := range tagMap {
318 if err := tagging.TagSet.set(key, value, true); err != nil {
319 return nil, err
320 }
321 }
322
323 return tagging, nil
324}
325
326func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) {
327 tagging := &Tags{
328 TagSet: &tagSet{
329 tagMap: make(map[string]string),
330 isObject: isObject,
331 },
332 }
333
334 if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
335 return nil, err
336 }
337
338 return tagging, nil
339}
340
// ParseBucketXML decodes XML data of tags in reader specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax.
// Bucket limits apply (at most 50 tags).
func ParseBucketXML(reader io.Reader) (*Tags, error) {
	return unmarshalXML(reader, false)
}

// ParseObjectXML decodes XML data of tags in reader specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax
// Object limits apply (at most 10 tags).
func ParseObjectXML(reader io.Reader) (*Tags, error) {
	return unmarshalXML(reader, true)
}
352
// stringsCut slices s around the first instance of sep, returning the
// text before and after sep; found reports whether sep appears in s.
// Local copy of strings.Cut kept for compatibility with Go < 1.18.
func stringsCut(s, sep string) (before, after string, found bool) {
	i := strings.Index(s, sep)
	if i < 0 {
		return s, "", false
	}
	return s[:i], s[i+len(sep):], true
}
363
// parseTags decodes an HTTP-query-formatted string ("k1=v1&k2=v2")
// into the tag set. Pairs that fail URL-unescaping are skipped, but
// the first such error is remembered and returned at the end; a
// validation error from set() aborts parsing immediately.
func (tags *tagSet) parseTags(tgs string) (err error) {
	for tgs != "" {
		var key string
		key, tgs, _ = stringsCut(tgs, "&")
		if key == "" {
			continue
		}
		key, value, _ := stringsCut(key, "=")
		key, err1 := url.QueryUnescape(key)
		if err1 != nil {
			// Remember only the first unescape error.
			if err == nil {
				err = err1
			}
			continue
		}
		value, err1 = url.QueryUnescape(value)
		if err1 != nil {
			if err == nil {
				err = err1
			}
			continue
		}
		// Duplicate keys are rejected (failOnExist=true).
		if err = tags.set(key, value, true); err != nil {
			return err
		}
	}
	return err
}
392
393// Parse decodes HTTP query formatted string into tags which is limited by isObject.
394// A query formatted string is like "key1=value1&key2=value2".
395func Parse(s string, isObject bool) (*Tags, error) {
396 tagging := &Tags{
397 TagSet: &tagSet{
398 tagMap: make(map[string]string),
399 isObject: isObject,
400 },
401 }
402
403 if err := tagging.TagSet.parseTags(s); err != nil {
404 return nil, err
405 }
406
407 return tagging, nil
408}
409
// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2".
// Object limits apply (at most 10 tags).
func ParseObjectTags(s string) (*Tags, error) {
	return Parse(s, true)
}
diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go
new file mode 100644
index 0000000..3f4881e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/post-policy.go
@@ -0,0 +1,349 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2023 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "encoding/base64"
22 "fmt"
23 "net/http"
24 "strings"
25 "time"
26
27 "github.com/minio/minio-go/v7/pkg/encrypt"
28)
29
// expirationDateFormat date format for expiration key in json policy.
const expirationDateFormat = "2006-01-02T15:04:05.000Z"

// policyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
//
// Example:
//
//	policyCondition {
//	    matchType: "$eq",
//	    key: "$Content-Type",
//	    value: "image/png",
//	}
type policyCondition struct {
	matchType string // "eq" or "starts-with"
	condition string // form-field name, prefixed with "$"
	value     string
}

// PostPolicy - Provides strict static type conversion and validation
// for Amazon S3's POST policy JSON string.
type PostPolicy struct {
	// Expiration date and time of the POST policy.
	expiration time.Time
	// Collection of different policy conditions.
	conditions []policyCondition
	// ContentLengthRange minimum and maximum allowable size for the
	// uploaded content.
	contentLengthRange struct {
		min int64
		max int64
	}

	// Post form data.
	formData map[string]string
}
66
// NewPostPolicy - Instantiate new post policy.
func NewPostPolicy() *PostPolicy {
	p := &PostPolicy{}
	p.conditions = make([]policyCondition, 0)
	p.formData = make(map[string]string)
	return p
}

// SetExpires - Sets expiration time for the new policy.
// A zero time is rejected.
func (p *PostPolicy) SetExpires(t time.Time) error {
	if t.IsZero() {
		return errInvalidArgument("No expiry time set.")
	}
	p.expiration = t
	return nil
}
83
84// SetKey - Sets an object name for the policy based upload.
85func (p *PostPolicy) SetKey(key string) error {
86 if strings.TrimSpace(key) == "" || key == "" {
87 return errInvalidArgument("Object name is empty.")
88 }
89 policyCond := policyCondition{
90 matchType: "eq",
91 condition: "$key",
92 value: key,
93 }
94 if err := p.addNewPolicy(policyCond); err != nil {
95 return err
96 }
97 p.formData["key"] = key
98 return nil
99}
100
101// SetKeyStartsWith - Sets an object name that an policy based upload
102// can start with.
103// Can use an empty value ("") to allow any key.
104func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
105 policyCond := policyCondition{
106 matchType: "starts-with",
107 condition: "$key",
108 value: keyStartsWith,
109 }
110 if err := p.addNewPolicy(policyCond); err != nil {
111 return err
112 }
113 p.formData["key"] = keyStartsWith
114 return nil
115}
116
117// SetBucket - Sets bucket at which objects will be uploaded to.
118func (p *PostPolicy) SetBucket(bucketName string) error {
119 if strings.TrimSpace(bucketName) == "" || bucketName == "" {
120 return errInvalidArgument("Bucket name is empty.")
121 }
122 policyCond := policyCondition{
123 matchType: "eq",
124 condition: "$bucket",
125 value: bucketName,
126 }
127 if err := p.addNewPolicy(policyCond); err != nil {
128 return err
129 }
130 p.formData["bucket"] = bucketName
131 return nil
132}
133
134// SetCondition - Sets condition for credentials, date and algorithm
135func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
136 if strings.TrimSpace(value) == "" || value == "" {
137 return errInvalidArgument("No value specified for condition")
138 }
139
140 policyCond := policyCondition{
141 matchType: matchType,
142 condition: "$" + condition,
143 value: value,
144 }
145 if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" {
146 if err := p.addNewPolicy(policyCond); err != nil {
147 return err
148 }
149 p.formData[condition] = value
150 return nil
151 }
152 return errInvalidArgument("Invalid condition in policy")
153}
154
// SetContentType - Sets content-type of the object for this policy
// based upload.
func (p *PostPolicy) SetContentType(contentType string) error {
	// NOTE(review): the second contentType == "" test is redundant
	// since TrimSpace("") is already "".
	if strings.TrimSpace(contentType) == "" || contentType == "" {
		return errInvalidArgument("No content type specified.")
	}
	policyCond := policyCondition{
		matchType: "eq",
		condition: "$Content-Type",
		value:     contentType,
	}
	if err := p.addNewPolicy(policyCond); err != nil {
		return err
	}
	p.formData["Content-Type"] = contentType
	return nil
}

// SetContentTypeStartsWith - Sets what content-type of the object for this policy
// based upload can start with.
// Can use an empty value ("") to allow any content-type.
func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error {
	policyCond := policyCondition{
		matchType: "starts-with",
		condition: "$Content-Type",
		value:     contentTypeStartsWith,
	}
	if err := p.addNewPolicy(policyCond); err != nil {
		return err
	}
	p.formData["Content-Type"] = contentTypeStartsWith
	return nil
}
188
189// SetContentLengthRange - Set new min and max content length
190// condition for all incoming uploads.
191func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
192 if min > max {
193 return errInvalidArgument("Minimum limit is larger than maximum limit.")
194 }
195 if min < 0 {
196 return errInvalidArgument("Minimum limit cannot be negative.")
197 }
198 if max <= 0 {
199 return errInvalidArgument("Maximum limit cannot be non-positive.")
200 }
201 p.contentLengthRange.min = min
202 p.contentLengthRange.max = max
203 return nil
204}
205
// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy
// based upload.
func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
	// NOTE(review): the second redirect == "" test is redundant since
	// TrimSpace("") is already "".
	if strings.TrimSpace(redirect) == "" || redirect == "" {
		return errInvalidArgument("Redirect is empty")
	}
	policyCond := policyCondition{
		matchType: "eq",
		condition: "$success_action_redirect",
		value:     redirect,
	}
	if err := p.addNewPolicy(policyCond); err != nil {
		return err
	}
	p.formData["success_action_redirect"] = redirect
	return nil
}

// SetSuccessStatusAction - Sets the status success code of the object for this policy
// based upload.
func (p *PostPolicy) SetSuccessStatusAction(status string) error {
	if strings.TrimSpace(status) == "" || status == "" {
		return errInvalidArgument("Status is empty")
	}
	policyCond := policyCondition{
		matchType: "eq",
		condition: "$success_action_status",
		value:     status,
	}
	if err := p.addNewPolicy(policyCond); err != nil {
		return err
	}
	p.formData["success_action_status"] = status
	return nil
}
241
// SetUserMetadata - Set user metadata as a key/value couple.
// Can be retrieved through a HEAD request or an event.
// The key is stored under the "x-amz-meta-" prefix.
func (p *PostPolicy) SetUserMetadata(key, value string) error {
	if strings.TrimSpace(key) == "" || key == "" {
		return errInvalidArgument("Key is empty")
	}
	if strings.TrimSpace(value) == "" || value == "" {
		return errInvalidArgument("Value is empty")
	}
	headerName := fmt.Sprintf("x-amz-meta-%s", key)
	policyCond := policyCondition{
		matchType: "eq",
		condition: fmt.Sprintf("$%s", headerName),
		value:     value,
	}
	if err := p.addNewPolicy(policyCond); err != nil {
		return err
	}
	p.formData[headerName] = value
	return nil
}

// SetChecksum sets the checksum of the request.
// No-op when the checksum is unset; otherwise the algorithm name and
// encoded value are added to the form data.
func (p *PostPolicy) SetChecksum(c Checksum) {
	if c.IsSet() {
		p.formData[amzChecksumAlgo] = c.Type.String()
		p.formData[c.Type.Key()] = c.Encoded()
	}
}

// SetEncryption - sets encryption headers for POST API
// Each header produced by sse.Marshal becomes one form-data entry;
// only the first value of each header is used.
func (p *PostPolicy) SetEncryption(sse encrypt.ServerSide) {
	if sse == nil {
		return
	}
	h := http.Header{}
	sse.Marshal(h)
	for k, v := range h {
		p.formData[k] = v[0]
	}
}

// SetUserData - Set user data as a key/value couple.
// Can be retrieved through a HEAD request or an event.
// Unlike SetUserMetadata, whitespace-only keys/values are accepted
// here — only strictly empty strings are rejected.
func (p *PostPolicy) SetUserData(key, value string) error {
	if key == "" {
		return errInvalidArgument("Key is empty")
	}
	if value == "" {
		return errInvalidArgument("Value is empty")
	}
	headerName := fmt.Sprintf("x-amz-%s", key)
	policyCond := policyCondition{
		matchType: "eq",
		condition: fmt.Sprintf("$%s", headerName),
		value:     value,
	}
	if err := p.addNewPolicy(policyCond); err != nil {
		return err
	}
	p.formData[headerName] = value
	return nil
}
305
306// addNewPolicy - internal helper to validate adding new policies.
307// Can use starts-with with an empty value ("") to allow any content within a form field.
308func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
309 if policyCond.matchType == "" || policyCond.condition == "" {
310 return errInvalidArgument("Policy fields are empty.")
311 }
312 if policyCond.matchType != "starts-with" && policyCond.value == "" {
313 return errInvalidArgument("Policy value is empty.")
314 }
315 p.conditions = append(p.conditions, policyCond)
316 return nil
317}
318
// String function for printing policy in json formatted string.
func (p PostPolicy) String() string {
	return string(p.marshalJSON())
}

// marshalJSON - Provides Marshaled JSON in bytes.
// NOTE(review): condition values are interpolated with %s and are NOT
// JSON-escaped; a value containing '"' or '\' would yield invalid
// JSON. Inputs appear to be validated by the setters — confirm before
// passing arbitrary data through this path.
func (p PostPolicy) marshalJSON() []byte {
	expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
	var conditionsStr string
	conditions := []string{}
	for _, po := range p.conditions {
		conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
	}
	// content-length-range is only emitted when explicitly configured.
	if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
		conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
			p.contentLengthRange.min, p.contentLengthRange.max))
	}
	if len(conditions) > 0 {
		conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
	}
	retStr := "{"
	retStr = retStr + expirationStr + ","
	retStr += conditionsStr
	retStr += "}"
	return []byte(retStr)
}

// base64 - Produces base64 of PostPolicy's Marshaled json.
func (p PostPolicy) base64() string {
	return base64.StdEncoding.EncodeToString(p.marshalJSON())
}
diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go
new file mode 100644
index 0000000..bfeea95
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go
@@ -0,0 +1,69 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import "time"
21
22// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
23func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
24 attemptCh := make(chan int)
25
26 // normalize jitter to the range [0, 1.0]
27 if jitter < NoJitter {
28 jitter = NoJitter
29 }
30 if jitter > MaxJitter {
31 jitter = MaxJitter
32 }
33
34 // computes the exponential backoff duration according to
35 // https://www.awsarchitectureblog.com/2015/03/backoff.html
36 exponentialBackoffWait := func(attempt int) time.Duration {
37 // 1<<uint(attempt) below could overflow, so limit the value of attempt
38 maxAttempt := 30
39 if attempt > maxAttempt {
40 attempt = maxAttempt
41 }
42 // sleep = random_between(0, min(cap, base * 2 ** attempt))
43 sleep := unit * time.Duration(1<<uint(attempt))
44 if sleep > cap {
45 sleep = cap
46 }
47 if jitter != NoJitter {
48 sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
49 }
50 return sleep
51 }
52
53 go func() {
54 defer close(attemptCh)
55 var nextBackoff int
56 for {
57 select {
58 // Attempts starts.
59 case attemptCh <- nextBackoff:
60 nextBackoff++
61 case <-doneCh:
62 // Stop the routine.
63 return
64 }
65 time.Sleep(exponentialBackoffWait(nextBackoff))
66 }
67 }()
68 return attemptCh
69}
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
new file mode 100644
index 0000000..1c6105e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -0,0 +1,148 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "crypto/x509"
23 "errors"
24 "net/http"
25 "net/url"
26 "time"
27)
28
// MaxRetry is the maximum number of retries before stopping.
var MaxRetry = 10

const (
	// MaxJitter will randomize over the full exponential backoff time
	MaxJitter = 1.0

	// NoJitter disables the use of jitter for randomizing the exponential backoff time
	NoJitter = 0.0
)

var (
	// DefaultRetryUnit - default unit multiplicative per retry.
	// defaults to 200 * time.Millisecond
	DefaultRetryUnit = 200 * time.Millisecond

	// DefaultRetryCap - Each retry attempt never waits no longer than
	// this maximum time duration.
	DefaultRetryCap = time.Second
)
45
46// newRetryTimer creates a timer with exponentially increasing
47// delays until the maximum retry attempts are reached.
48func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int {
49 attemptCh := make(chan int)
50
51 // computes the exponential backoff duration according to
52 // https://www.awsarchitectureblog.com/2015/03/backoff.html
53 exponentialBackoffWait := func(attempt int) time.Duration {
54 // normalize jitter to the range [0, 1.0]
55 if jitter < NoJitter {
56 jitter = NoJitter
57 }
58 if jitter > MaxJitter {
59 jitter = MaxJitter
60 }
61
62 // sleep = random_between(0, min(cap, base * 2 ** attempt))
63 sleep := unit * time.Duration(1<<uint(attempt))
64 if sleep > cap {
65 sleep = cap
66 }
67 if jitter != NoJitter {
68 sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
69 }
70 return sleep
71 }
72
73 go func() {
74 defer close(attemptCh)
75 for i := 0; i < maxRetry; i++ {
76 select {
77 case attemptCh <- i + 1:
78 case <-ctx.Done():
79 return
80 }
81
82 select {
83 case <-time.After(exponentialBackoffWait(i)):
84 case <-ctx.Done():
85 return
86 }
87 }
88 }()
89 return attemptCh
90}
91
// List of AWS S3 error codes which are retryable.
var retryableS3Codes = map[string]struct{}{
	"RequestError":          {},
	"RequestTimeout":        {},
	"Throttling":            {},
	"ThrottlingException":   {},
	"RequestLimitExceeded":  {},
	"RequestThrottled":      {},
	"InternalError":         {},
	"ExpiredToken":          {},
	"ExpiredTokenException": {},
	"SlowDown":              {},
	// Add more AWS S3 codes here.
}

// isS3CodeRetryable - is s3 error code retryable.
func isS3CodeRetryable(s3Code string) bool {
	_, retryable := retryableS3Codes[s3Code]
	return retryable
}
112
// List of HTTP status codes which are retryable.
var retryableHTTPStatusCodes = map[int]struct{}{
	429:                            {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
	499:                            {}, // client closed request, retry. A non-standard status code introduced by nginx.
	http.StatusInternalServerError: {},
	http.StatusBadGateway:          {},
	http.StatusServiceUnavailable:  {},
	http.StatusGatewayTimeout:      {},
	// Add more HTTP status codes here.
}

// isHTTPStatusRetryable - is HTTP error code retryable.
func isHTTPStatusRetryable(httpStatusCode int) bool {
	_, retryable := retryableHTTPStatusCodes[httpStatusCode]
	return retryable
}
129
// isRequestErrorRetryable reports whether an error returned from an
// http Do() call is worth retrying. All transport errors are retryable
// except a few that are known to be permanent: caller-side context
// cancellation/deadline, unknown CA, and HTTP-on-HTTPS mismatches.
func isRequestErrorRetryable(err error) bool {
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		// The caller gave up; retrying cannot succeed.
		return false
	}
	// Use errors.As instead of a direct type assertion so that a
	// *url.Error wrapped by an intermediate layer is still detected.
	var ue *url.Error
	if errors.As(err, &ue) {
		e := ue.Unwrap()
		switch e.(type) {
		// x509: certificate signed by unknown authority
		case x509.UnknownAuthorityError:
			return false
		}
		switch e.Error() {
		case "http: server gave HTTP response to HTTPS client":
			return false
		}
	}
	return true
}
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
new file mode 100644
index 0000000..b1de7b6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
@@ -0,0 +1,64 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
// awsS3EndpointMap Amazon S3 endpoint map.
var awsS3EndpointMap = map[string]string{
	"us-east-1":      "s3.dualstack.us-east-1.amazonaws.com",
	"us-east-2":      "s3.dualstack.us-east-2.amazonaws.com",
	"us-west-2":      "s3.dualstack.us-west-2.amazonaws.com",
	"us-west-1":      "s3.dualstack.us-west-1.amazonaws.com",
	"ca-central-1":   "s3.dualstack.ca-central-1.amazonaws.com",
	"eu-west-1":      "s3.dualstack.eu-west-1.amazonaws.com",
	"eu-west-2":      "s3.dualstack.eu-west-2.amazonaws.com",
	"eu-west-3":      "s3.dualstack.eu-west-3.amazonaws.com",
	"eu-central-1":   "s3.dualstack.eu-central-1.amazonaws.com",
	"eu-central-2":   "s3.dualstack.eu-central-2.amazonaws.com",
	"eu-north-1":     "s3.dualstack.eu-north-1.amazonaws.com",
	"eu-south-1":     "s3.dualstack.eu-south-1.amazonaws.com",
	"eu-south-2":     "s3.dualstack.eu-south-2.amazonaws.com",
	"ap-east-1":      "s3.dualstack.ap-east-1.amazonaws.com",
	"ap-south-1":     "s3.dualstack.ap-south-1.amazonaws.com",
	"ap-south-2":     "s3.dualstack.ap-south-2.amazonaws.com",
	"ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com",
	"ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com",
	"ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com",
	"ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com",
	"ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com",
	"af-south-1":     "s3.dualstack.af-south-1.amazonaws.com",
	"me-central-1":   "s3.dualstack.me-central-1.amazonaws.com",
	"me-south-1":     "s3.dualstack.me-south-1.amazonaws.com",
	"sa-east-1":      "s3.dualstack.sa-east-1.amazonaws.com",
	"us-gov-west-1":  "s3.dualstack.us-gov-west-1.amazonaws.com",
	"us-gov-east-1":  "s3.dualstack.us-gov-east-1.amazonaws.com",
	"cn-north-1":     "s3.dualstack.cn-north-1.amazonaws.com.cn",
	"cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
	"ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com",
	"ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com",
	"il-central-1":   "s3.dualstack.il-central-1.amazonaws.com",
}

// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
func getS3Endpoint(bucketLocation string) string {
	if endpoint, ok := awsS3EndpointMap[bucketLocation]; ok {
		return endpoint
	}
	// Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint.
	return "s3.dualstack.us-east-1.amazonaws.com"
}
diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go
new file mode 100644
index 0000000..f365157
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/s3-error.go
@@ -0,0 +1,61 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
// Non exhaustive list of AWS S3 standard error responses -
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
// Entries are kept in alphabetical order by error code.
var s3ErrorResponseMap = map[string]string{
	"AccessDenied":                      "Access Denied.",
	"AllAccessDisabled":                 "All access to this bucket has been disabled.",
	"AuthorizationHeaderMalformed":      "The authorization header is malformed; the region is wrong.",
	"AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
	"BadDigest":                         "The Content-Md5 you specified did not match what we received.",
	"BucketAlreadyOwnedByYou":           "Your previous request to create the named bucket succeeded and you already own it.",
	"BucketNotEmpty":                    "The bucket you tried to delete is not empty",
	"EntityTooLarge":                    "Your proposed upload exceeds the maximum allowed object size.",
	"EntityTooSmall":                    "Your proposed upload is smaller than the minimum allowed object size.",
	"IncompleteBody":                    "You did not provide the number of bytes specified by the Content-Length HTTP header.",
	"InternalError":                     "We encountered an internal error, please try again.",
	"InvalidAccessKeyId":                "The access key ID you provided does not exist in our records.",
	"InvalidBucketName":                 "The specified bucket is not valid.",
	"InvalidDigest":                     "The Content-Md5 you specified is not valid.",
	"InvalidDuration":                   "Duration provided in the request is invalid.",
	"InvalidObjectState":                "The operation is not valid for the current state of the object.",
	"InvalidPart":                       "One or more of the specified parts could not be found.",
	"InvalidPartOrder":                  "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
	"InvalidRange":                      "The requested range is not satisfiable",
	"MalformedDate":                     "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
	"MalformedPOSTRequest":              "The body of your POST request is not well-formed multipart/form-data.",
	"MalformedPolicy":                   "Policy has invalid resource.",
	"MalformedXML":                      "The XML you provided was not well-formed or did not validate against our published schema.",
	"MethodNotAllowed":                  "The specified method is not allowed against this resource.",
	"MissingContentLength":              "You must provide the Content-Length HTTP header.",
	"MissingContentMD5":                 "Missing required header for this request: Content-Md5.",
	"MissingFields":                     "Missing fields in request.",
	"MissingRequestBodyError":           "Request body is empty.",
	"NoSuchBucket":                      "The specified bucket does not exist.",
	"NoSuchBucketPolicy":                "The bucket policy does not exist",
	"NoSuchKey":                         "The specified key does not exist.",
	"NoSuchUpload":                      "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
	"NotImplemented":                    "A header you provided implies functionality that is not implemented",
	"PreconditionFailed":                "At least one of the pre-conditions you specified did not hold",
	"RequestTimeTooSkewed":              "The difference between the request time and the server's time is too large.",
	"SignatureDoesNotMatch":             "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
	"XAmzContentSHA256Mismatch":         "The provided 'x-amz-content-sha256' header does not match what was computed.",
	// Add new API errors here.
}
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
new file mode 100644
index 0000000..1bff664
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/transport.go
@@ -0,0 +1,83 @@
1//go:build go1.7 || go1.8
2// +build go1.7 go1.8
3
4/*
5 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
6 * Copyright 2017-2018 MinIO, Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21package minio
22
23import (
24 "crypto/tls"
25 "crypto/x509"
26 "net"
27 "net/http"
28 "os"
29 "time"
30)
31
// mustGetSystemCertPool - return system CAs or empty pool in case of error (or windows)
func mustGetSystemCertPool() *x509.CertPool {
	if pool, err := x509.SystemCertPool(); err == nil {
		return pool
	}
	return x509.NewCertPool()
}

// DefaultTransport - this default transport is similar to
// http.DefaultTransport but with additional param DisableCompression
// is set to true to avoid decompressing content with 'gzip' encoding.
var DefaultTransport = func(secure bool) (*http.Transport, error) {
	dialer := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}
	tr := &http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		DialContext:           dialer.DialContext,
		MaxIdleConns:          256,
		MaxIdleConnsPerHost:   16,
		ResponseHeaderTimeout: time.Minute,
		IdleConnTimeout:       time.Minute,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 10 * time.Second,
		// Set this value so that the underlying transport round-tripper
		// doesn't try to auto decode the body of objects with
		// content-encoding set to `gzip`.
		//
		// Refer:
		// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
		DisableCompression: true,
	}
	if !secure {
		return tr, nil
	}

	tr.TLSClientConfig = &tls.Config{
		// Can't use SSLv3 because of POODLE and BEAST
		// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
		// Can't use TLSv1.1 because of RC4 cipher usage
		MinVersion: tls.VersionTLS12,
	}
	if certFile := os.Getenv("SSL_CERT_FILE"); certFile != "" {
		rootCAs := mustGetSystemCertPool()
		// Best effort: an unreadable SSL_CERT_FILE leaves the system pool as-is.
		if pem, err := os.ReadFile(certFile); err == nil {
			rootCAs.AppendCertsFromPEM(pem)
		}
		tr.TLSClientConfig.RootCAs = rootCAs
	}
	return tr, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
new file mode 100644
index 0000000..e39eba0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -0,0 +1,693 @@
1/*
2 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
3 * Copyright 2015-2017 MinIO, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18package minio
19
20import (
21 "context"
22 "crypto/md5"
23 fipssha256 "crypto/sha256"
24 "encoding/base64"
25 "encoding/hex"
26 "encoding/xml"
27 "errors"
28 "fmt"
29 "hash"
30 "io"
31 "math/rand"
32 "net"
33 "net/http"
34 "net/url"
35 "regexp"
36 "strconv"
37 "strings"
38 "sync"
39 "time"
40
41 md5simd "github.com/minio/md5-simd"
42 "github.com/minio/minio-go/v7/pkg/encrypt"
43 "github.com/minio/minio-go/v7/pkg/s3utils"
44 "github.com/minio/sha256-simd"
45)
46
// trimEtag removes one leading and one trailing double quote from an ETag value.
func trimEtag(etag string) string {
	return strings.TrimSuffix(strings.TrimPrefix(etag, `"`), `"`)
}
51
52var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)
53
54func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
55 if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
56 expTime, err := parseRFC7231Time(matches[1])
57 if err != nil {
58 return time.Time{}, ""
59 }
60 return expTime, matches[2]
61 }
62 return time.Time{}, ""
63}
64
65var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)
66
67func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) {
68 matches := restoreRegex.FindStringSubmatch(restore)
69 if len(matches) != 4 {
70 return false, time.Time{}, errors.New("unexpected restore header")
71 }
72 ongoing, err = strconv.ParseBool(matches[1])
73 if err != nil {
74 return false, time.Time{}, err
75 }
76 if matches[3] != "" {
77 expTime, err = parseRFC7231Time(matches[3])
78 if err != nil {
79 return false, time.Time{}, err
80 }
81 }
82 return
83}
84
// xmlDecoder provide decoded value in xml.
func xmlDecoder(body io.Reader, v interface{}) error {
	return xml.NewDecoder(body).Decode(v)
}
90
91// sum256 calculate sha256sum for an input byte array, returns hex encoded.
92func sum256Hex(data []byte) string {
93 hash := newSHA256Hasher()
94 defer hash.Close()
95 hash.Write(data)
96 return hex.EncodeToString(hash.Sum(nil))
97}
98
99// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded.
100func sumMD5Base64(data []byte) string {
101 hash := newMd5Hasher()
102 defer hash.Close()
103 hash.Write(data)
104 return base64.StdEncoding.EncodeToString(hash.Sum(nil))
105}
106
107// getEndpointURL - construct a new endpoint.
108func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
109 // If secure is false, use 'http' scheme.
110 scheme := "https"
111 if !secure {
112 scheme = "http"
113 }
114
115 // Construct a secured endpoint URL.
116 endpointURLStr := scheme + "://" + endpoint
117 endpointURL, err := url.Parse(endpointURLStr)
118 if err != nil {
119 return nil, err
120 }
121
122 // Validate incoming endpoint URL.
123 if err := isValidEndpointURL(*endpointURL); err != nil {
124 return nil, err
125 }
126 return endpointURL, nil
127}
128
// closeResponse close non nil response with any response Body.
// convenient wrapper to drain any remaining data on response body.
//
// Subsequently this allows golang http RoundTripper
// to re-use the same connection for future requests.
func closeResponse(resp *http.Response) {
	if resp == nil || resp.Body == nil {
		return
	}
	// Drain any remaining Body and then close the connection.
	// Without this, the Client's underlying RoundTripper (typically
	// Transport) may not be able to re-use a persistent TCP connection
	// for a subsequent "keep-alive" request.
	// - http://stackoverflow.com/a/17961593/4465767
	io.Copy(io.Discard, resp.Body)
	resp.Body.Close()
}
148
149var (
150 // Hex encoded string of nil sha256sum bytes.
151 emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
152
153 // Sentinel URL is the default url value which is invalid.
154 sentinelURL = url.URL{}
155)
156
157// Verify if input endpoint URL is valid.
158func isValidEndpointURL(endpointURL url.URL) error {
159 if endpointURL == sentinelURL {
160 return errInvalidArgument("Endpoint url cannot be empty.")
161 }
162 if endpointURL.Path != "/" && endpointURL.Path != "" {
163 return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
164 }
165 host := endpointURL.Hostname()
166 if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
167 msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards."
168 return errInvalidArgument(msg)
169 }
170
171 if strings.Contains(host, ".s3.amazonaws.com") {
172 if !s3utils.IsAmazonEndpoint(endpointURL) {
173 return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
174 }
175 }
176 if strings.Contains(host, ".googleapis.com") {
177 if !s3utils.IsGoogleEndpoint(endpointURL) {
178 return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
179 }
180 }
181 return nil
182}
183
184// Verify if input expires value is valid.
185func isValidExpiry(expires time.Duration) error {
186 expireSeconds := int64(expires / time.Second)
187 if expireSeconds < 1 {
188 return errInvalidArgument("Expires cannot be lesser than 1 second.")
189 }
190 if expireSeconds > 604800 {
191 return errInvalidArgument("Expires cannot be greater than 7 days.")
192 }
193 return nil
194}
195
// Extract only necessary metadata header key/values by
// filtering them out with a list of custom header keys.
func extractObjMetadata(header http.Header) http.Header {
	preserveKeys := []string{
		"Content-Type",
		"Cache-Control",
		"Content-Encoding",
		"Content-Language",
		"Content-Disposition",
		"X-Amz-Storage-Class",
		"X-Amz-Object-Lock-Mode",
		"X-Amz-Object-Lock-Retain-Until-Date",
		"X-Amz-Object-Lock-Legal-Hold",
		"X-Amz-Website-Redirect-Location",
		"X-Amz-Server-Side-Encryption",
		"X-Amz-Tagging-Count",
		"X-Amz-Meta-",
		// Add new headers to be preserved.
		// if you add new headers here, please extend
		// PutObjectOptions{} to preserve them
		// upon upload as well.
	}
	filteredHeader := make(http.Header)
outer:
	for k, v := range header {
		for _, prefix := range preserveKeys {
			if strings.HasPrefix(k, prefix) {
				filteredHeader[k] = v
				continue outer
			}
		}
	}
	return filteredHeader
}
234
const (
	// RFC 7231#section-7.1.1.1 timestamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
	rfc822TimeFormat                           = "Mon, 2 Jan 2006 15:04:05 GMT"
	rfc822TimeFormatSingleDigitDay             = "Mon, _2 Jan 2006 15:04:05 GMT"
	rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
)

// parseTime attempts each supplied layout in order and returns the
// first successful parse, or an error naming all layouts tried.
func parseTime(t string, formats ...string) (time.Time, error) {
	for _, layout := range formats {
		if parsed, err := time.Parse(layout, t); err == nil {
			return parsed, nil
		}
	}
	return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats)
}

// parseRFC7231Time parses an HTTP-date header value in the RFC 7231
// format or its tolerated RFC 822 variants.
func parseRFC7231Time(lastModified string) (time.Time, error) {
	return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear)
}
255
// ToObjectInfo converts http header values into ObjectInfo type,
// extracts metadata and fills in all the necessary fields in ObjectInfo.
//
// bucketName and objectName are echoed into the result and into any
// ErrorResponse. An ErrorResponse with Code "InternalError" is returned
// whenever a header that must parse — Content-Length, Last-Modified,
// Expires, or x-amz-tagging-count — is present but malformed.
func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, error) {
	var err error
	// Trim off the odd double quotes from ETag in the beginning and end.
	etag := trimEtag(h.Get("ETag"))

	// Parse content length is exists
	// Size stays -1 ("unknown") when the Content-Length header is absent.
	var size int64 = -1
	contentLengthStr := h.Get("Content-Length")
	if contentLengthStr != "" {
		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
		if err != nil {
			// Content-Length is not valid
			return ObjectInfo{}, ErrorResponse{
				Code:       "InternalError",
				Message:    fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
				BucketName: bucketName,
				Key:        objectName,
				RequestID:  h.Get("x-amz-request-id"),
				HostID:     h.Get("x-amz-id-2"),
				Region:     h.Get("x-amz-bucket-region"),
			}
		}
	}

	// Parse Last-Modified has http time format.
	// NOTE: a missing Last-Modified header also fails here (parse of "").
	mtime, err := parseRFC7231Time(h.Get("Last-Modified"))
	if err != nil {
		return ObjectInfo{}, ErrorResponse{
			Code:       "InternalError",
			Message:    fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err),
			BucketName: bucketName,
			Key:        objectName,
			RequestID:  h.Get("x-amz-request-id"),
			HostID:     h.Get("x-amz-id-2"),
			Region:     h.Get("x-amz-bucket-region"),
		}
	}

	// Fetch content type if any present.
	// Fall back to the generic binary type when the server sent none.
	contentType := strings.TrimSpace(h.Get("Content-Type"))
	if contentType == "" {
		contentType = "application/octet-stream"
	}

	// Expires is optional; only parsed when present.
	expiryStr := h.Get("Expires")
	var expiry time.Time
	if expiryStr != "" {
		expiry, err = parseRFC7231Time(expiryStr)
		if err != nil {
			return ObjectInfo{}, ErrorResponse{
				Code:       "InternalError",
				Message:    fmt.Sprintf("'Expiry' is not in supported format: %v", err),
				BucketName: bucketName,
				Key:        objectName,
				RequestID:  h.Get("x-amz-request-id"),
				HostID:     h.Get("x-amz-id-2"),
				Region:     h.Get("x-amz-bucket-region"),
			}
		}
	}

	// Keep only the whitelisted object-metadata headers, then project the
	// X-Amz-Meta-* subset into the user-metadata map (first value only).
	metadata := extractObjMetadata(h)
	userMetadata := make(map[string]string)
	for k, v := range metadata {
		if strings.HasPrefix(k, "X-Amz-Meta-") {
			userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
		}
	}
	userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))

	// x-amz-tagging-count is optional; defaults to 0 when absent.
	var tagCount int
	if count := h.Get(amzTaggingCount); count != "" {
		tagCount, err = strconv.Atoi(count)
		if err != nil {
			return ObjectInfo{}, ErrorResponse{
				Code:       "InternalError",
				Message:    fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
				BucketName: bucketName,
				Key:        objectName,
				RequestID:  h.Get("x-amz-request-id"),
				HostID:     h.Get("x-amz-id-2"),
				Region:     h.Get("x-amz-bucket-region"),
			}
		}
	}

	// Nil if not found
	var restore *RestoreInfo
	if restoreHdr := h.Get(amzRestore); restoreHdr != "" {
		// The := deliberately scopes ongoing/expTime/err to this block.
		ongoing, expTime, err := amzRestoreToStruct(restoreHdr)
		if err != nil {
			return ObjectInfo{}, err
		}
		restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime}
	}

	// extract lifecycle expiry date and rule ID
	// (zero values when the x-amz-expiration header is absent or malformed)
	expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))

	deleteMarker := h.Get(amzDeleteMarker) == "true"

	// Save object metadata info.
	return ObjectInfo{
		ETag:              etag,
		Key:               objectName,
		Size:              size,
		LastModified:      mtime,
		ContentType:       contentType,
		Expires:           expiry,
		VersionID:         h.Get(amzVersionID),
		IsDeleteMarker:    deleteMarker,
		ReplicationStatus: h.Get(amzReplicationStatus),
		Expiration:        expTime,
		ExpirationRuleID:  ruleID,
		// Extract only the relevant header keys describing the object.
		// following function filters out a list of standard set of keys
		// which are not part of object metadata.
		Metadata:     metadata,
		UserMetadata: userMetadata,
		UserTags:     userTags,
		UserTagCount: tagCount,
		Restore:      restore,

		// Checksum values
		ChecksumCRC32:  h.Get("x-amz-checksum-crc32"),
		ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
		ChecksumSHA1:   h.Get("x-amz-checksum-sha1"),
		ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
	}, nil
}
388
// readFull behaves like io.ReadFull with one deliberate deviation: some
// spurious io.Reader implementations return io.ErrUnexpectedEOF with
// nn == 0, which is undocumented behavior; that case is normalized to
// io.EOF here so it is handled like a clean end of stream. On return,
// n == len(buf) if and only if err == nil; a partial read ends with
// io.ErrUnexpectedEOF and an empty read with io.EOF.
var readFull = func(r io.Reader, buf []byte) (n int, err error) {
	for n < len(buf) && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		// Normalize the spurious zero-byte ErrUnexpectedEOF to io.EOF.
		if err == io.ErrUnexpectedEOF && nn == 0 {
			err = io.EOF
		}
		n += nn
	}
	switch {
	case n >= len(buf):
		// Buffer filled; drop any error from the final read.
		err = nil
	case n > 0 && err == io.EOF:
		// Partial read: report it as an unexpected EOF.
		err = io.ErrUnexpectedEOF
	}
	return n, err
}
423
424// regCred matches credential string in HTTP header
425var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
426
427// regCred matches signature string in HTTP header
428var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
429
430// Redact out signature value from authorization string.
431func redactSignature(origAuth string) string {
432 if !strings.HasPrefix(origAuth, signV4Algorithm) {
433 // Set a temporary redacted auth
434 return "AWS **REDACTED**:**REDACTED**"
435 }
436
437 // Signature V4 authorization header.
438
439 // Strip out accessKeyID from:
440 // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
441 newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
442
443 // Strip out 256-bit signature from: Signature=<256-bit signature>
444 return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
445}
446
447// Get default location returns the location based on the input
448// URL `u`, if region override is provided then all location
449// defaults to regionOverride.
450//
451// If no other cases match then the location is set to `us-east-1`
452// as a last resort.
453func getDefaultLocation(u url.URL, regionOverride string) (location string) {
454 if regionOverride != "" {
455 return regionOverride
456 }
457 region := s3utils.GetRegionFromURL(u)
458 if region == "" {
459 region = "us-east-1"
460 }
461 return region
462}
463
// supportedHeaders is the set of standard request headers (keys must be
// lower case) that the client forwards as-is instead of treating them as
// custom metadata.
var supportedHeaders = map[string]bool{
	"cache-control":                       true,
	"content-disposition":                 true,
	"content-encoding":                    true,
	"content-language":                    true,
	"content-type":                        true,
	"expires":                             true,
	"x-amz-metadata-directive":            true,
	"x-amz-object-lock-mode":              true,
	"x-amz-object-lock-retain-until-date": true,
	"x-amz-replication-status":            true,
	"x-amz-website-redirect-location":     true,
	// Add more supported headers here; keys must be lower case.
}
479
480// isStorageClassHeader returns true if the header is a supported storage class header
481func isStorageClassHeader(headerKey string) bool {
482 return strings.EqualFold(amzStorageClass, headerKey)
483}
484
485// isStandardHeader returns true if header is a supported header and not a custom header
486func isStandardHeader(headerKey string) bool {
487 return supportedHeaders[strings.ToLower(headerKey)]
488}
489
// sseHeaders enumerates the server-side-encryption request headers (keys
// must be lower case) recognized by the client.
var sseHeaders = map[string]bool{
	"x-amz-server-side-encryption":                    true,
	"x-amz-server-side-encryption-context":            true,
	"x-amz-server-side-encryption-aws-kms-key-id":     true,
	"x-amz-server-side-encryption-customer-algorithm": true,
	"x-amz-server-side-encryption-customer-key":       true,
	"x-amz-server-side-encryption-customer-key-md5":   true,
	// Add more supported SSE headers here; keys must be lower case.
}
501
502// isSSEHeader returns true if header is a server side encryption header.
503func isSSEHeader(headerKey string) bool {
504 return sseHeaders[strings.ToLower(headerKey)]
505}
506
507// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
508func isAmzHeader(headerKey string) bool {
509 key := strings.ToLower(headerKey)
510
511 return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
512}
513
// supportedQueryValues is the set of query string parameters that can be
// passed in when using GetObject.
var supportedQueryValues = map[string]bool{
	"partNumber":                   true,
	"response-cache-control":       true,
	"response-content-disposition": true,
	"response-content-encoding":    true,
	"response-content-language":    true,
	"response-content-type":        true,
	"response-expires":             true,
	"versionId":                    true,
}

// isStandardQueryValue reports whether the given query string parameter is a
// supported (non-custom) parameter.
func isStandardQueryValue(qsKey string) bool {
	_, ok := supportedQueryValues[qsKey]
	return ok
}
530
// Per documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom, the
// set of query params starting with "x-" are ignored by S3.
const allowedCustomQueryPrefix = "x-"

// isCustomQueryValue reports whether the query parameter is a caller-defined
// "x-" parameter that S3 itself ignores.
func isCustomQueryValue(qsKey string) bool {
	hasCustomPrefix := strings.HasPrefix(qsKey, allowedCustomQueryPrefix)
	return hasCustomPrefix
}
538
// md5Pool recycles MD5 hash states between hashers to avoid re-allocating
// them on every request.
var md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}

// sha256Pool recycles SHA256 hash states between hashers to avoid
// re-allocating them on every request.
var sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
543
544func newMd5Hasher() md5simd.Hasher {
545 return &hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true}
546}
547
548func newSHA256Hasher() md5simd.Hasher {
549 if encrypt.FIPS {
550 return &hashWrapper{Hash: fipssha256.New(), isSHA256: true}
551 }
552 return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true}
553}
554
555// hashWrapper implements the md5simd.Hasher interface.
556type hashWrapper struct {
557 hash.Hash
558 isMD5 bool
559 isSHA256 bool
560}
561
562// Close will put the hasher back into the pool.
563func (m *hashWrapper) Close() {
564 if m.isMD5 && m.Hash != nil {
565 m.Reset()
566 md5Pool.Put(m.Hash)
567 }
568 if m.isSHA256 && m.Hash != nil {
569 m.Reset()
570 sha256Pool.Put(m.Hash)
571 }
572 m.Hash = nil
573}
574
575const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
576const (
577 letterIdxBits = 6 // 6 bits to represent a letter index
578 letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
579 letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
580)
581
582// randString generates random names and prepends them with a known prefix.
583func randString(n int, src rand.Source, prefix string) string {
584 b := make([]byte, n)
585 // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
586 for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
587 if remain == 0 {
588 cache, remain = src.Int63(), letterIdxMax
589 }
590 if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
591 b[i] = letterBytes[idx]
592 i--
593 }
594 cache >>= letterIdxBits
595 remain--
596 }
597 return prefix + string(b[0:30-len(prefix)])
598}
599
600// IsNetworkOrHostDown - if there was a network error or if the host is down.
601// expectTimeouts indicates that *context* timeouts are expected and does not
602// indicate a downed host. Other timeouts still returns down.
603func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
604 if err == nil {
605 return false
606 }
607
608 if errors.Is(err, context.Canceled) {
609 return false
610 }
611
612 if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
613 return false
614 }
615
616 if errors.Is(err, context.DeadlineExceeded) {
617 return true
618 }
619
620 // We need to figure if the error either a timeout
621 // or a non-temporary error.
622 urlErr := &url.Error{}
623 if errors.As(err, &urlErr) {
624 switch urlErr.Err.(type) {
625 case *net.DNSError, *net.OpError, net.UnknownNetworkError:
626 return true
627 }
628 }
629 var e net.Error
630 if errors.As(err, &e) {
631 if e.Timeout() {
632 return true
633 }
634 }
635
636 // Fallback to other mechanisms.
637 switch {
638 case strings.Contains(err.Error(), "Connection closed by foreign host"):
639 return true
640 case strings.Contains(err.Error(), "TLS handshake timeout"):
641 // If error is - tlsHandshakeTimeoutError.
642 return true
643 case strings.Contains(err.Error(), "i/o timeout"):
644 // If error is - tcp timeoutError.
645 return true
646 case strings.Contains(err.Error(), "connection timed out"):
647 // If err is a net.Dial timeout.
648 return true
649 case strings.Contains(err.Error(), "connection refused"):
650 // If err is connection refused
651 return true
652
653 case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
654 // Denial errors
655 return true
656 }
657 return false
658}
659
660// newHashReaderWrapper will hash all reads done through r.
661// When r returns io.EOF the done function will be called with the sum.
662func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper {
663 return &hashReaderWrapper{
664 r: r,
665 h: h,
666 done: done,
667 }
668}
669
670type hashReaderWrapper struct {
671 r io.Reader
672 h hash.Hash
673 done func(hash []byte)
674}
675
676// Read implements the io.Reader interface.
677func (h *hashReaderWrapper) Read(p []byte) (n int, err error) {
678 n, err = h.r.Read(p)
679 if n > 0 {
680 n2, err := h.h.Write(p[:n])
681 if err != nil {
682 return 0, err
683 }
684 if n2 != n {
685 return 0, io.ErrShortWrite
686 }
687 }
688 if err == io.EOF {
689 // Call back
690 h.done(h.h.Sum(nil))
691 }
692 return n, err
693}
diff --git a/vendor/github.com/minio/sha256-simd/.gitignore b/vendor/github.com/minio/sha256-simd/.gitignore
new file mode 100644
index 0000000..c56069f
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/.gitignore
@@ -0,0 +1 @@
*.test \ No newline at end of file
diff --git a/vendor/github.com/minio/sha256-simd/LICENSE b/vendor/github.com/minio/sha256-simd/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/LICENSE
@@ -0,0 +1,202 @@
1
2 Apache License
3 Version 2.0, January 2004
4 http://www.apache.org/licenses/
5
6 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
8 1. Definitions.
9
10 "License" shall mean the terms and conditions for use, reproduction,
11 and distribution as defined by Sections 1 through 9 of this document.
12
13 "Licensor" shall mean the copyright owner or entity authorized by
14 the copyright owner that is granting the License.
15
16 "Legal Entity" shall mean the union of the acting entity and all
17 other entities that control, are controlled by, or are under common
18 control with that entity. For the purposes of this definition,
19 "control" means (i) the power, direct or indirect, to cause the
20 direction or management of such entity, whether by contract or
21 otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 outstanding shares, or (iii) beneficial ownership of such entity.
23
24 "You" (or "Your") shall mean an individual or Legal Entity
25 exercising permissions granted by this License.
26
27 "Source" form shall mean the preferred form for making modifications,
28 including but not limited to software source code, documentation
29 source, and configuration files.
30
31 "Object" form shall mean any form resulting from mechanical
32 transformation or translation of a Source form, including but
33 not limited to compiled object code, generated documentation,
34 and conversions to other media types.
35
36 "Work" shall mean the work of authorship, whether in Source or
37 Object form, made available under the License, as indicated by a
38 copyright notice that is included in or attached to the work
39 (an example is provided in the Appendix below).
40
41 "Derivative Works" shall mean any work, whether in Source or Object
42 form, that is based on (or derived from) the Work and for which the
43 editorial revisions, annotations, elaborations, or other modifications
44 represent, as a whole, an original work of authorship. For the purposes
45 of this License, Derivative Works shall not include works that remain
46 separable from, or merely link (or bind by name) to the interfaces of,
47 the Work and Derivative Works thereof.
48
49 "Contribution" shall mean any work of authorship, including
50 the original version of the Work and any modifications or additions
51 to that Work or Derivative Works thereof, that is intentionally
52 submitted to Licensor for inclusion in the Work by the copyright owner
53 or by an individual or Legal Entity authorized to submit on behalf of
54 the copyright owner. For the purposes of this definition, "submitted"
55 means any form of electronic, verbal, or written communication sent
56 to the Licensor or its representatives, including but not limited to
57 communication on electronic mailing lists, source code control systems,
58 and issue tracking systems that are managed by, or on behalf of, the
59 Licensor for the purpose of discussing and improving the Work, but
60 excluding communication that is conspicuously marked or otherwise
61 designated in writing by the copyright owner as "Not a Contribution."
62
63 "Contributor" shall mean Licensor and any individual or Legal Entity
64 on behalf of whom a Contribution has been received by Licensor and
65 subsequently incorporated within the Work.
66
67 2. Grant of Copyright License. Subject to the terms and conditions of
68 this License, each Contributor hereby grants to You a perpetual,
69 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 copyright license to reproduce, prepare Derivative Works of,
71 publicly display, publicly perform, sublicense, and distribute the
72 Work and such Derivative Works in Source or Object form.
73
74 3. Grant of Patent License. Subject to the terms and conditions of
75 this License, each Contributor hereby grants to You a perpetual,
76 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 (except as stated in this section) patent license to make, have made,
78 use, offer to sell, sell, import, and otherwise transfer the Work,
79 where such license applies only to those patent claims licensable
80 by such Contributor that are necessarily infringed by their
81 Contribution(s) alone or by combination of their Contribution(s)
82 with the Work to which such Contribution(s) was submitted. If You
83 institute patent litigation against any entity (including a
84 cross-claim or counterclaim in a lawsuit) alleging that the Work
85 or a Contribution incorporated within the Work constitutes direct
86 or contributory patent infringement, then any patent licenses
87 granted to You under this License for that Work shall terminate
88 as of the date such litigation is filed.
89
90 4. Redistribution. You may reproduce and distribute copies of the
91 Work or Derivative Works thereof in any medium, with or without
92 modifications, and in Source or Object form, provided that You
93 meet the following conditions:
94
95 (a) You must give any other recipients of the Work or
96 Derivative Works a copy of this License; and
97
98 (b) You must cause any modified files to carry prominent notices
99 stating that You changed the files; and
100
101 (c) You must retain, in the Source form of any Derivative Works
102 that You distribute, all copyright, patent, trademark, and
103 attribution notices from the Source form of the Work,
104 excluding those notices that do not pertain to any part of
105 the Derivative Works; and
106
107 (d) If the Work includes a "NOTICE" text file as part of its
108 distribution, then any Derivative Works that You distribute must
109 include a readable copy of the attribution notices contained
110 within such NOTICE file, excluding those notices that do not
111 pertain to any part of the Derivative Works, in at least one
112 of the following places: within a NOTICE text file distributed
113 as part of the Derivative Works; within the Source form or
114 documentation, if provided along with the Derivative Works; or,
115 within a display generated by the Derivative Works, if and
116 wherever such third-party notices normally appear. The contents
117 of the NOTICE file are for informational purposes only and
118 do not modify the License. You may add Your own attribution
119 notices within Derivative Works that You distribute, alongside
120 or as an addendum to the NOTICE text from the Work, provided
121 that such additional attribution notices cannot be construed
122 as modifying the License.
123
124 You may add Your own copyright statement to Your modifications and
125 may provide additional or different license terms and conditions
126 for use, reproduction, or distribution of Your modifications, or
127 for any such Derivative Works as a whole, provided Your use,
128 reproduction, and distribution of the Work otherwise complies with
129 the conditions stated in this License.
130
131 5. Submission of Contributions. Unless You explicitly state otherwise,
132 any Contribution intentionally submitted for inclusion in the Work
133 by You to the Licensor shall be under the terms and conditions of
134 this License, without any additional terms or conditions.
135 Notwithstanding the above, nothing herein shall supersede or modify
136 the terms of any separate license agreement you may have executed
137 with Licensor regarding such Contributions.
138
139 6. Trademarks. This License does not grant permission to use the trade
140 names, trademarks, service marks, or product names of the Licensor,
141 except as required for reasonable and customary use in describing the
142 origin of the Work and reproducing the content of the NOTICE file.
143
144 7. Disclaimer of Warranty. Unless required by applicable law or
145 agreed to in writing, Licensor provides the Work (and each
146 Contributor provides its Contributions) on an "AS IS" BASIS,
147 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 implied, including, without limitation, any warranties or conditions
149 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 PARTICULAR PURPOSE. You are solely responsible for determining the
151 appropriateness of using or redistributing the Work and assume any
152 risks associated with Your exercise of permissions under this License.
153
154 8. Limitation of Liability. In no event and under no legal theory,
155 whether in tort (including negligence), contract, or otherwise,
156 unless required by applicable law (such as deliberate and grossly
157 negligent acts) or agreed to in writing, shall any Contributor be
158 liable to You for damages, including any direct, indirect, special,
159 incidental, or consequential damages of any character arising as a
160 result of this License or out of the use or inability to use the
161 Work (including but not limited to damages for loss of goodwill,
162 work stoppage, computer failure or malfunction, or any and all
163 other commercial damages or losses), even if such Contributor
164 has been advised of the possibility of such damages.
165
166 9. Accepting Warranty or Additional Liability. While redistributing
167 the Work or Derivative Works thereof, You may choose to offer,
168 and charge a fee for, acceptance of support, warranty, indemnity,
169 or other liability obligations and/or rights consistent with this
170 License. However, in accepting such obligations, You may act only
171 on Your own behalf and on Your sole responsibility, not on behalf
172 of any other Contributor, and only if You agree to indemnify,
173 defend, and hold each Contributor harmless for any liability
174 incurred by, or claims asserted against, such Contributor by reason
175 of your accepting any such warranty or additional liability.
176
177 END OF TERMS AND CONDITIONS
178
179 APPENDIX: How to apply the Apache License to your work.
180
181 To apply the Apache License to your work, attach the following
182 boilerplate notice, with the fields enclosed by brackets "[]"
183 replaced with your own identifying information. (Don't include
184 the brackets!) The text should be enclosed in the appropriate
185 comment syntax for the file format. We also recommend that a
186 file or class name and description of purpose be included on the
187 same "printed page" as the copyright notice for easier
188 identification within third-party archives.
189
190 Copyright [yyyy] [name of copyright owner]
191
192 Licensed under the Apache License, Version 2.0 (the "License");
193 you may not use this file except in compliance with the License.
194 You may obtain a copy of the License at
195
196 http://www.apache.org/licenses/LICENSE-2.0
197
198 Unless required by applicable law or agreed to in writing, software
199 distributed under the License is distributed on an "AS IS" BASIS,
200 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 See the License for the specific language governing permissions and
202 limitations under the License.
diff --git a/vendor/github.com/minio/sha256-simd/README.md b/vendor/github.com/minio/sha256-simd/README.md
new file mode 100644
index 0000000..6117488
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/README.md
@@ -0,0 +1,137 @@
1# sha256-simd
2
3Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions for x86 and ARM64 for ARM.
4On AVX512 it provides an up to 8x improvement (over 3 GB/s per core).
5SHA Extensions give a performance boost of close to 4x over native.
6
7## Introduction
8
9This package is designed as a replacement for `crypto/sha256`.
10For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement.
11
12This package uses Golang assembly.
13The AVX512 version is based on the Intel's "multi-buffer crypto library for IPSec" whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al.
14
15## Support for Intel SHA Extensions
16
17Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)).
18
19```
20$ benchcmp avx2.txt sha-ext.txt
21benchmark AVX2 MB/s SHA Ext MB/s speedup
22BenchmarkHash5M 514.40 1975.17 3.84x
23```
24
Thanks to Kristofer Peterson, we also added further performance improvements such as optimized padding and
endian conversions, which sped up all implementations: the Intel SHA path alone roughly doubled performance for small sizes,
while the other changes increased throughput across the board by roughly 50%.
28
29## Support for AVX512
30
31We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU):
32
33```
34$ benchcmp avx2.txt avx512.txt
35benchmark AVX2 MB/s AVX512 MB/s speedup
36BenchmarkHash5M 448.62 3498.20 7.80x
37```
38
39The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide).
40
41Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another — because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message.
42
43Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice.
44
45Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created whereby each server can hash over 3 GB/s on a single core. An `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion:
46
47```go
48import "github.com/minio/sha256-simd"
49
50func main() {
51 server := sha256.NewAvx512Server()
52 h512 := sha256.NewAvx512(server)
53 h512.Write(fileBlock)
54 digest := h512.Sum([]byte{})
55}
56```
57
58Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). Some other tips to get the best performance:
59* Have many go routines doing SHA256 calculations in parallel.
60* Try to Write() messages in multiples of 64 bytes.
* Try to keep the overall length of messages to a roughly similar size, i.e. 5 MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as much as possible).
62
63More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post including scaling across cores.
64
65## Drop-In Replacement
66
67The following code snippet shows how you can use `github.com/minio/sha256-simd`.
68This will automatically select the fastest method for the architecture on which it will be executed.
69
70```go
71import "github.com/minio/sha256-simd"
72
73func main() {
74 ...
75 shaWriter := sha256.New()
76 io.Copy(shaWriter, file)
77 ...
78}
79```
80
81## Performance
82
83Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB.
84
85| Processor | SIMD | Speed (MB/s) |
86| --------------------------------- | ------- | ------------:|
87| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 |
88| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 |
89| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 |
90
91## asm2plan9s
92
93In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information.
94
95## Why and benefits
96
97One of the most performance sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is related to SHA256 hash sums calculations. For instance during multi part uploads each part that is uploaded needs to be verified for data integrity by the server.
98
99Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc.
100
101## ARM SHA Extensions
102
103The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)).
104
105 ```
106 sha256h q2, q3, v9.4s
107 sha256h2 q3, q4, v9.4s
108 sha256su0 v5.4s, v6.4s
109 rev32 v8.16b, v8.16b
110 add v9.4s, v7.4s, v18.4s
111 mov v4.16b, v2.16b
112 sha256h q2, q3, v10.4s
113 sha256h2 q3, q4, v10.4s
114 sha256su0 v6.4s, v7.4s
115 sha256su1 v5.4s, v7.4s, v8.4s
116 ```
117
118### Detailed benchmarks
119
Benchmarks generated on a 1.2 GHz quad-core ARM Cortex-A53 equipped [Pine64](https://www.pine64.com/).
121
122```
123minio@minio-arm:$ benchcmp golang.txt arm64.txt
124benchmark golang arm64 speedup
125BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x
126BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x
127BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x
128BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x
129```
130
131## License
132
133Released under the Apache License v2.0. You can find the complete text in the file LICENSE.
134
135## Contributing
136
137Contributions are welcome, please send PRs for any enhancements.
diff --git a/vendor/github.com/minio/sha256-simd/cpuid_other.go b/vendor/github.com/minio/sha256-simd/cpuid_other.go
new file mode 100644
index 0000000..97af6a1
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/cpuid_other.go
@@ -0,0 +1,50 @@
1// Minio Cloud Storage, (C) 2021 Minio, Inc.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14//
15
16package sha256
17
18import (
19 "bytes"
20 "io/ioutil"
21 "runtime"
22
23 "github.com/klauspost/cpuid/v2"
24)
25
var (
	// hasIntelSha reports whether this is an amd64 CPU exposing the x86 SHA
	// extensions together with the SSSE3/SSE4 support the assembly relies on.
	hasIntelSha = runtime.GOARCH == "amd64" && cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4)
	// hasAvx512 reports whether the AVX-512 subsets used here (F/DQ/BW/VL)
	// are all available.
	hasAvx512   = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
)
30
31func hasArmSha2() bool {
32 if cpuid.CPU.Has(cpuid.SHA2) {
33 return true
34 }
35 if runtime.GOARCH != "arm64" || runtime.GOOS != "linux" {
36 return false
37 }
38
39 // Fall back to hacky cpuinfo parsing...
40 const procCPUInfo = "/proc/cpuinfo"
41
42 // Feature to check for.
43 const sha256Feature = "sha2"
44
45 cpuInfo, err := ioutil.ReadFile(procCPUInfo)
46 if err != nil {
47 return false
48 }
49 return bytes.Contains(cpuInfo, []byte(sha256Feature))
50}
diff --git a/vendor/github.com/minio/sha256-simd/sha256.go b/vendor/github.com/minio/sha256-simd/sha256.go
new file mode 100644
index 0000000..f146bbd
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256.go
@@ -0,0 +1,468 @@
1/*
2 * Minio Cloud Storage, (C) 2016 Minio, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package sha256
18
19import (
20 "crypto/sha256"
21 "encoding/binary"
22 "errors"
23 "hash"
24)
25
// Size - The size of a SHA256 checksum in bytes.
const Size = 32

// BlockSize - The blocksize of SHA256 in bytes.
const BlockSize = 64

const (
	// chunk is the number of input bytes consumed per compression.
	chunk = BlockSize
	// init0..init7 are the initial SHA-256 hash state words H0..H7
	// (per FIPS 180-4).
	init0 = 0x6A09E667
	init1 = 0xBB67AE85
	init2 = 0x3C6EF372
	init3 = 0xA54FF53A
	init4 = 0x510E527F
	init5 = 0x9B05688C
	init6 = 0x1F83D9AB
	init7 = 0x5BE0CD19
)
43
// digest represents the partial evaluation of a checksum.
type digest struct {
	h   [8]uint32   // current hash state words
	x   [chunk]byte // buffered input that does not yet fill a block
	nx  int         // number of valid bytes in x
	len uint64      // total number of bytes written so far
}
51
52// Reset digest back to default
53func (d *digest) Reset() {
54 d.h[0] = init0
55 d.h[1] = init1
56 d.h[2] = init2
57 d.h[3] = init3
58 d.h[4] = init4
59 d.h[5] = init5
60 d.h[6] = init6
61 d.h[7] = init7
62 d.nx = 0
63 d.len = 0
64}
65
// blockfuncType selects which compression-function implementation is used.
type blockfuncType int

const (
	blockfuncStdlib blockfuncType = iota // no extensions detected: defer to crypto/sha256
	blockfuncIntelSha                    // x86 SHA-NI implementation
	blockfuncArmSha2                     // ARMv8 SHA2 implementation
	// blockfuncForceGeneric forces the pure-Go path.
	// NOTE(review): not assigned anywhere in this file — presumably a
	// test/debug override; confirm against the package's tests.
	blockfuncForceGeneric = -1
)

// blockfunc is chosen once in init() from the detected CPU features.
var blockfunc blockfuncType
76
77func init() {
78 switch {
79 case hasIntelSha:
80 blockfunc = blockfuncIntelSha
81 case hasArmSha2():
82 blockfunc = blockfuncArmSha2
83 }
84}
85
86// New returns a new hash.Hash computing the SHA256 checksum.
87func New() hash.Hash {
88 if blockfunc == blockfuncStdlib {
89 // Fallback to the standard golang implementation
90 // if no features were found.
91 return sha256.New()
92 }
93
94 d := new(digest)
95 d.Reset()
96 return d
97}
98
99// Sum256 - single caller sha256 helper
100func Sum256(data []byte) (result [Size]byte) {
101 var d digest
102 d.Reset()
103 d.Write(data)
104 result = d.checkSum()
105 return
106}
107
// Size returns the length of the checksum (32 bytes), satisfying hash.Hash.
func (d *digest) Size() int { return Size }

// BlockSize returns the hash's block size (64 bytes), satisfying hash.Hash.
func (d *digest) BlockSize() int { return BlockSize }
113
// Write absorbs p into the hash state: it tops up any partially filled
// block left from a previous Write, compresses every complete 64-byte
// block, and buffers the remaining tail. Implements io.Writer; the
// returned error is always nil.
func (d *digest) Write(p []byte) (nn int, err error) {
	nn = len(p)
	d.len += uint64(nn)
	if d.nx > 0 {
		// Complete the buffered partial block first.
		n := copy(d.x[d.nx:], p)
		d.nx += n
		if d.nx == chunk {
			block(d, d.x[:])
			d.nx = 0
		}
		p = p[n:]
	}
	if len(p) >= chunk {
		// Compress whole blocks directly from p without copying.
		n := len(p) &^ (chunk - 1) // round down to a multiple of chunk
		block(d, p[:n])
		p = p[n:]
	}
	if len(p) > 0 {
		// Stash the tail for the next Write or for checkSum.
		d.nx = copy(d.x[:], p)
	}
	return
}
137
138// Return sha256 sum in bytes
139func (d *digest) Sum(in []byte) []byte {
140 // Make a copy of d0 so that caller can keep writing and summing.
141 d0 := *d
142 hash := d0.checkSum()
143 return append(in, hash[:]...)
144}
145
146// Intermediate checksum function
147func (d *digest) checkSum() (digest [Size]byte) {
148 n := d.nx
149
150 var k [64]byte
151 copy(k[:], d.x[:n])
152
153 k[n] = 0x80
154
155 if n >= 56 {
156 block(d, k[:])
157
158 // clear block buffer - go compiles this to optimal 1x xorps + 4x movups
159 // unfortunately expressing this more succinctly results in much worse code
160 k[0] = 0
161 k[1] = 0
162 k[2] = 0
163 k[3] = 0
164 k[4] = 0
165 k[5] = 0
166 k[6] = 0
167 k[7] = 0
168 k[8] = 0
169 k[9] = 0
170 k[10] = 0
171 k[11] = 0
172 k[12] = 0
173 k[13] = 0
174 k[14] = 0
175 k[15] = 0
176 k[16] = 0
177 k[17] = 0
178 k[18] = 0
179 k[19] = 0
180 k[20] = 0
181 k[21] = 0
182 k[22] = 0
183 k[23] = 0
184 k[24] = 0
185 k[25] = 0
186 k[26] = 0
187 k[27] = 0
188 k[28] = 0
189 k[29] = 0
190 k[30] = 0
191 k[31] = 0
192 k[32] = 0
193 k[33] = 0
194 k[34] = 0
195 k[35] = 0
196 k[36] = 0
197 k[37] = 0
198 k[38] = 0
199 k[39] = 0
200 k[40] = 0
201 k[41] = 0
202 k[42] = 0
203 k[43] = 0
204 k[44] = 0
205 k[45] = 0
206 k[46] = 0
207 k[47] = 0
208 k[48] = 0
209 k[49] = 0
210 k[50] = 0
211 k[51] = 0
212 k[52] = 0
213 k[53] = 0
214 k[54] = 0
215 k[55] = 0
216 k[56] = 0
217 k[57] = 0
218 k[58] = 0
219 k[59] = 0
220 k[60] = 0
221 k[61] = 0
222 k[62] = 0
223 k[63] = 0
224 }
225 binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3)
226 block(d, k[:])
227
228 {
229 const i = 0
230 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
231 }
232 {
233 const i = 1
234 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
235 }
236 {
237 const i = 2
238 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
239 }
240 {
241 const i = 3
242 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
243 }
244 {
245 const i = 4
246 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
247 }
248 {
249 const i = 5
250 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
251 }
252 {
253 const i = 6
254 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
255 }
256 {
257 const i = 7
258 binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
259 }
260
261 return
262}
263
264func block(dig *digest, p []byte) {
265 if blockfunc == blockfuncIntelSha {
266 blockIntelShaGo(dig, p)
267 } else if blockfunc == blockfuncArmSha2 {
268 blockArmSha2Go(dig, p)
269 } else {
270 blockGeneric(dig, p)
271 }
272}
273
// blockGeneric is the pure-Go SHA-256 compression function, used when no
// CPU SHA extensions are available. It processes every complete 64-byte
// block in p and folds the results into dig.h.
func blockGeneric(dig *digest, p []byte) {
	var w [64]uint32
	h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
	for len(p) >= chunk {
		// Can interlace the computation of w with the
		// rounds below if needed for speed.
		// First 16 schedule words are the message block, big-endian.
		for i := 0; i < 16; i++ {
			j := i * 4
			w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
		}
		// Remaining words: w[i] = sigma1(w[i-2]) + w[i-7] + sigma0(w[i-15]) + w[i-16].
		for i := 16; i < 64; i++ {
			v1 := w[i-2]
			t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10)
			v2 := w[i-15]
			t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3)
			w[i] = t1 + w[i-7] + t2 + w[i-16]
		}

		a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7

		for i := 0; i < 64; i++ {
			// t1 = h + SIGMA1(e) + Ch(e,f,g) + K[i] + w[i]
			t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i]

			// t2 = SIGMA0(a) + Maj(a,b,c)
			t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c))

			// Rotate the working variables.
			h = g
			g = f
			f = e
			e = d + t1
			d = c
			c = b
			b = a
			a = t1 + t2
		}

		// Add this block's result into the running state.
		h0 += a
		h1 += b
		h2 += c
		h3 += d
		h4 += e
		h5 += f
		h6 += g
		h7 += h

		p = p[chunk:]
	}

	dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
}
323
// _K holds the 64 SHA-256 round constants (K0..K63 from FIPS 180-4),
// consumed in order by blockGeneric's round loop.
var _K = []uint32{
	0x428a2f98,
	0x71374491,
	0xb5c0fbcf,
	0xe9b5dba5,
	0x3956c25b,
	0x59f111f1,
	0x923f82a4,
	0xab1c5ed5,
	0xd807aa98,
	0x12835b01,
	0x243185be,
	0x550c7dc3,
	0x72be5d74,
	0x80deb1fe,
	0x9bdc06a7,
	0xc19bf174,
	0xe49b69c1,
	0xefbe4786,
	0x0fc19dc6,
	0x240ca1cc,
	0x2de92c6f,
	0x4a7484aa,
	0x5cb0a9dc,
	0x76f988da,
	0x983e5152,
	0xa831c66d,
	0xb00327c8,
	0xbf597fc7,
	0xc6e00bf3,
	0xd5a79147,
	0x06ca6351,
	0x14292967,
	0x27b70a85,
	0x2e1b2138,
	0x4d2c6dfc,
	0x53380d13,
	0x650a7354,
	0x766a0abb,
	0x81c2c92e,
	0x92722c85,
	0xa2bfe8a1,
	0xa81a664b,
	0xc24b8b70,
	0xc76c51a3,
	0xd192e819,
	0xd6990624,
	0xf40e3585,
	0x106aa070,
	0x19a4c116,
	0x1e376c08,
	0x2748774c,
	0x34b0bcb5,
	0x391c0cb3,
	0x4ed8aa4a,
	0x5b9cca4f,
	0x682e6ff3,
	0x748f82ee,
	0x78a5636f,
	0x84c87814,
	0x8cc70208,
	0x90befffa,
	0xa4506ceb,
	0xbef9a3f7,
	0xc67178f2,
}
390
const (
	// magic256 mirrors crypto/sha256's marshaled-state identifier so
	// that saved states use the same on-wire layout.
	magic256 = "sha\x03"
	// marshaledSize = magic + 8 state words + block buffer + byte count.
	marshaledSize = len(magic256) + 8*4 + chunk + 8
)
395
396func (d *digest) MarshalBinary() ([]byte, error) {
397 b := make([]byte, 0, marshaledSize)
398 b = append(b, magic256...)
399 b = appendUint32(b, d.h[0])
400 b = appendUint32(b, d.h[1])
401 b = appendUint32(b, d.h[2])
402 b = appendUint32(b, d.h[3])
403 b = appendUint32(b, d.h[4])
404 b = appendUint32(b, d.h[5])
405 b = appendUint32(b, d.h[6])
406 b = appendUint32(b, d.h[7])
407 b = append(b, d.x[:d.nx]...)
408 b = b[:len(b)+len(d.x)-d.nx] // already zero
409 b = appendUint64(b, d.len)
410 return b, nil
411}
412
413func (d *digest) UnmarshalBinary(b []byte) error {
414 if len(b) < len(magic256) || string(b[:len(magic256)]) != magic256 {
415 return errors.New("crypto/sha256: invalid hash state identifier")
416 }
417 if len(b) != marshaledSize {
418 return errors.New("crypto/sha256: invalid hash state size")
419 }
420 b = b[len(magic256):]
421 b, d.h[0] = consumeUint32(b)
422 b, d.h[1] = consumeUint32(b)
423 b, d.h[2] = consumeUint32(b)
424 b, d.h[3] = consumeUint32(b)
425 b, d.h[4] = consumeUint32(b)
426 b, d.h[5] = consumeUint32(b)
427 b, d.h[6] = consumeUint32(b)
428 b, d.h[7] = consumeUint32(b)
429 b = b[copy(d.x[:], b):]
430 b, d.len = consumeUint64(b)
431 d.nx = int(d.len % chunk)
432 return nil
433}
434
// appendUint32 appends v to b in big-endian byte order.
func appendUint32(b []byte, v uint32) []byte {
	var scratch [4]byte
	binary.BigEndian.PutUint32(scratch[:], v)
	return append(b, scratch[:]...)
}
443
// appendUint64 appends v to b in big-endian byte order.
func appendUint64(b []byte, v uint64) []byte {
	var scratch [8]byte
	binary.BigEndian.PutUint64(scratch[:], v)
	return append(b, scratch[:]...)
}
456
// consumeUint64 decodes a big-endian uint64 from the front of b and
// returns the remaining bytes with the value. Like the original's
// explicit bounds hint, it panics when len(b) < 8.
func consumeUint64(b []byte) ([]byte, uint64) {
	v := binary.BigEndian.Uint64(b)
	return b[8:], v
}
463
// consumeUint32 decodes a big-endian uint32 from the front of b and
// returns the remaining bytes with the value. Like the original's
// explicit bounds hint, it panics when len(b) < 4.
func consumeUint32(b []byte) ([]byte, uint32) {
	v := binary.BigEndian.Uint32(b)
	return b[4:], v
}
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm
new file mode 100644
index 0000000..c959b1a
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm
@@ -0,0 +1,686 @@
1
2// 16x Parallel implementation of SHA256 for AVX512
3
4//
5// Minio Cloud Storage, (C) 2017 Minio, Inc.
6//
7// Licensed under the Apache License, Version 2.0 (the "License");
8// you may not use this file except in compliance with the License.
9// You may obtain a copy of the License at
10//
11// http://www.apache.org/licenses/LICENSE-2.0
12//
13// Unless required by applicable law or agreed to in writing, software
14// distributed under the License is distributed on an "AS IS" BASIS,
15// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16// See the License for the specific language governing permissions and
17// limitations under the License.
18
19//
20// This code is based on the Intel Multi-Buffer Crypto for IPSec library
21// and more specifically the following implementation:
22// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm
23//
24// For Golang it has been converted into Plan 9 assembly with the help of
25// github.com/minio/asm2plan9s to assemble the AVX512 instructions
26//
27
28// Copyright (c) 2017, Intel Corporation
29//
30// Redistribution and use in source and binary forms, with or without
31// modification, are permitted provided that the following conditions are met:
32//
33// * Redistributions of source code must retain the above copyright notice,
34// this list of conditions and the following disclaimer.
35// * Redistributions in binary form must reproduce the above copyright
36// notice, this list of conditions and the following disclaimer in the
37// documentation and/or other materials provided with the distribution.
38// * Neither the name of Intel Corporation nor the names of its contributors
39// may be used to endorse or promote products derived from this software
40// without specific prior written permission.
41//
42// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
43// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
45// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
46// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
48// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
49// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
50// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52
// Register allocation for the 16-lane AVX-512 SHA-256 kernel. The
// lowercase x86-64 names (rdi, rsi, ...) are used inside the
// Intel-syntax instructions assembled via asm2plan9s; the *_P9 aliases
// are the same registers under their Plan 9 names for native Go asm ops.

// Stride (in bytes) between consecutive digest word rows in the state
// layout: one zmm row (16 lanes x 4 bytes) per SHA-256 state word.
#define SHA256_DIGEST_ROW_SIZE 64

// arg1
#define STATE rdi
#define STATE_P9 DI
// arg2
#define INP_SIZE rsi
#define INP_SIZE_P9 SI

#define IDX rcx
#define TBL rdx
#define TBL_P9 DX

#define INPUT rax
#define INPUT_P9 AX

#define inp0 r9
#define SCRATCH_P9 R12
#define SCRATCH r12
#define maskp r13
#define MASKP_P9 R13
#define mask r14
#define MASK_P9 R14

// Working state A-H: one SHA-256 state word per zmm register, with
// one 32-bit lane per input stream (16 lanes).
#define A zmm0
#define B zmm1
#define C zmm2
#define D zmm3
#define E zmm4
#define F zmm5
#define G zmm6
#define H zmm7
#define T1 zmm8
#define TMP0 zmm9
#define TMP1 zmm10
#define TMP2 zmm11
#define TMP3 zmm12
#define TMP4 zmm13
#define TMP5 zmm14
#define TMP6 zmm15

// Message schedule words W0-W15, 16 lanes each.
#define W0 zmm16
#define W1 zmm17
#define W2 zmm18
#define W3 zmm19
#define W4 zmm20
#define W5 zmm21
#define W6 zmm22
#define W7 zmm23
#define W8 zmm24
#define W9 zmm25
#define W10 zmm26
#define W11 zmm27
#define W12 zmm28
#define W13 zmm29
#define W14 zmm30
#define W15 zmm31
110
111
// TRANSPOSE16 transposes a 16x16 matrix of 32-bit words: on entry each
// of _r0.._r15 holds one lane's 16 words; on exit each register holds
// word index k from all 16 lanes. Implemented as vshufps 4x4 partial
// transposes followed by vpermi2q/vshuff64x2 merges driven by the
// PSHUFFLE_TRANSPOSE16_MASK1/2 tables. _t0/_t1 are scratch.
// NOTE(review): also clobbers BX and R8 (mask table pointers).
#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \
	\
	\ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0}
	\ //       r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0}
	\ //       r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0}
	\ //       r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0}
	\ //       r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0}
	\ //       r5 = {f15 f14 f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0}
	\ //       r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0}
	\ //       r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0}
	\ //       r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0}
	\ //       r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0}
	\ //       r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0}
	\ //       r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0}
	\ //       r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0}
	\ //       r13 = {n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0}
	\ //       r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0}
	\ //       r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0}
	\
	\ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0}
	\ //        r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1}
	\ //        r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
	\ //        r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3}
	\ //        r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4}
	\ //        r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5}
	\ //        r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
	\ //        r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7}
	\ //        r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8}
	\ //        r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9}
	\ //        r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
	\ //        r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11}
	\ //        r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12}
	\ //        r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13}
	\ //        r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14}
	\ //        r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15}
	\
	\ // process top half
	vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0}
	vshufps _r0, _r0, _r1, 0xEE \ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2}
	vshufps _t1, _r2, _r3, 0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0}
	vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2}
	\
	vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1}
	vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2}
	vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 a7 d3 c3 b3 a3}
	vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0}
	\
	\ // use r2 in place of t0
	vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0}
	vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2}
	vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0}
	vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2}
	\
	vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1}
	vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2}
	vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3}
	vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0}
	\
	\ // use r6 in place of t0
	vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0}
	vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2}
	vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0}
	vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2}
	\
	vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 i13 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1}
	vshufps _r9, _r8, _r10, 0x88 \ // r9 = {l14 k14 j14 i14 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2}
	vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 i15 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3}
	vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 i12 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0}
	\
	\ // use r10 in place of t0
	vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 m1 m0}
	vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 m3 m2}
	vshufps _t1, _r14, _r15, 0x44 \ // t1 = {p13 p12 o13 o12 p9 p8 o9 o8 p5 p4 o5 o4 p1 p0 o1 o0}
	vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 o15 o14 p11 p10 o11 o10 p7 p6 o7 o6 p3 p2 o3 o2}
	\
	vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 o13 n13 m13 p9 o9 n9 m9 p5 o5 n5 m5 p1 o1 n1 m1}
	vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 o14 n14 m14 p10 o10 n10 m10 p6 o6 n6 m6 p2 o2 n2 m2}
	vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 o15 n15 m15 p11 o11 n11 m11 p7 o7 n7 m7 p3 o3 n3 m3}
	vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 o12 n12 m12 p8 o8 n8 m8 p4 o4 n4 m4 p0 o0 n0 m0}
	\
	\ // At this point, the registers that contain interesting data are:
	\ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12
	\ // Can use t1 and r14 as scratch registers
	LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \
	LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \
	\
	vmovdqu32 _r14, [rbx] \
	vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0}
	vmovdqu32 _t1, [r8] \
	vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4}
	\
	vmovdqu32 _r2, [rbx] \
	vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1}
	vmovdqu32 _t0, [r8] \
	vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5}
	\
	vmovdqu32 _r3, [rbx] \
	vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2}
	vmovdqu32 _r7, [r8] \
	vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6}
	\
	vmovdqu32 _r1, [rbx] \
	vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3}
	vmovdqu32 _r5, [r8] \
	vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7}
	\
	vmovdqu32 _r0, [rbx] \
	vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0}
	vmovdqu32 _r4, [r8] \
	vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 j12 i12 p4 o4 n4 m4 l4 k4 j4 i4}
	\
	vmovdqu32 _r6, [rbx] \
	vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1}
	vmovdqu32 _r10, [r8] \
	vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5}
	\
	vmovdqu32 _r11, [rbx] \
	vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2}
	vmovdqu32 _r15, [r8] \
	vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6}
	\
	vmovdqu32 _r9, [rbx] \
	vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3}
	vmovdqu32 _r13, [r8] \
	vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7}
	\
	\ // At this point r8 and r12 can be used as scratch registers
	vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8}
	vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0}
	\
	vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12}
	vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4}
	\
	vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14}
	vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
	\
	vshuff64x2 _r15, _r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15}
	vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7}
	\
	vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13}
	vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5}
	\
	vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
	vshuff64x2 _t0, _r3, _r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
	\
	vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11}
	vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3}
	\
	vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9}
	vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1}
	\
	vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
	vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
266
267// CH(A, B, C) = (A&B) ^ (~A&C)
268// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G)
269// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22
270// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25
271// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3
272// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10
273
// Main processing loop per round.
// PROCESS_LOOP performs one SHA-256 round across all 16 lanes at once.
// It expects the current round constant Kt to already be in TMP3 and,
// at the end, prefetches the next round's Kt from the table at TBL.
// Clobbers T1 and TMP0-TMP3.
#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \
	\ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt
	\ // T2 = SIGMA0(A) + MAJ(A, B, C)
	\ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2
	\
	\ // H becomes T2, then add T1 for A
	\ // D becomes D + T1 for E
	\
	vpaddd T1, _H, TMP3 \ // T1 = H + Kt
	vmovdqu32 TMP0, _E \
	vprord TMP1, _E, 6 \ // ROR_6(E)
	vprord TMP2, _E, 11 \ // ROR_11(E)
	vprord TMP3, _E, 25 \ // ROR_25(E)
	vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G)
	vpaddd T1, T1, _WT \ // T1 = T1 + Wt
	vpternlogd TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E)
	vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G)
	vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E)
	vpaddd _D, _D, T1 \ // D = D + T1
	\
	vprord _H, _A, 2 \ // ROR_2(A)
	vprord TMP2, _A, 13 \ // ROR_13(A)
	vprord TMP3, _A, 22 \ // ROR_22(A)
	vmovdqu32 TMP0, _A \
	vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C)
	vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A)
	vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C)
	vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1
	\
	vmovdqu32 TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt
306
// MSG_SCHED_ROUND_16_63 computes one message-schedule word for rounds
// 16-63, in place: _WT (holding Wt-16 on entry) becomes
// Wt = Wt-16 + sigma1(Wt-2) + Wt-7 + sigma0(Wt-15), 16 lanes at once.
// Clobbers TMP4-TMP6.
#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \
	vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2)
	vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2)
	vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2)
	vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2)
	\
	vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2)
	vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7
	\
	vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15)
	vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15)
	vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15)
	vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15)
	\
	vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) +
	\ // Wt-7 + sigma0(Wt-15) +
324
// Note this is reading in a block of data for one lane.
// When all 16 are read, the data must be transposed to build msg schedule.
// Skips the load (JE over it) when bit OFFSET of the lane mask is clear,
// i.e. when that lane has no more input.
// NOTE(review): textually identical to MASKED_LOAD below — candidates
// for consolidation.
#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \
	TESTQ $(1<<OFFSET), MASK_P9 \
	JE LABEL \
	MOVQ OFFSET*24(INPUT_P9), R9 \
	vmovups _WT, [inp0+IDX] \
LABEL: \
// MASKED_LOAD conditionally reads lane OFFSET's next vector of input:
// if bit OFFSET of the lane mask is clear the load is skipped entirely.
// Loads the lane's input pointer into R9 (inp0), then the data at
// [inp0+IDX] into _WT.
#define MASKED_LOAD(_WT, OFFSET, LABEL) \
	TESTQ $(1<<OFFSET), MASK_P9 \
	JE LABEL \
	MOVQ OFFSET*24(INPUT_P9), R9 \
	vmovups _WT,[inp0+IDX] \
LABEL: \
340
341TEXT ·sha256_x16_avx512(SB), 7, $0
342 MOVQ digests+0(FP), STATE_P9 //
343 MOVQ scratch+8(FP), SCRATCH_P9
344 MOVQ mask_len+32(FP), INP_SIZE_P9 // number of blocks to process
345 MOVQ mask+24(FP), MASKP_P9
346 MOVQ (MASKP_P9), MASK_P9
347 kmovq k1, mask
348 LEAQ inputs+48(FP), INPUT_P9
349
350 // Initialize digests
351 vmovdqu32 A, [STATE + 0*SHA256_DIGEST_ROW_SIZE]
352 vmovdqu32 B, [STATE + 1*SHA256_DIGEST_ROW_SIZE]
353 vmovdqu32 C, [STATE + 2*SHA256_DIGEST_ROW_SIZE]
354 vmovdqu32 D, [STATE + 3*SHA256_DIGEST_ROW_SIZE]
355 vmovdqu32 E, [STATE + 4*SHA256_DIGEST_ROW_SIZE]
356 vmovdqu32 F, [STATE + 5*SHA256_DIGEST_ROW_SIZE]
357 vmovdqu32 G, [STATE + 6*SHA256_DIGEST_ROW_SIZE]
358 vmovdqu32 H, [STATE + 7*SHA256_DIGEST_ROW_SIZE]
359
360 MOVQ table+16(FP), TBL_P9
361
362 xor IDX, IDX
363
364 // Read in first block of input data
365 MASKED_LOAD( W0, 0, skipInput0)
366 MASKED_LOAD( W1, 1, skipInput1)
367 MASKED_LOAD( W2, 2, skipInput2)
368 MASKED_LOAD( W3, 3, skipInput3)
369 MASKED_LOAD( W4, 4, skipInput4)
370 MASKED_LOAD( W5, 5, skipInput5)
371 MASKED_LOAD( W6, 6, skipInput6)
372 MASKED_LOAD( W7, 7, skipInput7)
373 MASKED_LOAD( W8, 8, skipInput8)
374 MASKED_LOAD( W9, 9, skipInput9)
375 MASKED_LOAD(W10, 10, skipInput10)
376 MASKED_LOAD(W11, 11, skipInput11)
377 MASKED_LOAD(W12, 12, skipInput12)
378 MASKED_LOAD(W13, 13, skipInput13)
379 MASKED_LOAD(W14, 14, skipInput14)
380 MASKED_LOAD(W15, 15, skipInput15)
381
382lloop:
383 LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), TBL_P9
384 vmovdqu32 TMP2, [TBL]
385
386 // Get first K from table
387 MOVQ table+16(FP), TBL_P9
388 vmovdqu32 TMP3, [TBL]
389
390 // Save digests for later addition
391 vmovdqu32 [SCRATCH + 64*0], A
392 vmovdqu32 [SCRATCH + 64*1], B
393 vmovdqu32 [SCRATCH + 64*2], C
394 vmovdqu32 [SCRATCH + 64*3], D
395 vmovdqu32 [SCRATCH + 64*4], E
396 vmovdqu32 [SCRATCH + 64*5], F
397 vmovdqu32 [SCRATCH + 64*6], G
398 vmovdqu32 [SCRATCH + 64*7], H
399
400 add IDX, 64
401
402 // Transpose input data
403 TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1)
404
405 vpshufb W0, W0, TMP2
406 vpshufb W1, W1, TMP2
407 vpshufb W2, W2, TMP2
408 vpshufb W3, W3, TMP2
409 vpshufb W4, W4, TMP2
410 vpshufb W5, W5, TMP2
411 vpshufb W6, W6, TMP2
412 vpshufb W7, W7, TMP2
413 vpshufb W8, W8, TMP2
414 vpshufb W9, W9, TMP2
415 vpshufb W10, W10, TMP2
416 vpshufb W11, W11, TMP2
417 vpshufb W12, W12, TMP2
418 vpshufb W13, W13, TMP2
419 vpshufb W14, W14, TMP2
420 vpshufb W15, W15, TMP2
421
422 // MSG Schedule for W0-W15 is now complete in registers
423 // Process first 48 rounds
424 // Calculate next Wt+16 after processing is complete and Wt is unneeded
425
426 PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H)
427 MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
428 PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G)
429 MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
430 PROCESS_LOOP( W2, 2, G, H, A, B, C, D, E, F)
431 MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
432 PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E)
433 MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
434 PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D)
435 MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
436 PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C)
437 MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
438 PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B)
439 MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
440 PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A)
441 MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
442 PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H)
443 MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
444 PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G)
445 MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
446 PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F)
447 MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
448 PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E)
449 MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
450 PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D)
451 MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
452 PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C)
453 MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
454 PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B)
455 MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
456 PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A)
457 MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
458 PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H)
459 MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
460 PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G)
461 MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
462 PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F)
463 MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
464 PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E)
465 MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
466 PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D)
467 MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
468 PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C)
469 MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
470 PROCESS_LOOP( W6, 22, C, D, E, F, G, H, A, B)
471 MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
472 PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A)
473 MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
474 PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H)
475 MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
476 PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G)
477 MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
478 PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F)
479 MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
480 PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E)
481 MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
482 PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D)
483 MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
484 PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C)
485 MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
486 PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B)
487 MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
488 PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A)
489 MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
490 PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H)
491 MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
492 PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G)
493 MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
494 PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F)
495 MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
496 PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E)
497 MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
498 PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D)
499 MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
500 PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C)
501 MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
502 PROCESS_LOOP( W6, 38, C, D, E, F, G, H, A, B)
503 MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
504 PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A)
505 MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
506 PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H)
507 MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
508 PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G)
509 MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
510 PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F)
511 MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
512 PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E)
513 MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
514 PROCESS_LOOP(W12, 44, E, F, G, H, A, B, C, D)
515 MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
516 PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C)
517 MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
518 PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B)
519 MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
520 PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A)
521 MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
522
523 // Check if this is the last block
524 sub INP_SIZE, 1
525 JE lastLoop
526
527 // Load next mask for inputs
528 ADDQ $8, MASKP_P9
529 MOVQ (MASKP_P9), MASK_P9
530
531 // Process last 16 rounds
532 // Read in next block msg data for use in first 16 words of msg sched
533
534 PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H)
535 MSG_SCHED_ROUND_00_15( W0, 0, skipNext0)
536 PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G)
537 MSG_SCHED_ROUND_00_15( W1, 1, skipNext1)
538 PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F)
539 MSG_SCHED_ROUND_00_15( W2, 2, skipNext2)
540 PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E)
541 MSG_SCHED_ROUND_00_15( W3, 3, skipNext3)
542 PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D)
543 MSG_SCHED_ROUND_00_15( W4, 4, skipNext4)
544 PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C)
545 MSG_SCHED_ROUND_00_15( W5, 5, skipNext5)
546 PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B)
547 MSG_SCHED_ROUND_00_15( W6, 6, skipNext6)
548 PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A)
549 MSG_SCHED_ROUND_00_15( W7, 7, skipNext7)
550 PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H)
551 MSG_SCHED_ROUND_00_15( W8, 8, skipNext8)
552 PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G)
553 MSG_SCHED_ROUND_00_15( W9, 9, skipNext9)
554 PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F)
555 MSG_SCHED_ROUND_00_15(W10, 10, skipNext10)
556 PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E)
557 MSG_SCHED_ROUND_00_15(W11, 11, skipNext11)
558 PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D)
559 MSG_SCHED_ROUND_00_15(W12, 12, skipNext12)
560 PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C)
561 MSG_SCHED_ROUND_00_15(W13, 13, skipNext13)
562 PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B)
563 MSG_SCHED_ROUND_00_15(W14, 14, skipNext14)
564 PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A)
565 MSG_SCHED_ROUND_00_15(W15, 15, skipNext15)
566
567 // Add old digest
568 vmovdqu32 TMP2, A
569 vmovdqu32 A, [SCRATCH + 64*0]
570 vpaddd A{k1}, A, TMP2
571 vmovdqu32 TMP2, B
572 vmovdqu32 B, [SCRATCH + 64*1]
573 vpaddd B{k1}, B, TMP2
574 vmovdqu32 TMP2, C
575 vmovdqu32 C, [SCRATCH + 64*2]
576 vpaddd C{k1}, C, TMP2
577 vmovdqu32 TMP2, D
578 vmovdqu32 D, [SCRATCH + 64*3]
579 vpaddd D{k1}, D, TMP2
580 vmovdqu32 TMP2, E
581 vmovdqu32 E, [SCRATCH + 64*4]
582 vpaddd E{k1}, E, TMP2
583 vmovdqu32 TMP2, F
584 vmovdqu32 F, [SCRATCH + 64*5]
585 vpaddd F{k1}, F, TMP2
586 vmovdqu32 TMP2, G
587 vmovdqu32 G, [SCRATCH + 64*6]
588 vpaddd G{k1}, G, TMP2
589 vmovdqu32 TMP2, H
590 vmovdqu32 H, [SCRATCH + 64*7]
591 vpaddd H{k1}, H, TMP2
592
593 kmovq k1, mask
594 JMP lloop
595
596lastLoop:
597 // Process last 16 rounds
598 PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H)
599 PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G)
600 PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F)
601 PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E)
602 PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D)
603 PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C)
604 PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B)
605 PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A)
606 PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H)
607 PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G)
608 PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F)
609 PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E)
610 PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D)
611 PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C)
612 PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B)
613 PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A)
614
615 // Add old digest
616 vmovdqu32 TMP2, A
617 vmovdqu32 A, [SCRATCH + 64*0]
618 vpaddd A{k1}, A, TMP2
619 vmovdqu32 TMP2, B
620 vmovdqu32 B, [SCRATCH + 64*1]
621 vpaddd B{k1}, B, TMP2
622 vmovdqu32 TMP2, C
623 vmovdqu32 C, [SCRATCH + 64*2]
624 vpaddd C{k1}, C, TMP2
625 vmovdqu32 TMP2, D
626 vmovdqu32 D, [SCRATCH + 64*3]
627 vpaddd D{k1}, D, TMP2
628 vmovdqu32 TMP2, E
629 vmovdqu32 E, [SCRATCH + 64*4]
630 vpaddd E{k1}, E, TMP2
631 vmovdqu32 TMP2, F
632 vmovdqu32 F, [SCRATCH + 64*5]
633 vpaddd F{k1}, F, TMP2
634 vmovdqu32 TMP2, G
635 vmovdqu32 G, [SCRATCH + 64*6]
636 vpaddd G{k1}, G, TMP2
637 vmovdqu32 TMP2, H
638 vmovdqu32 H, [SCRATCH + 64*7]
639 vpaddd H{k1}, H, TMP2
640
641 // Write out digest
642 vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A
643 vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B
644 vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C
645 vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D
646 vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E
647 vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F
648 vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G
649 vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H
650
651 VZEROUPPER
652 RET
653
//
// Tables
//

// Byte-shuffle pattern selecting bytes 3,2,1,0 within every 32-bit word:
// flips each dword between little- and big-endian byte order. The 16-byte
// pattern is replicated to fill the full 64-byte (512-bit) register.
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b
GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 // flag 8 = RODATA

// Qword permutation indices 0,1,8,9,4,5,12,13.
// NOTE(review): presumably the first operand of a two-source qword permute
// used by the 16x16 dword transpose of the message block — confirm against
// the transpose macro defined earlier in this file.
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D
GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64

// Qword permutation indices 2,3,10,11,6,7,14,15 — the complementary half
// of the transpose index pair above.
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F
GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
new file mode 100644
index 0000000..4b9473a
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
@@ -0,0 +1,501 @@
1//go:build !noasm && !appengine && gc
2// +build !noasm,!appengine,gc
3
4/*
5 * Minio Cloud Storage, (C) 2017 Minio, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
20package sha256
21
22import (
23 "encoding/binary"
24 "errors"
25 "hash"
26 "sort"
27 "sync/atomic"
28 "time"
29)
30
// sha256X16Avx512 runs the SHA256 rounds for up to 16 lanes in parallel;
// implemented in sha256blockAvx512_amd64.s. mask holds one lane-selection
// bitmask per 64-byte round (see genMask/expandMask below).
//go:noescape
func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, inputs [16][]byte)

// Avx512ServerUID - Do not start at 0 but next multiple of 16 so as to be able to
// differentiate with default initialization value of 0
const Avx512ServerUID = 16

// uidCounter is incremented atomically for every digest created by NewAvx512.
var uidCounter uint64
39
40// NewAvx512 - initialize sha256 Avx512 implementation.
41func NewAvx512(a512srv *Avx512Server) hash.Hash {
42 uid := atomic.AddUint64(&uidCounter, 1)
43 return &Avx512Digest{uid: uid, a512srv: a512srv}
44}
45
// Avx512Digest - Type for computing SHA256 using Avx512
type Avx512Digest struct {
	uid     uint64         // unique id used to route this digest's data through the shared server
	a512srv *Avx512Server  // server performing the actual 16-lane parallel hashing
	x       [chunk]byte    // buffer for data not yet forming a complete block
	nx      int            // number of bytes currently buffered in x
	len     uint64         // total number of bytes written so far
	final   bool           // set once Sum has finalized; further Writes are rejected
	result  [Size]byte     // cached final checksum (valid only when final is true)
}
56
// Size - Return size of checksum
func (d *Avx512Digest) Size() int { return Size }

// BlockSize - Return blocksize of checksum
// NOTE(review): value receiver here vs pointer receivers on the other
// methods — harmless (no state is read) but inconsistent.
func (d Avx512Digest) BlockSize() int { return BlockSize }
62
63// Reset - reset sha digest to its initial values
64func (d *Avx512Digest) Reset() {
65 d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true}
66 d.nx = 0
67 d.len = 0
68 d.final = false
69}
70
// Write to digest
func (d *Avx512Digest) Write(p []byte) (nn int, err error) {

	if d.final {
		return 0, errors.New("Avx512Digest already finalized. Reset first before writing again")
	}

	nn = len(p)
	d.len += uint64(nn)
	// Top up a partially filled block buffer first.
	if d.nx > 0 {
		n := copy(d.x[d.nx:], p)
		d.nx += n
		if d.nx == chunk {
			// Buffer holds a complete block; hand it to the server.
			// NOTE(review): d.x is sent by reference — the server must copy or
			// consume it before the next Write refills the buffer; confirm this
			// cannot race with the Process goroutine.
			d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]}
			d.nx = 0
		}
		p = p[n:]
	}
	// Forward all remaining complete blocks directly, without copying.
	if len(p) >= chunk {
		n := len(p) &^ (chunk - 1)
		d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]}
		p = p[n:]
	}
	// Stash the trailing partial block for a later Write or Sum.
	if len(p) > 0 {
		d.nx = copy(d.x[:], p)
	}
	return
}
99
100// Sum - Return sha256 sum in bytes
101func (d *Avx512Digest) Sum(in []byte) (result []byte) {
102
103 if d.final {
104 return append(in, d.result[:]...)
105 }
106
107 trail := make([]byte, 0, 128)
108 trail = append(trail, d.x[:d.nx]...)
109
110 len := d.len
111 // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
112 var tmp [64]byte
113 tmp[0] = 0x80
114 if len%64 < 56 {
115 trail = append(trail, tmp[0:56-len%64]...)
116 } else {
117 trail = append(trail, tmp[0:64+56-len%64]...)
118 }
119 d.nx = 0
120
121 // Length in bits.
122 len <<= 3
123 for i := uint(0); i < 8; i++ {
124 tmp[i] = byte(len >> (56 - 8*i))
125 }
126 trail = append(trail, tmp[0:8]...)
127
128 sumCh := make(chan [Size]byte)
129 d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh}
130 d.result = <-sumCh
131 d.final = true
132 return append(in, d.result[:]...)
133}
134
// table holds the 64 SHA256 round constants K0..K63 (FIPS 180-4), each
// 32-bit constant duplicated into both halves of a uint64 and repeated
// 8 times — i.e. one full 512-bit register (16 x 32-bit lanes) worth of
// constant data per round for the 16-lane AVX512 kernel.
var table = [512]uint64{
	0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98,
	0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98,
	0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491,
	0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491,
	0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf,
	0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf,
	0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5,
	0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5,
	0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b,
	0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b,
	0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1,
	0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1,
	0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4,
	0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4,
	0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5,
	0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5,
	0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98,
	0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98,
	0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01,
	0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01,
	0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be,
	0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be,
	0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3,
	0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3,
	0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74,
	0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74,
	0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe,
	0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe,
	0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7,
	0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7,
	0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174,
	0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174,
	0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1,
	0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1,
	0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786,
	0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786,
	0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6,
	0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6,
	0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc,
	0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc,
	0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f,
	0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f,
	0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa,
	0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa,
	0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc,
	0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc,
	0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da,
	0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da,
	0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152,
	0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152,
	0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d,
	0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d,
	0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8,
	0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8,
	0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7,
	0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7,
	0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3,
	0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3,
	0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147,
	0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147,
	0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351,
	0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351,
	0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967,
	0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967,
	0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85,
	0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85,
	0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138,
	0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138,
	0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc,
	0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc,
	0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13,
	0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13,
	0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354,
	0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354,
	0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb,
	0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb,
	0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e,
	0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e,
	0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85,
	0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85,
	0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1,
	0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1,
	0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b,
	0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b,
	0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70,
	0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70,
	0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3,
	0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3,
	0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819,
	0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819,
	0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624,
	0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624,
	0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585,
	0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585,
	0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070,
	0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070,
	0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116,
	0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116,
	0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08,
	0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08,
	0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c,
	0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c,
	0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5,
	0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5,
	0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3,
	0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3,
	0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a,
	0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a,
	0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f,
	0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f,
	0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3,
	0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3,
	0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee,
	0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee,
	0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f,
	0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f,
	0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814,
	0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814,
	0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208,
	0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208,
	0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa,
	0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa,
	0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb,
	0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb,
	0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7,
	0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7,
	0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2,
	0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2}
264
265// Interface function to assembly ode
266func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) [16][Size]byte {
267
268 scratch := [512]byte{}
269 sha256X16Avx512(digests, &scratch, &table, mask, input)
270
271 output := [16][Size]byte{}
272 for i := 0; i < 16; i++ {
273 output[i] = getDigest(i, digests[:])
274 }
275
276 return output
277}
278
279func getDigest(index int, state []byte) (sum [Size]byte) {
280 for j := 0; j < 16; j += 2 {
281 for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size {
282 binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4]))
283 }
284 }
285 return
286}
287
// Message to send across input channel
type blockInput struct {
	uid   uint64          // identifies which digest this data belongs to
	msg   []byte          // block-aligned message data (or the padding trailer when final)
	reset bool            // drop all pending state for uid instead of processing msg
	final bool            // this is the last message; the checksum is sent on sumCh
	sumCh chan [Size]byte // result channel, set only when final is true
}
296
// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations.
// After NewAvx512Server returns, the fields are only accessed from the single
// Process goroutine, so no locking is needed.
type Avx512Server struct {
	blocksCh chan blockInput       // Input channel
	totalIn  int                   // Total number of inputs waiting to be processed
	lanes    [16]Avx512LaneInfo    // Array with info per lane (out of 16)
	digests  map[uint64][Size]byte // Map of uids to (interim) digest results
}
304
// Avx512LaneInfo - Info for each lane
type Avx512LaneInfo struct {
	uid      uint64          // unique identification for this SHA processing
	block    []byte          // input block to be processed
	outputCh chan [Size]byte // channel for output result (nil unless this is the final block)
}
311
312// NewAvx512Server - Create new object for parallel processing handling
313func NewAvx512Server() *Avx512Server {
314 a512srv := &Avx512Server{}
315 a512srv.digests = make(map[uint64][Size]byte)
316 a512srv.blocksCh = make(chan blockInput)
317
318 // Start a single thread for reading from the input channel
319 go a512srv.Process()
320 return a512srv
321}
322
// Process - Sole handler for reading from the input channel
// (runs as a single goroutine, so lane/digest state needs no locking).
func (a512srv *Avx512Server) Process() {
	for {
		select {
		case block := <-a512srv.blocksCh:
			if block.reset {
				a512srv.reset(block.uid)
				continue
			}
			// Lanes are assigned by the low 4 bits of the uid, so two active
			// digests whose uids collide mod 16 share a lane — the occupied
			// slot forces an early flush below.
			index := block.uid & 0xf
			// fmt.Println("Adding message:", block.uid, index)

			if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs
				//fmt.Println("Invoking Blocks()")
				a512srv.blocks()
			}
			a512srv.totalIn++
			a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg}
			if block.final {
				a512srv.lanes[index].outputCh = block.sumCh
			}
			if a512srv.totalIn == len(a512srv.lanes) {
				// fmt.Println("Invoking Blocks() while FULL: ")
				a512srv.blocks()
			}

		// TODO: test with larger timeout
		// NOTE(review): time.After allocates a fresh timer every loop
		// iteration; a reused time.Timer would avoid that in this hot loop.
		case <-time.After(1 * time.Microsecond):
			// Timeout path: flush a partially filled set of lanes rather
			// than waiting indefinitely for the remaining lanes to fill.
			for _, lane := range a512srv.lanes {
				if lane.block != nil { // check if there is any input to process
					// fmt.Println("Invoking Blocks() on TIMEOUT: ")
					a512srv.blocks()
					break // we are done
				}
			}
		}
	}
}
361
362// Do a reset for this calculation
363func (a512srv *Avx512Server) reset(uid uint64) {
364
365 // Check if there is a message still waiting to be processed (and remove if so)
366 for i, lane := range a512srv.lanes {
367 if lane.uid == uid {
368 if lane.block != nil {
369 a512srv.lanes[i] = Avx512LaneInfo{} // clear message
370 a512srv.totalIn--
371 }
372 }
373 }
374
375 // Delete entry from hash map
376 delete(a512srv.digests, uid)
377}
378
379// Invoke assembly and send results back
380func (a512srv *Avx512Server) blocks() {
381
382 inputs := [16][]byte{}
383 for i := range inputs {
384 inputs[i] = a512srv.lanes[i].block
385 }
386
387 mask := expandMask(genMask(inputs))
388 outputs := blockAvx512(a512srv.getDigests(), inputs, mask)
389
390 a512srv.totalIn = 0
391 for i := 0; i < len(outputs); i++ {
392 uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh
393 a512srv.digests[uid] = outputs[i]
394 a512srv.lanes[i] = Avx512LaneInfo{}
395
396 if outputCh != nil {
397 // Send back result
398 outputCh <- outputs[i]
399 delete(a512srv.digests, uid) // Delete entry from hashmap
400 }
401 }
402}
403
// Write - queue a message for the digest identified by uid. Blocks until the
// Process goroutine has accepted it (blocksCh is unbuffered).
func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) {
	a512srv.blocksCh <- blockInput{uid: uid, msg: p}
	return len(p), nil
}
408
409// Sum - return sha256 sum in bytes for a given sum id.
410func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte {
411 sumCh := make(chan [32]byte)
412 a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh}
413 return <-sumCh
414}
415
// getDigests marshals the current interim digests (or the SHA256 initial
// values for uids without stored state) into the interleaved layout the
// assembly kernel expects: word w of lane i lives at byte offset (i+w*16)*4.
func (a512srv *Avx512Server) getDigests() *[512]byte {
	digests := [512]byte{}
	for i, lane := range a512srv.lanes {
		a, ok := a512srv.digests[lane.uid]
		if ok {
			// Interim digest known: copy each 32-bit word byte-swapped — the
			// BigEndian.PutUint32/LittleEndian.Uint32 pairing reverses the
			// byte order of every word (inverse of getDigest's extraction).
			binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4]))
			binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8]))
			binary.BigEndian.PutUint32(digests[(i+2*16)*4:], binary.LittleEndian.Uint32(a[8:12]))
			binary.BigEndian.PutUint32(digests[(i+3*16)*4:], binary.LittleEndian.Uint32(a[12:16]))
			binary.BigEndian.PutUint32(digests[(i+4*16)*4:], binary.LittleEndian.Uint32(a[16:20]))
			binary.BigEndian.PutUint32(digests[(i+5*16)*4:], binary.LittleEndian.Uint32(a[20:24]))
			binary.BigEndian.PutUint32(digests[(i+6*16)*4:], binary.LittleEndian.Uint32(a[24:28]))
			binary.BigEndian.PutUint32(digests[(i+7*16)*4:], binary.LittleEndian.Uint32(a[28:32]))
		} else {
			// No interim state: seed the lane with the SHA256 IV words.
			binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0)
			binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1)
			binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2)
			binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3)
			binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4)
			binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5)
			binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6)
			binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7)
		}
	}
	return &digests
}
442
// lane records the length and original slot of one input block, so inputs
// can be sorted by length while remembering where they came from.
type lane struct {
	len uint
	pos uint
}

// lanes implements sort.Interface, ordering by ascending block length.
type lanes []lane

func (l lanes) Len() int           { return len(l) }
func (l lanes) Less(i, j int) bool { return l[i].len < l[j].len }
func (l lanes) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }

// maskRounds pairs a lane-selection mask with the number of 64-byte rounds
// for which that mask stays valid.
type maskRounds struct {
	mask   uint64
	rounds uint64
}

// genMask computes the sequence of lane masks for a set of (possibly
// differently sized) inputs: all non-empty lanes start active, and a lane's
// bit is cleared once its (shorter) input is exhausted.
func genMask(input [16][]byte) [16]maskRounds {
	// Order the inputs by length, shortest first.
	var order [16]lane
	for pos, msg := range input {
		order[pos] = lane{len: uint(len(msg)), pos: uint(pos)}
	}
	sort.Sort(lanes(order[:]))

	// Walk the sorted inputs: whenever the 64-byte round count grows, emit
	// the current mask with the number of rounds it covers, then drop the
	// finished lane's bit from the mask.
	mask, round, index := uint64(0xffff), uint64(0), 0
	var mr [16]maskRounds
	for _, s := range order {
		if s.len > 0 {
			blocks := uint64(s.len) >> 6
			if blocks > round {
				mr[index] = maskRounds{mask: mask, rounds: blocks - round}
				index++
			}
			round = blocks
		}
		mask &^= 1 << uint(s.pos)
	}
	return mr
}

// TODO: remove function
// expandMask flattens the (mask, rounds) pairs into one mask per round.
func expandMask(mr [16]maskRounds) []uint64 {
	total := uint64(0)
	for _, r := range mr {
		total += r.rounds
	}
	expanded := make([]uint64, 0, total)
	for _, r := range mr {
		for j := uint64(0); j < r.rounds; j++ {
			expanded = append(expanded, r.mask)
		}
	}
	return expanded
}
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
new file mode 100644
index 0000000..cca534e
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
@@ -0,0 +1,267 @@
1//+build !noasm,!appengine,gc
2
3TEXT ·sha256X16Avx512(SB), 7, $0
4 MOVQ digests+0(FP), DI
5 MOVQ scratch+8(FP), R12
6 MOVQ mask_len+32(FP), SI
7 MOVQ mask_base+24(FP), R13
8 MOVQ (R13), R14
9 LONG $0x92fbc1c4; BYTE $0xce
10 LEAQ inputs+48(FP), AX
11 QUAD $0xf162076f487ef162; QUAD $0x7ef162014f6f487e; QUAD $0x487ef16202576f48; QUAD $0x6f487ef162035f6f; QUAD $0x6f6f487ef1620467; QUAD $0x06776f487ef16205; LONG $0x487ef162; WORD $0x7f6f; BYTE $0x07
12 MOVQ table+16(FP), DX
13 WORD $0x3148; BYTE $0xc9
14 TESTQ $(1<<0), R14
15 JE skipInput0
16 MOVQ 0*24(AX), R9
17 LONG $0x487cc162; WORD $0x0410; BYTE $0x09
18
19skipInput0:
20 TESTQ $(1<<1), R14
21 JE skipInput1
22 MOVQ 1*24(AX), R9
23 LONG $0x487cc162; WORD $0x0c10; BYTE $0x09
24
25skipInput1:
26 TESTQ $(1<<2), R14
27 JE skipInput2
28 MOVQ 2*24(AX), R9
29 LONG $0x487cc162; WORD $0x1410; BYTE $0x09
30
31skipInput2:
32 TESTQ $(1<<3), R14
33 JE skipInput3
34 MOVQ 3*24(AX), R9
35 LONG $0x487cc162; WORD $0x1c10; BYTE $0x09
36
37skipInput3:
38 TESTQ $(1<<4), R14
39 JE skipInput4
40 MOVQ 4*24(AX), R9
41 LONG $0x487cc162; WORD $0x2410; BYTE $0x09
42
43skipInput4:
44 TESTQ $(1<<5), R14
45 JE skipInput5
46 MOVQ 5*24(AX), R9
47 LONG $0x487cc162; WORD $0x2c10; BYTE $0x09
48
49skipInput5:
50 TESTQ $(1<<6), R14
51 JE skipInput6
52 MOVQ 6*24(AX), R9
53 LONG $0x487cc162; WORD $0x3410; BYTE $0x09
54
55skipInput6:
56 TESTQ $(1<<7), R14
57 JE skipInput7
58 MOVQ 7*24(AX), R9
59 LONG $0x487cc162; WORD $0x3c10; BYTE $0x09
60
61skipInput7:
62 TESTQ $(1<<8), R14
63 JE skipInput8
64 MOVQ 8*24(AX), R9
65 LONG $0x487c4162; WORD $0x0410; BYTE $0x09
66
67skipInput8:
68 TESTQ $(1<<9), R14
69 JE skipInput9
70 MOVQ 9*24(AX), R9
71 LONG $0x487c4162; WORD $0x0c10; BYTE $0x09
72
73skipInput9:
74 TESTQ $(1<<10), R14
75 JE skipInput10
76 MOVQ 10*24(AX), R9
77 LONG $0x487c4162; WORD $0x1410; BYTE $0x09
78
79skipInput10:
80 TESTQ $(1<<11), R14
81 JE skipInput11
82 MOVQ 11*24(AX), R9
83 LONG $0x487c4162; WORD $0x1c10; BYTE $0x09
84
85skipInput11:
86 TESTQ $(1<<12), R14
87 JE skipInput12
88 MOVQ 12*24(AX), R9
89 LONG $0x487c4162; WORD $0x2410; BYTE $0x09
90
91skipInput12:
92 TESTQ $(1<<13), R14
93 JE skipInput13
94 MOVQ 13*24(AX), R9
95 LONG $0x487c4162; WORD $0x2c10; BYTE $0x09
96
97skipInput13:
98 TESTQ $(1<<14), R14
99 JE skipInput14
100 MOVQ 14*24(AX), R9
101 LONG $0x487c4162; WORD $0x3410; BYTE $0x09
102
103skipInput14:
104 TESTQ $(1<<15), R14
105 JE skipInput15
106 MOVQ 15*24(AX), R9
107 LONG $0x487c4162; WORD $0x3c10; BYTE $0x09
108
109skipInput15:
110lloop:
111 LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), DX
112 LONG $0x487e7162; WORD $0x1a6f
113 MOVQ table+16(FP), DX
114 QUAD $0xd162226f487e7162; QUAD $0x7ed16224047f487e; QUAD $0x7ed16201244c7f48; QUAD $0x7ed1620224547f48; QUAD $0x7ed16203245c7f48; QUAD $0x7ed1620424647f48; QUAD $0x7ed16205246c7f48; QUAD $0x7ed1620624747f48; QUAD $0xc1834807247c7f48; QUAD $0x44c9c6407c316240; QUAD $0x62eec1c6407ca162; QUAD $0xa16244d3c6406c31; QUAD $0x34c162eed3c6406c; QUAD $0x407ca162dddac648; QUAD $0xc6407ca16288cac6; QUAD $0xcac648345162ddc2; QUAD $0x44d5c6405ca16288; QUAD $0x62eee5c6405ca162; QUAD $0xa16244d7c6404c31; QUAD $0x6cc162eef7c6404c; QUAD $0x405ca162ddfac640; QUAD $0xc6405ca16288eec6; QUAD $0xd2c6406cc162dde6; QUAD $0x44f1c6403c816288; QUAD $0x62eec1c6403c0162; QUAD $0x016244d3c6402c11; QUAD $0x4c4162eed3c6402c; QUAD $0x403c0162dddac640; QUAD $0xc6403c016288cac6; QUAD $0xf2c6404cc162ddc2; QUAD $0x44d5c6401c016288; QUAD $0x62eee5c6401c0162; QUAD $0x016244d7c6400c11; QUAD $0x2c4162eef7c6400c; QUAD $0x401c0162ddfac640; QUAD $0xc6401c016288eec6; QUAD $0xd2c6402c4162dde6; BYTE $0x88
115 LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX
116 LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8
117 QUAD $0x2262336f487e6162; QUAD $0x487e5162f27648b5; QUAD $0xd27648b53262106f; QUAD $0xa262136f487ee162; QUAD $0x487e5162d77640e5; QUAD $0xcf7640e53262086f; QUAD $0xa2621b6f487ee162; QUAD $0x487ec162dd7640f5; QUAD $0xfd7640f5a262386f; QUAD $0xa2620b6f487ee162; QUAD $0x487ec162cc7640fd; QUAD $0xec7640fda262286f; QUAD $0x8262036f487ee162; QUAD $0x487ec162c27640cd; QUAD $0xe27640cd8262206f; QUAD $0x8262336f487ee162; QUAD $0x487e4162f77640a5; QUAD $0xd77640a50262106f; QUAD $0x02621b6f487e6162; QUAD $0x487e4162dd7640b5; QUAD $0xfd7640b50262386f; QUAD $0x02620b6f487e6162; QUAD $0x487e4162cc7640bd; QUAD $0xec7640bd0262286f; QUAD $0x62eec023408d2362; QUAD $0x236244c023408da3; QUAD $0xada362eee42348ad; QUAD $0x40c5036244e42348; QUAD $0x2340c51362eef723; QUAD $0xfd2340d5036244d7; QUAD $0x44fd2340d58362ee; QUAD $0x62eeea2348b50362; QUAD $0x036244ea2348b583; QUAD $0xe51362eed32340e5; QUAD $0x40f5036244cb2340; QUAD $0x2340f58362eed923; QUAD $0xce2340ed236244d9; QUAD $0x44ce2340eda362ee; QUAD $0xc162d16f487ec162; QUAD $0x407dc262f26f487e; QUAD $0xcb004075c262c300; QUAD $0xc262d300406dc262; QUAD $0x405dc262db004065; QUAD $0xeb004055c262e300; QUAD $0xc262f300404dc262; QUAD $0x403d4262fb004045; QUAD $0xcb0040354262c300; QUAD $0x4262d300402d4262; QUAD $0x401d4262db004025; QUAD $0xeb0040154262e300; QUAD $0x4262f300400d4262; QUAD $0x48455162fb004005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6201626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD 
$0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916202626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16203; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16204626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; 
QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16205626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x06626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16207626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD 
$0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1620862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6209626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1620a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; 
QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591620b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91620c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591620d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD 
$0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x0e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591620f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591621062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; 
QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6211626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916212626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16213; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD 
$0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16214626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16215626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x16626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; 
QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16217626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1621862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6219626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD 
$0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1621a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591621b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91621c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; 
QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591621d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x1e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591621f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD 
$0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591622062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6221626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916222626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; 
QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16223; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16224626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16225626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD 
$0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x26626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16227626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1622862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; 
QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6229626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1622a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591622b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD 
$0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91622c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591622d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x2e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; 
QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591622f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591623062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x01ee8348fdfe4005
118 JE lastLoop
119 ADDQ $8, R13
120 MOVQ (R13), R14
121 QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x31
122 TESTQ $(1<<0), R14
123 JE skipNext0
124 MOVQ 0*24(AX), R9
125 LONG $0x487cc162; WORD $0x0410; BYTE $0x09
126
127skipNext0:
128 QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x32
129 TESTQ $(1<<1), R14
130 JE skipNext1
131 MOVQ 1*24(AX), R9
132 LONG $0x487cc162; WORD $0x0c10; BYTE $0x09
133
134skipNext1:
135 QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x33
136 TESTQ $(1<<2), R14
137 JE skipNext2
138 MOVQ 2*24(AX), R9
139 LONG $0x487cc162; WORD $0x1410; BYTE $0x09
140
141skipNext2:
142 QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x34
143 TESTQ $(1<<3), R14
144 JE skipNext3
145 MOVQ 3*24(AX), R9
146 LONG $0x487cc162; WORD $0x1c10; BYTE $0x09
147
148skipNext3:
149 QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x35
150 TESTQ $(1<<4), R14
151 JE skipNext4
152 MOVQ 4*24(AX), R9
153 LONG $0x487cc162; WORD $0x2410; BYTE $0x09
154
155skipNext4:
156 QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x36
157 TESTQ $(1<<5), R14
158 JE skipNext5
159 MOVQ 5*24(AX), R9
160 LONG $0x487cc162; WORD $0x2c10; BYTE $0x09
161
162skipNext5:
163 QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x37
164 TESTQ $(1<<6), R14
165 JE skipNext6
166 MOVQ 6*24(AX), R9
167 LONG $0x487cc162; WORD $0x3410; BYTE $0x09
168
169skipNext6:
170 QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x38
171 TESTQ $(1<<7), R14
172 JE skipNext7
173 MOVQ 7*24(AX), R9
174 LONG $0x487cc162; WORD $0x3c10; BYTE $0x09
175
176skipNext7:
177 QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x39
178 TESTQ $(1<<8), R14
179 JE skipNext8
180 MOVQ 8*24(AX), R9
181 LONG $0x487c4162; WORD $0x0410; BYTE $0x09
182
183skipNext8:
184 QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x3a
185 TESTQ $(1<<9), R14
186 JE skipNext9
187 MOVQ 9*24(AX), R9
188 LONG $0x487c4162; WORD $0x0c10; BYTE $0x09
189
190skipNext9:
191 QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x3b
192 TESTQ $(1<<10), R14
193 JE skipNext10
194 MOVQ 10*24(AX), R9
195 LONG $0x487c4162; WORD $0x1410; BYTE $0x09
196
197skipNext10:
198 QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x3c
199 TESTQ $(1<<11), R14
200 JE skipNext11
201 MOVQ 11*24(AX), R9
202 LONG $0x487c4162; WORD $0x1c10; BYTE $0x09
203
204skipNext11:
205 QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x3d
206 TESTQ $(1<<12), R14
207 JE skipNext12
208 MOVQ 12*24(AX), R9
209 LONG $0x487c4162; WORD $0x2410; BYTE $0x09
210
211skipNext12:
212 QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x3e
213 TESTQ $(1<<13), R14
214 JE skipNext13
215 MOVQ 13*24(AX), R9
216 LONG $0x487c4162; WORD $0x2c10; BYTE $0x09
217
218skipNext13:
219 QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x3f
220 TESTQ $(1<<14), R14
221 JE skipNext14
222 MOVQ 14*24(AX), R9
223 LONG $0x487c4162; WORD $0x3410; BYTE $0x09
224
225skipNext14:
226 QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x40
227 TESTQ $(1<<15), R14
228 JE skipNext15
229 MOVQ 15*24(AX), R9
230 LONG $0x487c4162; WORD $0x3c10; BYTE $0x09
231
232skipNext15:
233 QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0xc4fbfe4945d16207; LONG $0xce92fbc1
234 JMP lloop
235
236lastLoop:
237 QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516231626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d3162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x516232626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d516233; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x4865516234626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d3162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD 
$0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x6235626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623662; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d516237626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d3162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x38626f487e7162c0; QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516239626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d1162cacd25; 
QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x51623a626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d51623b; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x486551623c626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d1162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x623d626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD 
$0xc4fe487551623e62; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d51623f626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d1162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x40626f487e7162c0; QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0x62fbfe4945d16207; QUAD $0x7ef162077f487ef1; QUAD $0x487ef162014f7f48; QUAD $0x7f487ef16202577f; QUAD $0x677f487ef162035f; QUAD $0x056f7f487ef16204; QUAD $0x6206777f487ef162; LONG $0x7f487ef1; WORD $0x077f
238 VZEROUPPER
239 RET
240
241DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203
242DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b
243DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203
244DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b
245DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203
246DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b
247DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203
248DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b
249GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64
250DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000
251DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001
252DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008
253DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009
254DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004
255DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005
256DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C
257DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D
258GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64
259DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002
260DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003
261DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A
262DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B
263DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006
264DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007
265DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E
266DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F
267GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go
new file mode 100644
index 0000000..e536f54
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go
@@ -0,0 +1,31 @@
1//go:build !noasm && !appengine && gc
2// +build !noasm,!appengine,gc
3
4/*
5 * Minio Cloud Storage, (C) 2016 Minio, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
20package sha256
21
22func blockArmSha2Go(dig *digest, p []byte) {
23 panic("blockArmSha2Go called unexpectedly")
24}
25
26//go:noescape
27func blockIntelSha(h *[8]uint32, message []uint8)
28
29func blockIntelShaGo(dig *digest, p []byte) {
30 blockIntelSha(&dig.h, p)
31}
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.s b/vendor/github.com/minio/sha256-simd/sha256block_amd64.s
new file mode 100644
index 0000000..c98a1d8
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_amd64.s
@@ -0,0 +1,266 @@
1//+build !noasm,!appengine,gc
2
3// SHA intrinsic version of SHA256
4
5// Kristofer Peterson, (C) 2018.
6//
7// Licensed under the Apache License, Version 2.0 (the "License");
8// you may not use this file except in compliance with the License.
9// You may obtain a copy of the License at
10//
11// http://www.apache.org/licenses/LICENSE-2.0
12//
13// Unless required by applicable law or agreed to in writing, software
14// distributed under the License is distributed on an "AS IS" BASIS,
15// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16// See the License for the specific language governing permissions and
17// limitations under the License.
18//
19
20#include "textflag.h"
21
22DATA K<>+0x00(SB)/4, $0x428a2f98
23DATA K<>+0x04(SB)/4, $0x71374491
24DATA K<>+0x08(SB)/4, $0xb5c0fbcf
25DATA K<>+0x0c(SB)/4, $0xe9b5dba5
26DATA K<>+0x10(SB)/4, $0x3956c25b
27DATA K<>+0x14(SB)/4, $0x59f111f1
28DATA K<>+0x18(SB)/4, $0x923f82a4
29DATA K<>+0x1c(SB)/4, $0xab1c5ed5
30DATA K<>+0x20(SB)/4, $0xd807aa98
31DATA K<>+0x24(SB)/4, $0x12835b01
32DATA K<>+0x28(SB)/4, $0x243185be
33DATA K<>+0x2c(SB)/4, $0x550c7dc3
34DATA K<>+0x30(SB)/4, $0x72be5d74
35DATA K<>+0x34(SB)/4, $0x80deb1fe
36DATA K<>+0x38(SB)/4, $0x9bdc06a7
37DATA K<>+0x3c(SB)/4, $0xc19bf174
38DATA K<>+0x40(SB)/4, $0xe49b69c1
39DATA K<>+0x44(SB)/4, $0xefbe4786
40DATA K<>+0x48(SB)/4, $0x0fc19dc6
41DATA K<>+0x4c(SB)/4, $0x240ca1cc
42DATA K<>+0x50(SB)/4, $0x2de92c6f
43DATA K<>+0x54(SB)/4, $0x4a7484aa
44DATA K<>+0x58(SB)/4, $0x5cb0a9dc
45DATA K<>+0x5c(SB)/4, $0x76f988da
46DATA K<>+0x60(SB)/4, $0x983e5152
47DATA K<>+0x64(SB)/4, $0xa831c66d
48DATA K<>+0x68(SB)/4, $0xb00327c8
49DATA K<>+0x6c(SB)/4, $0xbf597fc7
50DATA K<>+0x70(SB)/4, $0xc6e00bf3
51DATA K<>+0x74(SB)/4, $0xd5a79147
52DATA K<>+0x78(SB)/4, $0x06ca6351
53DATA K<>+0x7c(SB)/4, $0x14292967
54DATA K<>+0x80(SB)/4, $0x27b70a85
55DATA K<>+0x84(SB)/4, $0x2e1b2138
56DATA K<>+0x88(SB)/4, $0x4d2c6dfc
57DATA K<>+0x8c(SB)/4, $0x53380d13
58DATA K<>+0x90(SB)/4, $0x650a7354
59DATA K<>+0x94(SB)/4, $0x766a0abb
60DATA K<>+0x98(SB)/4, $0x81c2c92e
61DATA K<>+0x9c(SB)/4, $0x92722c85
62DATA K<>+0xa0(SB)/4, $0xa2bfe8a1
63DATA K<>+0xa4(SB)/4, $0xa81a664b
64DATA K<>+0xa8(SB)/4, $0xc24b8b70
65DATA K<>+0xac(SB)/4, $0xc76c51a3
66DATA K<>+0xb0(SB)/4, $0xd192e819
67DATA K<>+0xb4(SB)/4, $0xd6990624
68DATA K<>+0xb8(SB)/4, $0xf40e3585
69DATA K<>+0xbc(SB)/4, $0x106aa070
70DATA K<>+0xc0(SB)/4, $0x19a4c116
71DATA K<>+0xc4(SB)/4, $0x1e376c08
72DATA K<>+0xc8(SB)/4, $0x2748774c
73DATA K<>+0xcc(SB)/4, $0x34b0bcb5
74DATA K<>+0xd0(SB)/4, $0x391c0cb3
75DATA K<>+0xd4(SB)/4, $0x4ed8aa4a
76DATA K<>+0xd8(SB)/4, $0x5b9cca4f
77DATA K<>+0xdc(SB)/4, $0x682e6ff3
78DATA K<>+0xe0(SB)/4, $0x748f82ee
79DATA K<>+0xe4(SB)/4, $0x78a5636f
80DATA K<>+0xe8(SB)/4, $0x84c87814
81DATA K<>+0xec(SB)/4, $0x8cc70208
82DATA K<>+0xf0(SB)/4, $0x90befffa
83DATA K<>+0xf4(SB)/4, $0xa4506ceb
84DATA K<>+0xf8(SB)/4, $0xbef9a3f7
85DATA K<>+0xfc(SB)/4, $0xc67178f2
86GLOBL K<>(SB), RODATA|NOPTR, $256
87
88DATA SHUF_MASK<>+0x00(SB)/8, $0x0405060700010203
89DATA SHUF_MASK<>+0x08(SB)/8, $0x0c0d0e0f08090a0b
90GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16
91
92// Register Usage
93// BX base address of constant table (constant)
94// DX hash_state (constant)
95// SI hash_data.data
96// DI hash_data.data + hash_data.length - 64 (constant)
97// X0 scratch
98// X1 scratch
99// X2 working hash state // ABEF
100// X3 working hash state // CDGH
101// X4 first 16 bytes of block
102// X5 second 16 bytes of block
103// X6 third 16 bytes of block
104// X7 fourth 16 bytes of block
105// X12 saved hash state // ABEF
106// X13 saved hash state // CDGH
107// X15 data shuffle mask (constant)
108
109TEXT ·blockIntelSha(SB), NOSPLIT, $0-32
110 MOVQ h+0(FP), DX
111 MOVQ message_base+8(FP), SI
112 MOVQ message_len+16(FP), DI
113 LEAQ -64(SI)(DI*1), DI
114 MOVOU (DX), X2
115 MOVOU 16(DX), X1
116 MOVO X2, X3
117 PUNPCKLLQ X1, X2
118 PUNPCKHLQ X1, X3
119 PSHUFD $0x27, X2, X2
120 PSHUFD $0x27, X3, X3
121 MOVO SHUF_MASK<>(SB), X15
122 LEAQ K<>(SB), BX
123
124 JMP TEST
125
126LOOP:
127 MOVO X2, X12
128 MOVO X3, X13
129
130 // load block and shuffle
131 MOVOU (SI), X4
132 MOVOU 16(SI), X5
133 MOVOU 32(SI), X6
134 MOVOU 48(SI), X7
135 PSHUFB X15, X4
136 PSHUFB X15, X5
137 PSHUFB X15, X6
138 PSHUFB X15, X7
139
140#define ROUND456 \
141 PADDL X5, X0 \
142 LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2
143 MOVO X5, X1 \
144 LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1, XMM4, 4
145 PADDL X1, X6 \
146 LONG $0xf5cd380f \ // SHA256MSG2 XMM6, XMM5
147 PSHUFD $0x4e, X0, X0 \
148 LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3
149 LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5
150
151#define ROUND567 \
152 PADDL X6, X0 \
153 LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2
154 MOVO X6, X1 \
155 LONG $0x0f3a0f66; WORD $0x04cd \ // PALIGNR XMM1, XMM5, 4
156 PADDL X1, X7 \
157 LONG $0xfecd380f \ // SHA256MSG2 XMM7, XMM6
158 PSHUFD $0x4e, X0, X0 \
159 LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3
160 LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6
161
162#define ROUND674 \
163 PADDL X7, X0 \
164 LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2
165 MOVO X7, X1 \
166 LONG $0x0f3a0f66; WORD $0x04ce \ // PALIGNR XMM1, XMM6, 4
167 PADDL X1, X4 \
168 LONG $0xe7cd380f \ // SHA256MSG2 XMM4, XMM7
169 PSHUFD $0x4e, X0, X0 \
170 LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3
171 LONG $0xf7cc380f // SHA256MSG1 XMM6, XMM7
172
173#define ROUND745 \
174 PADDL X4, X0 \
175 LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2
176 MOVO X4, X1 \
177 LONG $0x0f3a0f66; WORD $0x04cf \ // PALIGNR XMM1, XMM7, 4
178 PADDL X1, X5 \
179 LONG $0xeccd380f \ // SHA256MSG2 XMM5, XMM4
180 PSHUFD $0x4e, X0, X0 \
181 LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3
182 LONG $0xfccc380f // SHA256MSG1 XMM7, XMM4
183
184 // rounds 0-3
185 MOVO (BX), X0
186 PADDL X4, X0
187 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
188 PSHUFD $0x4e, X0, X0
189 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
190
191 // rounds 4-7
192 MOVO 1*16(BX), X0
193 PADDL X5, X0
194 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
195 PSHUFD $0x4e, X0, X0
196 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
197 LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5
198
199 // rounds 8-11
200 MOVO 2*16(BX), X0
201 PADDL X6, X0
202 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
203 PSHUFD $0x4e, X0, X0
204 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
205 LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6
206
207 MOVO 3*16(BX), X0; ROUND674 // rounds 12-15
208 MOVO 4*16(BX), X0; ROUND745 // rounds 16-19
209 MOVO 5*16(BX), X0; ROUND456 // rounds 20-23
210 MOVO 6*16(BX), X0; ROUND567 // rounds 24-27
211 MOVO 7*16(BX), X0; ROUND674 // rounds 28-31
212 MOVO 8*16(BX), X0; ROUND745 // rounds 32-35
213 MOVO 9*16(BX), X0; ROUND456 // rounds 36-39
214 MOVO 10*16(BX), X0; ROUND567 // rounds 40-43
215 MOVO 11*16(BX), X0; ROUND674 // rounds 44-47
216 MOVO 12*16(BX), X0; ROUND745 // rounds 48-51
217
218 // rounds 52-55
219 MOVO 13*16(BX), X0
220 PADDL X5, X0
221 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
222 MOVO X5, X1
223 LONG $0x0f3a0f66; WORD $0x04cc // PALIGNR XMM1, XMM4, 4
224 PADDL X1, X6
225 LONG $0xf5cd380f // SHA256MSG2 XMM6, XMM5
226 PSHUFD $0x4e, X0, X0
227 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
228
229 // rounds 56-59
230 MOVO 14*16(BX), X0
231 PADDL X6, X0
232 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
233 MOVO X6, X1
234 LONG $0x0f3a0f66; WORD $0x04cd // PALIGNR XMM1, XMM5, 4
235 PADDL X1, X7
236 LONG $0xfecd380f // SHA256MSG2 XMM7, XMM6
237 PSHUFD $0x4e, X0, X0
238 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
239
240 // rounds 60-63
241 MOVO 15*16(BX), X0
242 PADDL X7, X0
243 LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2
244 PSHUFD $0x4e, X0, X0
245 LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3
246
247 PADDL X12, X2
248 PADDL X13, X3
249
250 ADDQ $64, SI
251
252TEST:
253 CMPQ SI, DI
254 JBE LOOP
255
256 PSHUFD $0x4e, X3, X0
257 LONG $0x0e3a0f66; WORD $0xf0c2 // PBLENDW XMM0, XMM2, 0xf0
258 PSHUFD $0x4e, X2, X1
259 LONG $0x0e3a0f66; WORD $0x0fcb // PBLENDW XMM1, XMM3, 0x0f
260 PSHUFD $0x1b, X0, X0
261 PSHUFD $0x1b, X1, X1
262
263 MOVOU X0, (DX)
264 MOVOU X1, 16(DX)
265
266 RET
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go
new file mode 100644
index 0000000..d4369e2
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go
@@ -0,0 +1,37 @@
1//go:build !noasm && !appengine && gc
2// +build !noasm,!appengine,gc
3
4/*
5 * Minio Cloud Storage, (C) 2016 Minio, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
20package sha256
21
// blockIntelShaGo is the Intel SHA-extensions entry point; on arm64 the
// dispatch logic must never select it, so reaching it indicates a bug.
func blockIntelShaGo(dig *digest, p []byte) {
	panic("blockIntelShaGo called unexpectedly")
}
25
// blockArmSha2 is implemented in sha256block_arm64.s using the ARMv8 SHA2
// crypto extensions. h holds the eight 32-bit chaining values; the assembly
// consumes message in whole 64-byte blocks and ignores any trailing remainder.
//
//go:noescape
func blockArmSha2(h []uint32, message []uint8)
28
29func blockArmSha2Go(dig *digest, p []byte) {
30
31 h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]}
32
33 blockArmSha2(h[:], p[:])
34
35 dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4],
36 h[5], h[6], h[7]
37}
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s
new file mode 100644
index 0000000..7ab88b1
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s
@@ -0,0 +1,192 @@
1//+build !noasm,!appengine,gc
2
3// ARM64 version of SHA256
4
5//
6// Minio Cloud Storage, (C) 2016 Minio, Inc.
7//
8// Licensed under the Apache License, Version 2.0 (the "License");
9// you may not use this file except in compliance with the License.
10// You may obtain a copy of the License at
11//
12// http://www.apache.org/licenses/LICENSE-2.0
13//
14// Unless required by applicable law or agreed to in writing, software
15// distributed under the License is distributed on an "AS IS" BASIS,
16// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17// See the License for the specific language governing permissions and
18// limitations under the License.
19//
20
21//
22// Based on implementation as found in https://github.com/jocover/sha256-armv8
23//
24// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to
25// their Plan9 equivalents
26//
27
// func blockArmSha2(h []uint32, message []uint8)
//
// Consumes message in whole 64-byte blocks (a trailing partial block is
// ignored) and updates the eight 32-bit hash words at h in place using the
// ARMv8 SHA2 crypto instructions (sha256h/sha256h2/sha256su0/sha256su1).
// The instructions are emitted as raw WORD encodings because the assembler
// lacks mnemonics for them; each comment shows the intended instruction.
// Flag 7 = NOPROF|DUPOK|NOSPLIT (see textflag.h) — presumably chosen to
// avoid a stack split in the leaf function; TODO confirm.
TEXT ·blockArmSha2(SB), 7, $0
	MOVD h+0(FP), R0            // x0 -> hash state h
	MOVD message+24(FP), R1     // x1 -> message bytes
	MOVD message_len+32(FP), R2 // length of message
	SUBS $64, R2                // need at least one whole 64-byte block
	BMI  complete

	// Load constants table pointer
	MOVD $·constants(SB), R3

	// Cache constants table in registers v16 - v31
	WORD $0x4cdf2870 // ld1 {v16.4s-v19.4s}, [x3], #64
	WORD $0x4cdf7800 // ld1 {v0.4s}, [x0], #16     (hash words a-d)
	WORD $0x4cdf2874 // ld1 {v20.4s-v23.4s}, [x3], #64

	WORD $0x4c407801 // ld1 {v1.4s}, [x0]          (hash words e-h)
	WORD $0x4cdf2878 // ld1 {v24.4s-v27.4s}, [x3], #64
	WORD $0xd1004000 // sub x0, x0, #0x10          (rewind x0 to start of h)
	WORD $0x4cdf287c // ld1 {v28.4s-v31.4s}, [x3], #64

loop:
	// Main loop: one 64-byte block per iteration.
	// v2/v3 carry the working state, v5-v8 the message schedule,
	// v9/v10 the schedule+constant inputs for the round instructions.
	WORD $0x4cdf2025 // ld1 {v5.16b-v8.16b}, [x1], #64
	WORD $0x4ea01c02 // mov v2.16b, v0.16b
	WORD $0x4ea11c23 // mov v3.16b, v1.16b
	WORD $0x6e2008a5 // rev32 v5.16b, v5.16b
	WORD $0x6e2008c6 // rev32 v6.16b, v6.16b
	WORD $0x4eb084a9 // add v9.4s, v5.4s, v16.4s
	WORD $0x6e2008e7 // rev32 v7.16b, v7.16b
	WORD $0x4eb184ca // add v10.4s, v6.4s, v17.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e094062 // sha256h q2, q3, v9.4s
	WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
	WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s
	WORD $0x6e200908 // rev32 v8.16b, v8.16b
	WORD $0x4eb284e9 // add v9.4s, v7.4s, v18.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
	WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
	WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s
	WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s
	WORD $0x4eb3850a // add v10.4s, v8.4s, v19.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e094062 // sha256h q2, q3, v9.4s
	WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
	WORD $0x5e282907 // sha256su0 v7.4s, v8.4s
	WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s
	WORD $0x4eb484a9 // add v9.4s, v5.4s, v20.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
	WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
	WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s
	WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s
	WORD $0x4eb584ca // add v10.4s, v6.4s, v21.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e094062 // sha256h q2, q3, v9.4s
	WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
	WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s
	WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s
	WORD $0x4eb684e9 // add v9.4s, v7.4s, v22.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
	WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
	WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s
	WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s
	WORD $0x4eb7850a // add v10.4s, v8.4s, v23.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e094062 // sha256h q2, q3, v9.4s
	WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
	WORD $0x5e282907 // sha256su0 v7.4s, v8.4s
	WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s
	WORD $0x4eb884a9 // add v9.4s, v5.4s, v24.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
	WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
	WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s
	WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s
	WORD $0x4eb984ca // add v10.4s, v6.4s, v25.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e094062 // sha256h q2, q3, v9.4s
	WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
	WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s
	WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s
	WORD $0x4eba84e9 // add v9.4s, v7.4s, v26.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
	WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
	WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s
	WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s
	WORD $0x4ebb850a // add v10.4s, v8.4s, v27.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e094062 // sha256h q2, q3, v9.4s
	WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
	WORD $0x5e282907 // sha256su0 v7.4s, v8.4s
	WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s
	WORD $0x4ebc84a9 // add v9.4s, v5.4s, v28.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
	WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
	WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s
	WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s
	WORD $0x4ebd84ca // add v10.4s, v6.4s, v29.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e094062 // sha256h q2, q3, v9.4s
	WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
	WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s
	WORD $0x4ebe84e9 // add v9.4s, v7.4s, v30.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
	WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
	WORD $0x4ebf850a // add v10.4s, v8.4s, v31.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e094062 // sha256h q2, q3, v9.4s
	WORD $0x5e095083 // sha256h2 q3, q4, v9.4s
	WORD $0x4ea21c44 // mov v4.16b, v2.16b
	WORD $0x5e0a4062 // sha256h q2, q3, v10.4s
	WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s
	WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s    (fold block result into e-h)
	WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s    (fold block result into a-d)

	SUBS $64, R2 // another whole block available?
	BPL  loop

	// Store result
	WORD $0x4c00a800 // st1 {v0.4s, v1.4s}, [x0]

complete:
	RET
156
// Constants table
//
// The 64 SHA-256 round constants K[0..63]. Each 8-byte DATA entry packs two
// consecutive 32-bit constants little-endian (lower-indexed constant in the
// low word), forming the 256-byte table loaded into v16-v31 above.
DATA ·constants+0x0(SB)/8, $0x71374491428a2f98
DATA ·constants+0x8(SB)/8, $0xe9b5dba5b5c0fbcf
DATA ·constants+0x10(SB)/8, $0x59f111f13956c25b
DATA ·constants+0x18(SB)/8, $0xab1c5ed5923f82a4
DATA ·constants+0x20(SB)/8, $0x12835b01d807aa98
DATA ·constants+0x28(SB)/8, $0x550c7dc3243185be
DATA ·constants+0x30(SB)/8, $0x80deb1fe72be5d74
DATA ·constants+0x38(SB)/8, $0xc19bf1749bdc06a7
DATA ·constants+0x40(SB)/8, $0xefbe4786e49b69c1
DATA ·constants+0x48(SB)/8, $0x240ca1cc0fc19dc6
DATA ·constants+0x50(SB)/8, $0x4a7484aa2de92c6f
DATA ·constants+0x58(SB)/8, $0x76f988da5cb0a9dc
DATA ·constants+0x60(SB)/8, $0xa831c66d983e5152
DATA ·constants+0x68(SB)/8, $0xbf597fc7b00327c8
DATA ·constants+0x70(SB)/8, $0xd5a79147c6e00bf3
DATA ·constants+0x78(SB)/8, $0x1429296706ca6351
DATA ·constants+0x80(SB)/8, $0x2e1b213827b70a85
DATA ·constants+0x88(SB)/8, $0x53380d134d2c6dfc
DATA ·constants+0x90(SB)/8, $0x766a0abb650a7354
DATA ·constants+0x98(SB)/8, $0x92722c8581c2c92e
DATA ·constants+0xa0(SB)/8, $0xa81a664ba2bfe8a1
DATA ·constants+0xa8(SB)/8, $0xc76c51a3c24b8b70
DATA ·constants+0xb0(SB)/8, $0xd6990624d192e819
DATA ·constants+0xb8(SB)/8, $0x106aa070f40e3585
DATA ·constants+0xc0(SB)/8, $0x1e376c0819a4c116
DATA ·constants+0xc8(SB)/8, $0x34b0bcb52748774c
DATA ·constants+0xd0(SB)/8, $0x4ed8aa4a391c0cb3
DATA ·constants+0xd8(SB)/8, $0x682e6ff35b9cca4f
DATA ·constants+0xe0(SB)/8, $0x78a5636f748f82ee
DATA ·constants+0xe8(SB)/8, $0x8cc7020884c87814
DATA ·constants+0xf0(SB)/8, $0xa4506ceb90befffa
DATA ·constants+0xf8(SB)/8, $0xc67178f2bef9a3f7

GLOBL ·constants(SB), 8, $256 // flag 8 = RODATA (textflag.h)
192
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_other.go b/vendor/github.com/minio/sha256-simd/sha256block_other.go
new file mode 100644
index 0000000..94d7eb0
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_other.go
@@ -0,0 +1,29 @@
1//go:build appengine || noasm || (!amd64 && !arm64) || !gc
2// +build appengine noasm !amd64,!arm64 !gc
3
4/*
5 * Minio Cloud Storage, (C) 2019 Minio, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
20package sha256
21
22func blockIntelShaGo(dig *digest, p []byte) {
23 panic("blockIntelShaGo called unexpectedly")
24
25}
26
27func blockArmSha2Go(dig *digest, p []byte) {
28 panic("blockArmSha2Go called unexpectedly")
29}
diff --git a/vendor/github.com/minio/sha256-simd/test-architectures.sh b/vendor/github.com/minio/sha256-simd/test-architectures.sh
new file mode 100644
index 0000000..50150ea
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/test-architectures.sh
@@ -0,0 +1,15 @@
#!/bin/sh

set -e

# Cross-compile the package for every GOOS/GOARCH pair the toolchain knows
# about, once per build-tag combination, so mistakes in build constraints are
# caught on all platforms. Nothing is run; -o /dev/null discards the output.
go tool dist list | while IFS=/ read os arch; do
    echo "Checking $os/$arch..."
    echo "  normal"
    GOARCH=$arch GOOS=$os go build -o /dev/null ./...
    echo "  noasm"
    GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null ./...
    echo "  appengine"
    GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null ./...
    echo "  noasm,appengine"
    GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null ./...
done
diff --git a/vendor/github.com/modern-go/concurrent/.gitignore b/vendor/github.com/modern-go/concurrent/.gitignore
new file mode 100644
index 0000000..3f2bc47
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/.gitignore
@@ -0,0 +1 @@
/coverage.txt
diff --git a/vendor/github.com/modern-go/concurrent/.travis.yml b/vendor/github.com/modern-go/concurrent/.travis.yml
new file mode 100644
index 0000000..449e67c
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/.travis.yml
@@ -0,0 +1,14 @@
1language: go
2
3go:
4 - 1.8.x
5 - 1.x
6
7before_install:
8 - go get -t -v ./...
9
10script:
11 - ./test.sh
12
13after_success:
14 - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/github.com/modern-go/concurrent/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/LICENSE
@@ -0,0 +1,201 @@
1 Apache License
2 Version 2.0, January 2004
3 http://www.apache.org/licenses/
4
5 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
7 1. Definitions.
8
9 "License" shall mean the terms and conditions for use, reproduction,
10 and distribution as defined by Sections 1 through 9 of this document.
11
12 "Licensor" shall mean the copyright owner or entity authorized by
13 the copyright owner that is granting the License.
14
15 "Legal Entity" shall mean the union of the acting entity and all
16 other entities that control, are controlled by, or are under common
17 control with that entity. For the purposes of this definition,
18 "control" means (i) the power, direct or indirect, to cause the
19 direction or management of such entity, whether by contract or
20 otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 outstanding shares, or (iii) beneficial ownership of such entity.
22
23 "You" (or "Your") shall mean an individual or Legal Entity
24 exercising permissions granted by this License.
25
26 "Source" form shall mean the preferred form for making modifications,
27 including but not limited to software source code, documentation
28 source, and configuration files.
29
30 "Object" form shall mean any form resulting from mechanical
31 transformation or translation of a Source form, including but
32 not limited to compiled object code, generated documentation,
33 and conversions to other media types.
34
35 "Work" shall mean the work of authorship, whether in Source or
36 Object form, made available under the License, as indicated by a
37 copyright notice that is included in or attached to the work
38 (an example is provided in the Appendix below).
39
40 "Derivative Works" shall mean any work, whether in Source or Object
41 form, that is based on (or derived from) the Work and for which the
42 editorial revisions, annotations, elaborations, or other modifications
43 represent, as a whole, an original work of authorship. For the purposes
44 of this License, Derivative Works shall not include works that remain
45 separable from, or merely link (or bind by name) to the interfaces of,
46 the Work and Derivative Works thereof.
47
48 "Contribution" shall mean any work of authorship, including
49 the original version of the Work and any modifications or additions
50 to that Work or Derivative Works thereof, that is intentionally
51 submitted to Licensor for inclusion in the Work by the copyright owner
52 or by an individual or Legal Entity authorized to submit on behalf of
53 the copyright owner. For the purposes of this definition, "submitted"
54 means any form of electronic, verbal, or written communication sent
55 to the Licensor or its representatives, including but not limited to
56 communication on electronic mailing lists, source code control systems,
57 and issue tracking systems that are managed by, or on behalf of, the
58 Licensor for the purpose of discussing and improving the Work, but
59 excluding communication that is conspicuously marked or otherwise
60 designated in writing by the copyright owner as "Not a Contribution."
61
62 "Contributor" shall mean Licensor and any individual or Legal Entity
63 on behalf of whom a Contribution has been received by Licensor and
64 subsequently incorporated within the Work.
65
66 2. Grant of Copyright License. Subject to the terms and conditions of
67 this License, each Contributor hereby grants to You a perpetual,
68 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 copyright license to reproduce, prepare Derivative Works of,
70 publicly display, publicly perform, sublicense, and distribute the
71 Work and such Derivative Works in Source or Object form.
72
73 3. Grant of Patent License. Subject to the terms and conditions of
74 this License, each Contributor hereby grants to You a perpetual,
75 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 (except as stated in this section) patent license to make, have made,
77 use, offer to sell, sell, import, and otherwise transfer the Work,
78 where such license applies only to those patent claims licensable
79 by such Contributor that are necessarily infringed by their
80 Contribution(s) alone or by combination of their Contribution(s)
81 with the Work to which such Contribution(s) was submitted. If You
82 institute patent litigation against any entity (including a
83 cross-claim or counterclaim in a lawsuit) alleging that the Work
84 or a Contribution incorporated within the Work constitutes direct
85 or contributory patent infringement, then any patent licenses
86 granted to You under this License for that Work shall terminate
87 as of the date such litigation is filed.
88
89 4. Redistribution. You may reproduce and distribute copies of the
90 Work or Derivative Works thereof in any medium, with or without
91 modifications, and in Source or Object form, provided that You
92 meet the following conditions:
93
94 (a) You must give any other recipients of the Work or
95 Derivative Works a copy of this License; and
96
97 (b) You must cause any modified files to carry prominent notices
98 stating that You changed the files; and
99
100 (c) You must retain, in the Source form of any Derivative Works
101 that You distribute, all copyright, patent, trademark, and
102 attribution notices from the Source form of the Work,
103 excluding those notices that do not pertain to any part of
104 the Derivative Works; and
105
106 (d) If the Work includes a "NOTICE" text file as part of its
107 distribution, then any Derivative Works that You distribute must
108 include a readable copy of the attribution notices contained
109 within such NOTICE file, excluding those notices that do not
110 pertain to any part of the Derivative Works, in at least one
111 of the following places: within a NOTICE text file distributed
112 as part of the Derivative Works; within the Source form or
113 documentation, if provided along with the Derivative Works; or,
114 within a display generated by the Derivative Works, if and
115 wherever such third-party notices normally appear. The contents
116 of the NOTICE file are for informational purposes only and
117 do not modify the License. You may add Your own attribution
118 notices within Derivative Works that You distribute, alongside
119 or as an addendum to the NOTICE text from the Work, provided
120 that such additional attribution notices cannot be construed
121 as modifying the License.
122
123 You may add Your own copyright statement to Your modifications and
124 may provide additional or different license terms and conditions
125 for use, reproduction, or distribution of Your modifications, or
126 for any such Derivative Works as a whole, provided Your use,
127 reproduction, and distribution of the Work otherwise complies with
128 the conditions stated in this License.
129
130 5. Submission of Contributions. Unless You explicitly state otherwise,
131 any Contribution intentionally submitted for inclusion in the Work
132 by You to the Licensor shall be under the terms and conditions of
133 this License, without any additional terms or conditions.
134 Notwithstanding the above, nothing herein shall supersede or modify
135 the terms of any separate license agreement you may have executed
136 with Licensor regarding such Contributions.
137
138 6. Trademarks. This License does not grant permission to use the trade
139 names, trademarks, service marks, or product names of the Licensor,
140 except as required for reasonable and customary use in describing the
141 origin of the Work and reproducing the content of the NOTICE file.
142
143 7. Disclaimer of Warranty. Unless required by applicable law or
144 agreed to in writing, Licensor provides the Work (and each
145 Contributor provides its Contributions) on an "AS IS" BASIS,
146 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 implied, including, without limitation, any warranties or conditions
148 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 PARTICULAR PURPOSE. You are solely responsible for determining the
150 appropriateness of using or redistributing the Work and assume any
151 risks associated with Your exercise of permissions under this License.
152
153 8. Limitation of Liability. In no event and under no legal theory,
154 whether in tort (including negligence), contract, or otherwise,
155 unless required by applicable law (such as deliberate and grossly
156 negligent acts) or agreed to in writing, shall any Contributor be
157 liable to You for damages, including any direct, indirect, special,
158 incidental, or consequential damages of any character arising as a
159 result of this License or out of the use or inability to use the
160 Work (including but not limited to damages for loss of goodwill,
161 work stoppage, computer failure or malfunction, or any and all
162 other commercial damages or losses), even if such Contributor
163 has been advised of the possibility of such damages.
164
165 9. Accepting Warranty or Additional Liability. While redistributing
166 the Work or Derivative Works thereof, You may choose to offer,
167 and charge a fee for, acceptance of support, warranty, indemnity,
168 or other liability obligations and/or rights consistent with this
169 License. However, in accepting such obligations, You may act only
170 on Your own behalf and on Your sole responsibility, not on behalf
171 of any other Contributor, and only if You agree to indemnify,
172 defend, and hold each Contributor harmless for any liability
173 incurred by, or claims asserted against, such Contributor by reason
174 of your accepting any such warranty or additional liability.
175
176 END OF TERMS AND CONDITIONS
177
178 APPENDIX: How to apply the Apache License to your work.
179
180 To apply the Apache License to your work, attach the following
181 boilerplate notice, with the fields enclosed by brackets "[]"
182 replaced with your own identifying information. (Don't include
183 the brackets!) The text should be enclosed in the appropriate
184 comment syntax for the file format. We also recommend that a
185 file or class name and description of purpose be included on the
186 same "printed page" as the copyright notice for easier
187 identification within third-party archives.
188
189 Copyright [yyyy] [name of copyright owner]
190
191 Licensed under the Apache License, Version 2.0 (the "License");
192 you may not use this file except in compliance with the License.
193 You may obtain a copy of the License at
194
195 http://www.apache.org/licenses/LICENSE-2.0
196
197 Unless required by applicable law or agreed to in writing, software
198 distributed under the License is distributed on an "AS IS" BASIS,
199 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 See the License for the specific language governing permissions and
201 limitations under the License.
diff --git a/vendor/github.com/modern-go/concurrent/README.md b/vendor/github.com/modern-go/concurrent/README.md
new file mode 100644
index 0000000..acab320
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/README.md
@@ -0,0 +1,49 @@
1# concurrent
2
3[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/concurrent/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/concurrent?badge)
4[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/concurrent)
5[![Build Status](https://travis-ci.org/modern-go/concurrent.svg?branch=master)](https://travis-ci.org/modern-go/concurrent)
6[![codecov](https://codecov.io/gh/modern-go/concurrent/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/concurrent)
7[![rcard](https://goreportcard.com/badge/github.com/modern-go/concurrent)](https://goreportcard.com/report/github.com/modern-go/concurrent)
8[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE)
9
10* concurrent.Map: backport sync.Map for go below 1.9
11* concurrent.Executor: goroutines with explicit ownership that can be cancelled
12
13# concurrent.Map
14
15Because sync.Map is only available in Go 1.9 and later, concurrent.Map can be used to keep code portable
16
17```go
18m := concurrent.NewMap()
19m.Store("hello", "world")
20elem, found := m.Load("hello")
21// elem will be "world"
22// found will be true
23```
24
25# concurrent.Executor
26
27```go
28executor := concurrent.NewUnboundedExecutor()
29executor.Go(func(ctx context.Context) {
30 everyMillisecond := time.NewTicker(time.Millisecond)
31 for {
32 select {
33 case <-ctx.Done():
34 fmt.Println("goroutine exited")
35 return
36 case <-everyMillisecond.C:
37 // do something
38 }
39 }
40})
41time.Sleep(time.Second)
42executor.StopAndWaitForever()
43fmt.Println("executor stopped")
44```
45
46Attaching goroutines to an executor instance means we can
47
48* cancel them by stopping the executor with Stop/StopAndWait/StopAndWaitForever
49* handle panic by callback: the default behavior will no longer crash your application \ No newline at end of file
diff --git a/vendor/github.com/modern-go/concurrent/executor.go b/vendor/github.com/modern-go/concurrent/executor.go
new file mode 100644
index 0000000..623dba1
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/executor.go
@@ -0,0 +1,14 @@
1package concurrent
2
3import "context"
4
// Executor replaces the go keyword to start a new goroutine.
// The goroutine should cancel itself if the context passed in has been cancelled.
// A goroutine started by the executor is owned by the executor, so all
// goroutines it owns can be cancelled just by stopping the executor itself.
// However, the Executor interface does not have a Stop method: the party that
// starts and owns an executor should use its concrete type, not this interface.
type Executor interface {
	// Go starts a new goroutine controlled by the context
	Go(handler func(ctx context.Context))
}
diff --git a/vendor/github.com/modern-go/concurrent/go_above_19.go b/vendor/github.com/modern-go/concurrent/go_above_19.go
new file mode 100644
index 0000000..aeabf8c
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/go_above_19.go
@@ -0,0 +1,15 @@
1//+build go1.9
2
3package concurrent
4
5import "sync"
6
// Map is a wrapper for sync.Map introduced in go1.9; the embedding exposes
// the full sync.Map API (Load, Store, ...) directly on Map.
type Map struct {
	sync.Map
}

// NewMap creates a thread safe Map
func NewMap() *Map {
	return &Map{}
}
diff --git a/vendor/github.com/modern-go/concurrent/go_below_19.go b/vendor/github.com/modern-go/concurrent/go_below_19.go
new file mode 100644
index 0000000..b9c8df7
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/go_below_19.go
@@ -0,0 +1,33 @@
1//+build !go1.9
2
3package concurrent
4
5import "sync"
6
// Map implements a thread safe map for go versions below 1.9 using a RWMutex.
type Map struct {
	lock sync.RWMutex
	data map[interface{}]interface{}
}

// NewMap creates a thread safe map
func NewMap() *Map {
	storage := make(map[interface{}]interface{}, 32)
	return &Map{data: storage}
}

// Load is same as sync.Map Load
func (m *Map) Load(key interface{}) (elem interface{}, found bool) {
	m.lock.RLock()
	defer m.lock.RUnlock()
	elem, found = m.data[key]
	return elem, found
}

// Store is same as sync.Map Store
func (m *Map) Store(key interface{}, elem interface{}) {
	m.lock.Lock()
	defer m.lock.Unlock()
	m.data[key] = elem
}
diff --git a/vendor/github.com/modern-go/concurrent/log.go b/vendor/github.com/modern-go/concurrent/log.go
new file mode 100644
index 0000000..9756fcc
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/log.go
@@ -0,0 +1,13 @@
1package concurrent
2
import (
	"io/ioutil"
	"log"
	"os"
)

// ErrorLogger is used to print out errors; it can be reassigned to log to a
// writer other than stderr.
var ErrorLogger = log.New(os.Stderr, "", 0)

// InfoLogger is used to print informational messages; it defaults to off
// (everything written to it is discarded).
var InfoLogger = log.New(ioutil.Discard, "", 0)
diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh
new file mode 100644
index 0000000..d1e6b2e
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/test.sh
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

# Run the tests of every package (vendor excluded), appending each package's
# coverage profile to a single coverage.txt for upload.
set -e

echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
    go test -coverprofile=profile.out -coverpkg=github.com/modern-go/concurrent "$d"
    if [ -f profile.out ]; then
        cat profile.out >> coverage.txt
        rm profile.out
    fi
done
diff --git a/vendor/github.com/modern-go/concurrent/unbounded_executor.go b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
new file mode 100644
index 0000000..05a77dc
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
@@ -0,0 +1,119 @@
1package concurrent
2
3import (
4 "context"
5 "fmt"
6 "runtime"
7 "runtime/debug"
8 "sync"
9 "time"
10 "reflect"
11)
12
13// HandlePanic logs goroutine panic by default
14var HandlePanic = func(recovered interface{}, funcName string) {
15 ErrorLogger.Println(fmt.Sprintf("%s panic: %v", funcName, recovered))
16 ErrorLogger.Println(string(debug.Stack()))
17}
18
// UnboundedExecutor is an executor without limits on the count of alive
// goroutines. It tracks the goroutines it starts and can cancel them on shutdown.
type UnboundedExecutor struct {
	ctx                   context.Context // cancelled to tell owned goroutines to exit
	cancel                context.CancelFunc
	activeGoroutinesMutex *sync.Mutex
	activeGoroutines      map[string]int // "file:line" start site -> live goroutine count
	HandlePanic           func(recovered interface{}, funcName string) // overrides package-level HandlePanic when non-nil
}
28
// GlobalUnboundedExecutor has the life cycle of the program itself:
// any goroutine that wants to be shut down before main exits can be started
// from this executor. GlobalUnboundedExecutor expects the main function to
// call stop; it does not magically know when the main function exits.
var GlobalUnboundedExecutor = NewUnboundedExecutor()
34
35// NewUnboundedExecutor creates a new UnboundedExecutor,
36// UnboundedExecutor can not be created by &UnboundedExecutor{}
37// HandlePanic can be set with a callback to override global HandlePanic
38func NewUnboundedExecutor() *UnboundedExecutor {
39 ctx, cancel := context.WithCancel(context.TODO())
40 return &UnboundedExecutor{
41 ctx: ctx,
42 cancel: cancel,
43 activeGoroutinesMutex: &sync.Mutex{},
44 activeGoroutines: map[string]int{},
45 }
46}
47
// Go starts a new goroutine and tracks its lifecycle.
// Panic will be recovered and logged automatically, except for StopSignal
func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) {
	// Resolve the handler's function name and file:line so the goroutine can
	// be attributed in panic logs and in the active-goroutine bookkeeping.
	pc := reflect.ValueOf(handler).Pointer()
	f := runtime.FuncForPC(pc)
	funcName := f.Name()
	file, line := f.FileLine(pc)
	// Count the goroutine as live *before* starting it, under the mutex, so
	// StopAndWait cannot observe a zero count for an about-to-run goroutine.
	executor.activeGoroutinesMutex.Lock()
	defer executor.activeGoroutinesMutex.Unlock()
	startFrom := fmt.Sprintf("%s:%d", file, line)
	executor.activeGoroutines[startFrom] += 1
	go func() {
		defer func() {
			recovered := recover()
			// if you want to quit a goroutine without trigger HandlePanic
			// use runtime.Goexit() to quit
			if recovered != nil {
				// The per-executor handler, when set, takes precedence over
				// the package-level HandlePanic.
				if executor.HandlePanic == nil {
					HandlePanic(recovered, funcName)
				} else {
					executor.HandlePanic(recovered, funcName)
				}
			}
			// Decrement the live count so StopAndWait can observe the exit.
			executor.activeGoroutinesMutex.Lock()
			executor.activeGoroutines[startFrom] -= 1
			executor.activeGoroutinesMutex.Unlock()
		}()
		handler(executor.ctx)
	}()
}
78
// Stop cancels all goroutines started by this executor without waiting for
// them to exit.
func (executor *UnboundedExecutor) Stop() {
	executor.cancel()
}
83
// StopAndWaitForever cancels all goroutines started by this executor and
// waits, with no deadline, until all of them have exited.
func (executor *UnboundedExecutor) StopAndWaitForever() {
	executor.StopAndWait(context.Background())
}
89
90// StopAndWait cancel all goroutines started by this executor and wait.
91// Wait can be cancelled by the context passed in.
92func (executor *UnboundedExecutor) StopAndWait(ctx context.Context) {
93 executor.cancel()
94 for {
95 oneHundredMilliseconds := time.NewTimer(time.Millisecond * 100)
96 select {
97 case <-oneHundredMilliseconds.C:
98 if executor.checkNoActiveGoroutines() {
99 return
100 }
101 case <-ctx.Done():
102 return
103 }
104 }
105}
106
107func (executor *UnboundedExecutor) checkNoActiveGoroutines() bool {
108 executor.activeGoroutinesMutex.Lock()
109 defer executor.activeGoroutinesMutex.Unlock()
110 for startFrom, count := range executor.activeGoroutines {
111 if count > 0 {
112 InfoLogger.Println("UnboundedExecutor is still waiting goroutines to quit",
113 "startFrom", startFrom,
114 "count", count)
115 return false
116 }
117 }
118 return true
119}
diff --git a/vendor/github.com/modern-go/reflect2/.gitignore b/vendor/github.com/modern-go/reflect2/.gitignore
new file mode 100644
index 0000000..7b26c94
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/.gitignore
@@ -0,0 +1,2 @@
1/vendor
2/coverage.txt
diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml
new file mode 100644
index 0000000..b097728
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/.travis.yml
@@ -0,0 +1,15 @@
1language: go
2
3go:
4 - 1.9.x
5 - 1.x
6
7before_install:
8 - go get -t -v ./...
9 - go get -t -v github.com/modern-go/reflect2-tests/...
10
11script:
12 - ./test.sh
13
14after_success:
15 - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock
new file mode 100644
index 0000000..10ef811
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock
@@ -0,0 +1,9 @@
1# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
2
3
4[solve-meta]
5 analyzer-name = "dep"
6 analyzer-version = 1
7 input-imports = []
8 solver-name = "gps-cdcl"
9 solver-version = 1
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml
new file mode 100644
index 0000000..a9bc506
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml
@@ -0,0 +1,31 @@
1# Gopkg.toml example
2#
3# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
4# for detailed Gopkg.toml documentation.
5#
6# required = ["github.com/user/thing/cmd/thing"]
7# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
8#
9# [[constraint]]
10# name = "github.com/user/project"
11# version = "1.0.0"
12#
13# [[constraint]]
14# name = "github.com/user/project2"
15# branch = "dev"
16# source = "github.com/myfork/project2"
17#
18# [[override]]
19# name = "github.com/x/y"
20# version = "2.4.0"
21#
22# [prune]
23# non-go = false
24# go-tests = true
25# unused-packages = true
26
27ignored = []
28
29[prune]
30 go-tests = true
31 unused-packages = true
diff --git a/vendor/github.com/modern-go/reflect2/LICENSE b/vendor/github.com/modern-go/reflect2/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/LICENSE
@@ -0,0 +1,201 @@
1 Apache License
2 Version 2.0, January 2004
3 http://www.apache.org/licenses/
4
5 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
7 1. Definitions.
8
9 "License" shall mean the terms and conditions for use, reproduction,
10 and distribution as defined by Sections 1 through 9 of this document.
11
12 "Licensor" shall mean the copyright owner or entity authorized by
13 the copyright owner that is granting the License.
14
15 "Legal Entity" shall mean the union of the acting entity and all
16 other entities that control, are controlled by, or are under common
17 control with that entity. For the purposes of this definition,
18 "control" means (i) the power, direct or indirect, to cause the
19 direction or management of such entity, whether by contract or
20 otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 outstanding shares, or (iii) beneficial ownership of such entity.
22
23 "You" (or "Your") shall mean an individual or Legal Entity
24 exercising permissions granted by this License.
25
26 "Source" form shall mean the preferred form for making modifications,
27 including but not limited to software source code, documentation
28 source, and configuration files.
29
30 "Object" form shall mean any form resulting from mechanical
31 transformation or translation of a Source form, including but
32 not limited to compiled object code, generated documentation,
33 and conversions to other media types.
34
35 "Work" shall mean the work of authorship, whether in Source or
36 Object form, made available under the License, as indicated by a
37 copyright notice that is included in or attached to the work
38 (an example is provided in the Appendix below).
39
40 "Derivative Works" shall mean any work, whether in Source or Object
41 form, that is based on (or derived from) the Work and for which the
42 editorial revisions, annotations, elaborations, or other modifications
43 represent, as a whole, an original work of authorship. For the purposes
44 of this License, Derivative Works shall not include works that remain
45 separable from, or merely link (or bind by name) to the interfaces of,
46 the Work and Derivative Works thereof.
47
48 "Contribution" shall mean any work of authorship, including
49 the original version of the Work and any modifications or additions
50 to that Work or Derivative Works thereof, that is intentionally
51 submitted to Licensor for inclusion in the Work by the copyright owner
52 or by an individual or Legal Entity authorized to submit on behalf of
53 the copyright owner. For the purposes of this definition, "submitted"
54 means any form of electronic, verbal, or written communication sent
55 to the Licensor or its representatives, including but not limited to
56 communication on electronic mailing lists, source code control systems,
57 and issue tracking systems that are managed by, or on behalf of, the
58 Licensor for the purpose of discussing and improving the Work, but
59 excluding communication that is conspicuously marked or otherwise
60 designated in writing by the copyright owner as "Not a Contribution."
61
62 "Contributor" shall mean Licensor and any individual or Legal Entity
63 on behalf of whom a Contribution has been received by Licensor and
64 subsequently incorporated within the Work.
65
66 2. Grant of Copyright License. Subject to the terms and conditions of
67 this License, each Contributor hereby grants to You a perpetual,
68 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 copyright license to reproduce, prepare Derivative Works of,
70 publicly display, publicly perform, sublicense, and distribute the
71 Work and such Derivative Works in Source or Object form.
72
73 3. Grant of Patent License. Subject to the terms and conditions of
74 this License, each Contributor hereby grants to You a perpetual,
75 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 (except as stated in this section) patent license to make, have made,
77 use, offer to sell, sell, import, and otherwise transfer the Work,
78 where such license applies only to those patent claims licensable
79 by such Contributor that are necessarily infringed by their
80 Contribution(s) alone or by combination of their Contribution(s)
81 with the Work to which such Contribution(s) was submitted. If You
82 institute patent litigation against any entity (including a
83 cross-claim or counterclaim in a lawsuit) alleging that the Work
84 or a Contribution incorporated within the Work constitutes direct
85 or contributory patent infringement, then any patent licenses
86 granted to You under this License for that Work shall terminate
87 as of the date such litigation is filed.
88
89 4. Redistribution. You may reproduce and distribute copies of the
90 Work or Derivative Works thereof in any medium, with or without
91 modifications, and in Source or Object form, provided that You
92 meet the following conditions:
93
94 (a) You must give any other recipients of the Work or
95 Derivative Works a copy of this License; and
96
97 (b) You must cause any modified files to carry prominent notices
98 stating that You changed the files; and
99
100 (c) You must retain, in the Source form of any Derivative Works
101 that You distribute, all copyright, patent, trademark, and
102 attribution notices from the Source form of the Work,
103 excluding those notices that do not pertain to any part of
104 the Derivative Works; and
105
106 (d) If the Work includes a "NOTICE" text file as part of its
107 distribution, then any Derivative Works that You distribute must
108 include a readable copy of the attribution notices contained
109 within such NOTICE file, excluding those notices that do not
110 pertain to any part of the Derivative Works, in at least one
111 of the following places: within a NOTICE text file distributed
112 as part of the Derivative Works; within the Source form or
113 documentation, if provided along with the Derivative Works; or,
114 within a display generated by the Derivative Works, if and
115 wherever such third-party notices normally appear. The contents
116 of the NOTICE file are for informational purposes only and
117 do not modify the License. You may add Your own attribution
118 notices within Derivative Works that You distribute, alongside
119 or as an addendum to the NOTICE text from the Work, provided
120 that such additional attribution notices cannot be construed
121 as modifying the License.
122
123 You may add Your own copyright statement to Your modifications and
124 may provide additional or different license terms and conditions
125 for use, reproduction, or distribution of Your modifications, or
126 for any such Derivative Works as a whole, provided Your use,
127 reproduction, and distribution of the Work otherwise complies with
128 the conditions stated in this License.
129
130 5. Submission of Contributions. Unless You explicitly state otherwise,
131 any Contribution intentionally submitted for inclusion in the Work
132 by You to the Licensor shall be under the terms and conditions of
133 this License, without any additional terms or conditions.
134 Notwithstanding the above, nothing herein shall supersede or modify
135 the terms of any separate license agreement you may have executed
136 with Licensor regarding such Contributions.
137
138 6. Trademarks. This License does not grant permission to use the trade
139 names, trademarks, service marks, or product names of the Licensor,
140 except as required for reasonable and customary use in describing the
141 origin of the Work and reproducing the content of the NOTICE file.
142
143 7. Disclaimer of Warranty. Unless required by applicable law or
144 agreed to in writing, Licensor provides the Work (and each
145 Contributor provides its Contributions) on an "AS IS" BASIS,
146 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 implied, including, without limitation, any warranties or conditions
148 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 PARTICULAR PURPOSE. You are solely responsible for determining the
150 appropriateness of using or redistributing the Work and assume any
151 risks associated with Your exercise of permissions under this License.
152
153 8. Limitation of Liability. In no event and under no legal theory,
154 whether in tort (including negligence), contract, or otherwise,
155 unless required by applicable law (such as deliberate and grossly
156 negligent acts) or agreed to in writing, shall any Contributor be
157 liable to You for damages, including any direct, indirect, special,
158 incidental, or consequential damages of any character arising as a
159 result of this License or out of the use or inability to use the
160 Work (including but not limited to damages for loss of goodwill,
161 work stoppage, computer failure or malfunction, or any and all
162 other commercial damages or losses), even if such Contributor
163 has been advised of the possibility of such damages.
164
165 9. Accepting Warranty or Additional Liability. While redistributing
166 the Work or Derivative Works thereof, You may choose to offer,
167 and charge a fee for, acceptance of support, warranty, indemnity,
168 or other liability obligations and/or rights consistent with this
169 License. However, in accepting such obligations, You may act only
170 on Your own behalf and on Your sole responsibility, not on behalf
171 of any other Contributor, and only if You agree to indemnify,
172 defend, and hold each Contributor harmless for any liability
173 incurred by, or claims asserted against, such Contributor by reason
174 of your accepting any such warranty or additional liability.
175
176 END OF TERMS AND CONDITIONS
177
178 APPENDIX: How to apply the Apache License to your work.
179
180 To apply the Apache License to your work, attach the following
181 boilerplate notice, with the fields enclosed by brackets "[]"
182 replaced with your own identifying information. (Don't include
183 the brackets!) The text should be enclosed in the appropriate
184 comment syntax for the file format. We also recommend that a
185 file or class name and description of purpose be included on the
186 same "printed page" as the copyright notice for easier
187 identification within third-party archives.
188
189 Copyright [yyyy] [name of copyright owner]
190
191 Licensed under the Apache License, Version 2.0 (the "License");
192 you may not use this file except in compliance with the License.
193 You may obtain a copy of the License at
194
195 http://www.apache.org/licenses/LICENSE-2.0
196
197 Unless required by applicable law or agreed to in writing, software
198 distributed under the License is distributed on an "AS IS" BASIS,
199 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 See the License for the specific language governing permissions and
201 limitations under the License.
diff --git a/vendor/github.com/modern-go/reflect2/README.md b/vendor/github.com/modern-go/reflect2/README.md
new file mode 100644
index 0000000..6f968aa
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/README.md
@@ -0,0 +1,71 @@
1# reflect2
2
3[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/reflect2/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/reflect2?badge)
4[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/reflect2)
5[![Build Status](https://travis-ci.org/modern-go/reflect2.svg?branch=master)](https://travis-ci.org/modern-go/reflect2)
6[![codecov](https://codecov.io/gh/modern-go/reflect2/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/reflect2)
7[![rcard](https://goreportcard.com/badge/github.com/modern-go/reflect2)](https://goreportcard.com/report/github.com/modern-go/reflect2)
8[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE)
9
10reflect api that avoids runtime reflect.Value cost
11
12* reflect get/set interface{}, with type checking
13* reflect get/set unsafe.Pointer, without type checking
14* `reflect2.TypeByName` works like `Class.forName` found in java
15
[json-iterator](https://github.com/json-iterator/go) uses this package to save runtime dispatching cost.
This package is designed for low-level libraries to optimize reflection performance.
General applications should still use the reflect standard library.
19
20# reflect2.TypeByName
21
22```go
23// given package is github.com/your/awesome-package
24type MyStruct struct {
25 // ...
26}
27
28// will return the type
29reflect2.TypeByName("awesome-package.MyStruct")
// however, if the type has not been used,
// it will be eliminated by the compiler, so we cannot get it at runtime
32```
33
34# reflect2 get/set interface{}
35
36```go
37valType := reflect2.TypeOf(1)
38i := 1
39j := 10
40valType.Set(&i, &j)
41// i will be 10
42```
43
44to get set `type`, always use its pointer `*type`
45
46# reflect2 get/set unsafe.Pointer
47
48```go
49valType := reflect2.TypeOf(1)
50i := 1
51j := 10
52valType.UnsafeSet(unsafe.Pointer(&i), unsafe.Pointer(&j))
53// i will be 10
54```
55
56to get set `type`, always use its pointer `*type`
57
58# benchmark
59
Benchmarks are not meaningful for this package, because it adds no work of its own:
it is just a thin wrapper that makes go runtime internals public.
Both `reflect2` and `reflect` call the same functions
provided by the `runtime` package exposed by the go language.
64
65# unsafe safety
66
Instead of casting `[]byte` to `sliceHeader` in your application using unsafe,
use reflect2 instead. This way, if `sliceHeader` changes in the future,
only reflect2 needs to be upgraded.
70
71reflect2 tries its best to keep the implementation same as reflect (by testing). \ No newline at end of file
diff --git a/vendor/github.com/modern-go/reflect2/go_above_118.go b/vendor/github.com/modern-go/reflect2/go_above_118.go
new file mode 100644
index 0000000..2b4116f
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_above_118.go
@@ -0,0 +1,23 @@
1//+build go1.18
2
3package reflect2
4
5import (
6 "unsafe"
7)
8
9// m escapes into the return value, but the caller of mapiterinit
10// doesn't let the return value escape.
11//go:noescape
12//go:linkname mapiterinit reflect.mapiterinit
13func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter)
14
// UnsafeIterate returns an iterator over the map pointed to by obj.
// obj is a pointer to the map header, so it is dereferenced once before
// being handed to mapiterinit (go:linkname'd from package reflect, which
// on go1.18+ fills a caller-supplied hiter).
func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
	var it hiter
	mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj), &it)
	return &UnsafeMapIterator{
		hiter:      &it,
		pKeyRType:  type2.pKeyRType,
		pElemRType: type2.pElemRType,
	}
}
diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go
new file mode 100644
index 0000000..974f768
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_above_19.go
@@ -0,0 +1,17 @@
1//+build go1.9
2
3package reflect2
4
5import (
6 "unsafe"
7)
8
9//go:linkname resolveTypeOff reflect.resolveTypeOff
10func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
11
12//go:linkname makemap reflect.makemap
13func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer)
14
15func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
16 return makemap(rtype, cap)
17}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_118.go b/vendor/github.com/modern-go/reflect2/go_below_118.go
new file mode 100644
index 0000000..00003db
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_below_118.go
@@ -0,0 +1,21 @@
1//+build !go1.18
2
3package reflect2
4
5import (
6 "unsafe"
7)
8
9// m escapes into the return value, but the caller of mapiterinit
10// doesn't let the return value escape.
11//go:noescape
12//go:linkname mapiterinit reflect.mapiterinit
13func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter)
14
// UnsafeIterate returns an iterator over the map pointed to by obj.
// obj is a pointer to the map header, dereferenced once before the call;
// pre-go1.18 mapiterinit allocates and returns the hiter itself.
func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
	return &UnsafeMapIterator{
		hiter:      mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
		pKeyRType:  type2.pKeyRType,
		pElemRType: type2.pElemRType,
	}
}
diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go
new file mode 100644
index 0000000..c43c8b9
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/reflect2.go
@@ -0,0 +1,300 @@
1package reflect2
2
3import (
4 "reflect"
5 "runtime"
6 "sync"
7 "unsafe"
8)
9
10type Type interface {
11 Kind() reflect.Kind
12 // New return pointer to data of this type
13 New() interface{}
14 // UnsafeNew return the allocated space pointed by unsafe.Pointer
15 UnsafeNew() unsafe.Pointer
16 // PackEFace cast a unsafe pointer to object represented pointer
17 PackEFace(ptr unsafe.Pointer) interface{}
18 // Indirect dereference object represented pointer to this type
19 Indirect(obj interface{}) interface{}
20 // UnsafeIndirect dereference pointer to this type
21 UnsafeIndirect(ptr unsafe.Pointer) interface{}
22 // Type1 returns reflect.Type
23 Type1() reflect.Type
24 Implements(thatType Type) bool
25 String() string
26 RType() uintptr
27 // interface{} of this type has pointer like behavior
28 LikePtr() bool
29 IsNullable() bool
30 IsNil(obj interface{}) bool
31 UnsafeIsNil(ptr unsafe.Pointer) bool
32 Set(obj interface{}, val interface{})
33 UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer)
34 AssignableTo(anotherType Type) bool
35}
36
37type ListType interface {
38 Type
39 Elem() Type
40 SetIndex(obj interface{}, index int, elem interface{})
41 UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer)
42 GetIndex(obj interface{}, index int) interface{}
43 UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer
44}
45
46type ArrayType interface {
47 ListType
48 Len() int
49}
50
51type SliceType interface {
52 ListType
53 MakeSlice(length int, cap int) interface{}
54 UnsafeMakeSlice(length int, cap int) unsafe.Pointer
55 Grow(obj interface{}, newLength int)
56 UnsafeGrow(ptr unsafe.Pointer, newLength int)
57 Append(obj interface{}, elem interface{})
58 UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer)
59 LengthOf(obj interface{}) int
60 UnsafeLengthOf(ptr unsafe.Pointer) int
61 SetNil(obj interface{})
62 UnsafeSetNil(ptr unsafe.Pointer)
63 Cap(obj interface{}) int
64 UnsafeCap(ptr unsafe.Pointer) int
65}
66
67type StructType interface {
68 Type
69 NumField() int
70 Field(i int) StructField
71 FieldByName(name string) StructField
72 FieldByIndex(index []int) StructField
73 FieldByNameFunc(match func(string) bool) StructField
74}
75
76type StructField interface {
77 Offset() uintptr
78 Name() string
79 PkgPath() string
80 Type() Type
81 Tag() reflect.StructTag
82 Index() []int
83 Anonymous() bool
84 Set(obj interface{}, value interface{})
85 UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer)
86 Get(obj interface{}) interface{}
87 UnsafeGet(obj unsafe.Pointer) unsafe.Pointer
88}
89
90type MapType interface {
91 Type
92 Key() Type
93 Elem() Type
94 MakeMap(cap int) interface{}
95 UnsafeMakeMap(cap int) unsafe.Pointer
96 SetIndex(obj interface{}, key interface{}, elem interface{})
97 UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer)
98 TryGetIndex(obj interface{}, key interface{}) (interface{}, bool)
99 GetIndex(obj interface{}, key interface{}) interface{}
100 UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
101 Iterate(obj interface{}) MapIterator
102 UnsafeIterate(obj unsafe.Pointer) MapIterator
103}
104
105type MapIterator interface {
106 HasNext() bool
107 Next() (key interface{}, elem interface{})
108 UnsafeNext() (key unsafe.Pointer, elem unsafe.Pointer)
109}
110
111type PtrType interface {
112 Type
113 Elem() Type
114}
115
116type InterfaceType interface {
117 NumMethod() int
118}
119
120type Config struct {
121 UseSafeImplementation bool
122}
123
124type API interface {
125 TypeOf(obj interface{}) Type
126 Type2(type1 reflect.Type) Type
127}
128
129var ConfigUnsafe = Config{UseSafeImplementation: false}.Froze()
130var ConfigSafe = Config{UseSafeImplementation: true}.Froze()
131
132type frozenConfig struct {
133 useSafeImplementation bool
134 cache *sync.Map
135}
136
137func (cfg Config) Froze() *frozenConfig {
138 return &frozenConfig{
139 useSafeImplementation: cfg.UseSafeImplementation,
140 cache: new(sync.Map),
141 }
142}
143
144func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
145 cacheKey := uintptr(unpackEFace(obj).rtype)
146 typeObj, found := cfg.cache.Load(cacheKey)
147 if found {
148 return typeObj.(Type)
149 }
150 return cfg.Type2(reflect.TypeOf(obj))
151}
152
153func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
154 if type1 == nil {
155 return nil
156 }
157 cacheKey := uintptr(unpackEFace(type1).data)
158 typeObj, found := cfg.cache.Load(cacheKey)
159 if found {
160 return typeObj.(Type)
161 }
162 type2 := cfg.wrapType(type1)
163 cfg.cache.Store(cacheKey, type2)
164 return type2
165}
166
// wrapType picks the concrete Type implementation for type1 based on its
// kind and on whether this config uses the safe (pure reflect) or the
// unsafe implementation.
func (cfg *frozenConfig) wrapType(type1 reflect.Type) Type {
	safeType := safeType{Type: type1, cfg: cfg}
	switch type1.Kind() {
	case reflect.Struct:
		if cfg.useSafeImplementation {
			return &safeStructType{safeType}
		}
		return newUnsafeStructType(cfg, type1)
	case reflect.Array:
		if cfg.useSafeImplementation {
			// NOTE(review): safe mode reuses safeSliceType for arrays —
			// verify it satisfies the full ArrayType contract.
			return &safeSliceType{safeType}
		}
		return newUnsafeArrayType(cfg, type1)
	case reflect.Slice:
		if cfg.useSafeImplementation {
			return &safeSliceType{safeType}
		}
		return newUnsafeSliceType(cfg, type1)
	case reflect.Map:
		if cfg.useSafeImplementation {
			return &safeMapType{safeType}
		}
		return newUnsafeMapType(cfg, type1)
	case reflect.Ptr, reflect.Chan, reflect.Func:
		if cfg.useSafeImplementation {
			// NOTE(review): safeMapType is also returned for ptr/chan/func
			// in safe mode — presumably only the embedded safeType methods
			// are used here; confirm against callers.
			return &safeMapType{safeType}
		}
		return newUnsafePtrType(cfg, type1)
	case reflect.Interface:
		if cfg.useSafeImplementation {
			return &safeMapType{safeType}
		}
		// Empty interfaces (eface) and non-empty interfaces (iface) have
		// different runtime layouts and get distinct wrappers.
		if type1.NumMethod() == 0 {
			return newUnsafeEFaceType(cfg, type1)
		}
		return newUnsafeIFaceType(cfg, type1)
	default:
		if cfg.useSafeImplementation {
			return &safeType
		}
		return newUnsafeType(cfg, type1)
	}
}
210
211func TypeOf(obj interface{}) Type {
212 return ConfigUnsafe.TypeOf(obj)
213}
214
215func TypeOfPtr(obj interface{}) PtrType {
216 return TypeOf(obj).(PtrType)
217}
218
219func Type2(type1 reflect.Type) Type {
220 if type1 == nil {
221 return nil
222 }
223 return ConfigUnsafe.Type2(type1)
224}
225
226func PtrTo(typ Type) Type {
227 return Type2(reflect.PtrTo(typ.Type1()))
228}
229
230func PtrOf(obj interface{}) unsafe.Pointer {
231 return unpackEFace(obj).data
232}
233
234func RTypeOf(obj interface{}) uintptr {
235 return uintptr(unpackEFace(obj).rtype)
236}
237
238func IsNil(obj interface{}) bool {
239 if obj == nil {
240 return true
241 }
242 return unpackEFace(obj).data == nil
243}
244
245func IsNullable(kind reflect.Kind) bool {
246 switch kind {
247 case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func, reflect.Slice, reflect.Interface:
248 return true
249 }
250 return false
251}
252
253func likePtrKind(kind reflect.Kind) bool {
254 switch kind {
255 case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func:
256 return true
257 }
258 return false
259}
260
261func likePtrType(typ reflect.Type) bool {
262 if likePtrKind(typ.Kind()) {
263 return true
264 }
265 if typ.Kind() == reflect.Struct {
266 if typ.NumField() != 1 {
267 return false
268 }
269 return likePtrType(typ.Field(0).Type)
270 }
271 if typ.Kind() == reflect.Array {
272 if typ.Len() != 1 {
273 return false
274 }
275 return likePtrType(typ.Elem())
276 }
277 return false
278}
279
// NoEscape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
// The uintptr round-trip with "^ 0" is a deliberate no-op that breaks the
// compiler's pointer tracking; do not "simplify" it.
//go:nosplit
func NoEscape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
290
// UnsafeCastString returns a []byte view over str's bytes without copying.
// The result aliases the (immutable) string data and MUST NOT be modified.
func UnsafeCastString(str string) []byte {
	bytes := make([]byte, 0)
	stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str))
	sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))
	sliceHeader.Data = stringHeader.Data
	sliceHeader.Cap = stringHeader.Len
	sliceHeader.Len = stringHeader.Len
	// Keep str alive at least until the header fields have been copied.
	runtime.KeepAlive(str)
	// NOTE(review): nothing keeps the source string alive after return;
	// presumably callers must ensure str outlives the returned slice —
	// confirm upstream intent.
	return bytes
}
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_amd64.s b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_kind.go b/vendor/github.com/modern-go/reflect2/reflect2_kind.go
new file mode 100644
index 0000000..62f299e
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/reflect2_kind.go
@@ -0,0 +1,30 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
8// DefaultTypeOfKind return the non aliased default type for the kind
9func DefaultTypeOfKind(kind reflect.Kind) Type {
10 return kindTypes[kind]
11}
12
// kindTypes maps each primitive, non-aliased reflect.Kind to its canonical
// reflect2 Type, built once at package init via TypeOf on a zero value.
var kindTypes = map[reflect.Kind]Type{
	reflect.Bool:          TypeOf(true),
	reflect.Uint8:         TypeOf(uint8(0)),
	reflect.Int8:          TypeOf(int8(0)),
	reflect.Uint16:        TypeOf(uint16(0)),
	reflect.Int16:         TypeOf(int16(0)),
	reflect.Uint32:        TypeOf(uint32(0)),
	reflect.Int32:         TypeOf(int32(0)),
	reflect.Uint64:        TypeOf(uint64(0)),
	reflect.Int64:         TypeOf(int64(0)),
	reflect.Uint:          TypeOf(uint(0)),
	reflect.Int:           TypeOf(int(0)),
	reflect.Float32:       TypeOf(float32(0)),
	reflect.Float64:       TypeOf(float64(0)),
	reflect.Uintptr:       TypeOf(uintptr(0)),
	reflect.String:        TypeOf(""),
	reflect.UnsafePointer: TypeOf(unsafe.Pointer(nil)),
}
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_386.s b/vendor/github.com/modern-go/reflect2/relfect2_386.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_386.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm.s b/vendor/github.com/modern-go/reflect2/relfect2_arm.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_arm.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm64.s b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_s390x.s b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
diff --git a/vendor/github.com/modern-go/reflect2/safe_field.go b/vendor/github.com/modern-go/reflect2/safe_field.go
new file mode 100644
index 0000000..d4ba1f4
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_field.go
@@ -0,0 +1,58 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// safeField implements StructField purely on top of reflect.StructField;
// the Unsafe* accessors are stubbed out to panic.
type safeField struct {
	reflect.StructField
}
11
// Offset returns the field's byte offset within its struct.
func (field *safeField) Offset() uintptr {
	return field.StructField.Offset
}
15
// Name returns the field's name.
func (field *safeField) Name() string {
	return field.StructField.Name
}
19
// PkgPath returns the package path qualifying unexported field names.
func (field *safeField) PkgPath() string {
	return field.StructField.PkgPath
}
23
// Type is not implemented by the safe field wrapper; it always panics.
func (field *safeField) Type() Type {
	panic("not implemented")
}
27
// Tag returns the field's struct tag.
func (field *safeField) Tag() reflect.StructTag {
	return field.StructField.Tag
}
31
// Index returns the index sequence locating this field (see reflect.StructField.Index).
func (field *safeField) Index() []int {
	return field.StructField.Index
}
35
// Anonymous reports whether the field is embedded.
func (field *safeField) Anonymous() bool {
	return field.StructField.Anonymous
}
39
// Set stores value into this field of obj. Both obj and value are passed
// as pointers to the actual values and are dereferenced via Elem() first.
func (field *safeField) Set(obj interface{}, value interface{}) {
	val := reflect.ValueOf(obj).Elem()
	val.FieldByIndex(field.Index()).Set(reflect.ValueOf(value).Elem())
}
44
// UnsafeSet is unavailable in the safe implementation; it always panics.
func (field *safeField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
	panic("unsafe operation is not supported")
}
48
// Get returns this field of obj wrapped as a pointer: obj is a pointer to
// the struct, and the result is a pointer to a fresh copy of the field.
func (field *safeField) Get(obj interface{}) interface{} {
	val := reflect.ValueOf(obj).Elem().FieldByIndex(field.Index())
	ptr := reflect.New(val.Type())
	ptr.Elem().Set(val)
	return ptr.Interface()
}
55
// UnsafeGet is unavailable in the safe implementation; it always panics.
func (field *safeField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
	panic("does not support unsafe operation")
}
diff --git a/vendor/github.com/modern-go/reflect2/safe_map.go b/vendor/github.com/modern-go/reflect2/safe_map.go
new file mode 100644
index 0000000..8836220
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_map.go
@@ -0,0 +1,101 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// safeMapType is the reflection-only MapType implementation. Map values
// are passed around as pointers-to-map (see MakeMap), so most methods
// dereference their arguments with Elem(). All Unsafe* operations panic.
8type safeMapType struct {
9 safeType
10}
11
12func (type2 *safeMapType) Key() Type {
13 return type2.safeType.cfg.Type2(type2.Type.Key())
14}
15
// MakeMap returns a *map boxed in an interface{}. The cap argument is
// accepted for interface compatibility but ignored here: reflect.MakeMap
// takes no size hint.
16func (type2 *safeMapType) MakeMap(cap int) interface{} {
17 ptr := reflect.New(type2.Type)
18 ptr.Elem().Set(reflect.MakeMap(type2.Type))
19 return ptr.Interface()
20}
21
22func (type2 *safeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
23 panic("does not support unsafe operation")
24}
25
// SetIndex sets m[*key] = *elem where obj is a *map and key/elem are
// pointers to the key and element values.
26func (type2 *safeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
27 keyVal := reflect.ValueOf(key)
28 elemVal := reflect.ValueOf(elem)
29 val := reflect.ValueOf(obj)
30 val.Elem().SetMapIndex(keyVal.Elem(), elemVal.Elem())
31}
32
33func (type2 *safeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
34 panic("does not support unsafe operation")
35}
36
// TryGetIndex looks up key in the map and reports whether it was present.
// NOTE(review): unlike SetIndex/GetIndex, obj and key are used directly
// (no Elem() dereference), so this appears to expect the map and key
// values themselves rather than pointers — confirm against callers.
37func (type2 *safeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
38 keyVal := reflect.ValueOf(key)
39 if key == nil {
40 keyVal = reflect.New(type2.Type.Key()).Elem()
41 }
42 val := reflect.ValueOf(obj).MapIndex(keyVal)
43 if !val.IsValid() {
44 return nil, false
45 }
46 return val.Interface(), true
47}
48
// GetIndex returns a pointer to a copy of m[*key], boxed in an
// interface{}. A missing key yields a nil *ElemType (the zero value of
// the pointer type), not a nil interface.
49func (type2 *safeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
50 val := reflect.ValueOf(obj).Elem()
51 keyVal := reflect.ValueOf(key).Elem()
52 elemVal := val.MapIndex(keyVal)
53 if !elemVal.IsValid() {
54 ptr := reflect.New(reflect.PtrTo(val.Type().Elem()))
55 return ptr.Elem().Interface()
56 }
57 ptr := reflect.New(elemVal.Type())
58 ptr.Elem().Set(elemVal)
59 return ptr.Interface()
60}
61
62func (type2 *safeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
63 panic("does not support unsafe operation")
64}
65
// Iterate snapshots the key set up front via MapKeys; entries added to
// the map after Iterate is called are not visited.
66func (type2 *safeMapType) Iterate(obj interface{}) MapIterator {
67 m := reflect.ValueOf(obj).Elem()
68 return &safeMapIterator{
69 m: m,
70 keys: m.MapKeys(),
71 }
72}
73
74func (type2 *safeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
75 panic("does not support unsafe operation")
76}
77
// safeMapIterator walks a pre-captured key slice, looking each key up in
// the live map as it goes.
78type safeMapIterator struct {
79 i int
80 m reflect.Value
81 keys []reflect.Value
82}
83
84func (iter *safeMapIterator) HasNext() bool {
85 return iter.i != len(iter.keys)
86}
87
// Next returns pointers to copies of the next key and element, each boxed
// in an interface{}. Calling Next when HasNext is false panics (index out
// of range on the keys slice).
88func (iter *safeMapIterator) Next() (interface{}, interface{}) {
89 key := iter.keys[iter.i]
90 elem := iter.m.MapIndex(key)
91 iter.i += 1
92 keyPtr := reflect.New(key.Type())
93 keyPtr.Elem().Set(key)
94 elemPtr := reflect.New(elem.Type())
95 elemPtr.Elem().Set(elem)
96 return keyPtr.Interface(), elemPtr.Interface()
97}
98
99func (iter *safeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
100 panic("does not support unsafe operation")
101}
diff --git a/vendor/github.com/modern-go/reflect2/safe_slice.go b/vendor/github.com/modern-go/reflect2/safe_slice.go
new file mode 100644
index 0000000..bcce6fd
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_slice.go
@@ -0,0 +1,92 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// safeSliceType is the reflection-only SliceType implementation. Slices
// are passed as *slice boxed in interface{}; element values are passed as
// pointers. All Unsafe* operations panic.
8type safeSliceType struct {
9 safeType
10}
11
// SetIndex assigns (*obj)[index] = *value.
12func (type2 *safeSliceType) SetIndex(obj interface{}, index int, value interface{}) {
13 val := reflect.ValueOf(obj).Elem()
14 elem := reflect.ValueOf(value).Elem()
15 val.Index(index).Set(elem)
16}
17
18func (type2 *safeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, value unsafe.Pointer) {
19 panic("does not support unsafe operation")
20}
21
// GetIndex returns a pointer to a copy of (*obj)[index], boxed in an
// interface{}.
22func (type2 *safeSliceType) GetIndex(obj interface{}, index int) interface{} {
23 val := reflect.ValueOf(obj).Elem()
24 elem := val.Index(index)
25 ptr := reflect.New(elem.Type())
26 ptr.Elem().Set(elem)
27 return ptr.Interface()
28}
29
30func (type2 *safeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
31 panic("does not support unsafe operation")
32}
33
// MakeSlice returns a *slice with the given length and capacity, boxed in
// an interface{}.
34func (type2 *safeSliceType) MakeSlice(length int, cap int) interface{} {
35 val := reflect.MakeSlice(type2.Type, length, cap)
36 ptr := reflect.New(val.Type())
37 ptr.Elem().Set(val)
38 return ptr.Interface()
39}
40
41func (type2 *safeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
42 panic("does not support unsafe operation")
43}
44
// Grow extends *obj by appending zero values.
// NOTE(review): delta is computed from the capacity, not the length, and
// the appended reflect.Values are zero (invalid) Values — confirm this
// behaves as intended for newLength > cap; callers in this package may
// never hit that path.
45func (type2 *safeSliceType) Grow(obj interface{}, newLength int) {
46 oldCap := type2.Cap(obj)
47 oldSlice := reflect.ValueOf(obj).Elem()
48 delta := newLength - oldCap
49 deltaVals := make([]reflect.Value, delta)
50 newSlice := reflect.Append(oldSlice, deltaVals...)
51 oldSlice.Set(newSlice)
52}
53
54func (type2 *safeSliceType) UnsafeGrow(ptr unsafe.Pointer, newLength int) {
55 panic("does not support unsafe operation")
56}
57
// Append appends *elem to *obj in place.
58func (type2 *safeSliceType) Append(obj interface{}, elem interface{}) {
59 val := reflect.ValueOf(obj).Elem()
60 elemVal := reflect.ValueOf(elem).Elem()
61 newVal := reflect.Append(val, elemVal)
62 val.Set(newVal)
63}
64
65func (type2 *safeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
66 panic("does not support unsafe operation")
67}
68
// SetNil resets *obj to the zero (nil) slice.
69func (type2 *safeSliceType) SetNil(obj interface{}) {
70 val := reflect.ValueOf(obj).Elem()
71 val.Set(reflect.Zero(val.Type()))
72}
73
74func (type2 *safeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
75 panic("does not support unsafe operation")
76}
77
78func (type2 *safeSliceType) LengthOf(obj interface{}) int {
79 return reflect.ValueOf(obj).Elem().Len()
80}
81
82func (type2 *safeSliceType) UnsafeLengthOf(ptr unsafe.Pointer) int {
83 panic("does not support unsafe operation")
84}
85
86func (type2 *safeSliceType) Cap(obj interface{}) int {
87 return reflect.ValueOf(obj).Elem().Cap()
88}
89
90func (type2 *safeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
91 panic("does not support unsafe operation")
92}
diff --git a/vendor/github.com/modern-go/reflect2/safe_struct.go b/vendor/github.com/modern-go/reflect2/safe_struct.go
new file mode 100644
index 0000000..e5fb9b3
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_struct.go
@@ -0,0 +1,29 @@
1package reflect2
2
// safeStructType is the reflection-only StructType implementation; field
// lookups delegate to the embedded reflect.Type and wrap the result in
// safeField.
3type safeStructType struct {
4 safeType
5}
6
// FieldByName returns the named field, panicking if the struct has no
// such field (contrast with the unsafe variant, which returns nil).
7func (type2 *safeStructType) FieldByName(name string) StructField {
8 field, found := type2.Type.FieldByName(name)
9 if !found {
10 panic("field " + name + " not found")
11 }
12 return &safeField{StructField: field}
13}
14
15func (type2 *safeStructType) Field(i int) StructField {
16 return &safeField{StructField: type2.Type.Field(i)}
17}
18
19func (type2 *safeStructType) FieldByIndex(index []int) StructField {
20 return &safeField{StructField: type2.Type.FieldByIndex(index)}
21}
22
// FieldByNameFunc returns the first field whose name satisfies match,
// panicking if none does.
23func (type2 *safeStructType) FieldByNameFunc(match func(string) bool) StructField {
24 field, found := type2.Type.FieldByNameFunc(match)
25 if !found {
26 panic("field match condition not found in " + type2.Type.String())
27 }
28 return &safeField{StructField: field}
29}
diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go
new file mode 100644
index 0000000..ee4e7bb
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_type.go
@@ -0,0 +1,78 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// safeType is the base reflection-only implementation of this package's
// Type interface. It embeds the underlying reflect.Type and keeps a back
// reference to the owning frozenConfig so derived types (Elem, Key, ...)
// can be wrapped through the same config. Unsafe* operations panic.
8type safeType struct {
9 reflect.Type
10 cfg *frozenConfig
11}
12
// New returns a pointer to a freshly allocated zero value, boxed in an
// interface{}.
13func (type2 *safeType) New() interface{} {
14 return reflect.New(type2.Type).Interface()
15}
16
17func (type2 *safeType) UnsafeNew() unsafe.Pointer {
18 panic("does not support unsafe operation")
19}
20
21func (type2 *safeType) Elem() Type {
22 return type2.cfg.Type2(type2.Type.Elem())
23}
24
// Type1 exposes the underlying reflect.Type.
25func (type2 *safeType) Type1() reflect.Type {
26 return type2.Type
27}
28
29func (type2 *safeType) PackEFace(ptr unsafe.Pointer) interface{} {
30 panic("does not support unsafe operation")
31}
32
33func (type2 *safeType) Implements(thatType Type) bool {
34 return type2.Type.Implements(thatType.Type1())
35}
36
37func (type2 *safeType) RType() uintptr {
38 panic("does not support unsafe operation")
39}
40
// Indirect dereferences obj if it is a pointer, returning the pointed-to
// value boxed in an interface{} (see reflect.Indirect).
41func (type2 *safeType) Indirect(obj interface{}) interface{} {
42 return reflect.Indirect(reflect.ValueOf(obj)).Interface()
43}
44
45func (type2 *safeType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
46 panic("does not support unsafe operation")
47}
48
49func (type2 *safeType) LikePtr() bool {
50 panic("does not support unsafe operation")
51}
52
53func (type2 *safeType) IsNullable() bool {
54 return IsNullable(type2.Kind())
55}
56
// IsNil reports whether *obj is nil; obj itself must be a non-nil pointer
// to a nilable value (Elem().IsNil() panics otherwise).
57func (type2 *safeType) IsNil(obj interface{}) bool {
58 if obj == nil {
59 return true
60 }
61 return reflect.ValueOf(obj).Elem().IsNil()
62}
63
64func (type2 *safeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
65 panic("does not support unsafe operation")
66}
67
// Set assigns *val to *obj; both arguments are pointers.
68func (type2 *safeType) Set(obj interface{}, val interface{}) {
69 reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(val).Elem())
70}
71
72func (type2 *safeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
73 panic("does not support unsafe operation")
74}
75
76func (type2 *safeType) AssignableTo(anotherType Type) bool {
77 return type2.Type1().AssignableTo(anotherType.Type1())
78}
diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go
new file mode 100644
index 0000000..4b13c31
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/type_map.go
@@ -0,0 +1,70 @@
1// +build !gccgo
2
3package reflect2
4
5import (
6 "reflect"
7 "sync"
8 "unsafe"
9)
10
// typelinks2 for 1.7 ~
//
// typelinks2 is linked to the runtime's type-link tables via the
// reflect package; it exposes every compiled-in type as (section base,
// per-section offsets). Requires the empty .s file in this package so
// the compiler permits go:linkname.
11//go:linkname typelinks2 reflect.typelinks
12func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
13
14// initOnce guards initialization of types and packages
15var initOnce sync.Once
16
// types maps a type's String() form to its reflect.Type; packages maps
// package path -> type name -> reflect.Type. Both are populated once by
// discoverTypes and read-only afterwards.
17var types map[string]reflect.Type
18var packages map[string]map[string]reflect.Type
19
20// discoverTypes initializes types and packages
21func discoverTypes() {
22 types = make(map[string]reflect.Type)
23 packages = make(map[string]map[string]reflect.Type)
24
25 loadGoTypes()
26}
27
// loadGoTypes walks the runtime type-link tables and registers every
// named struct type (found via its *T entry) into types and packages.
28func loadGoTypes() {
29 var obj interface{} = reflect.TypeOf(0)
30 sections, offset := typelinks2()
31 for i, offs := range offset {
32 rodata := sections[i]
33 for _, off := range offs {
// Overwrite the data word of the interface in place so obj's
// itab (reflect.Type) is reused for each raw rtype pointer.
34 (*emptyInterface)(unsafe.Pointer(&obj)).word = resolveTypeOff(unsafe.Pointer(rodata), off)
35 typ := obj.(reflect.Type)
// Only *struct entries are indexed; the pointed-to struct
// type is what gets registered.
36 if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
37 loadedType := typ.Elem()
38 pkgTypes := packages[loadedType.PkgPath()]
39 if pkgTypes == nil {
40 pkgTypes = map[string]reflect.Type{}
41 packages[loadedType.PkgPath()] = pkgTypes
42 }
43 types[loadedType.String()] = loadedType
44 pkgTypes[loadedType.Name()] = loadedType
45 }
46 }
47 }
48}
49
// emptyInterface mirrors the runtime layout of an interface{} value:
// a type word followed by a data word.
50type emptyInterface struct {
51 typ unsafe.Pointer
52 word unsafe.Pointer
53}
54
// TypeByName return the type by its name, just like Class.forName in java
55func TypeByName(typeName string) Type {
56 initOnce.Do(discoverTypes)
57 return Type2(types[typeName])
58}
59
// TypeByPackageName return the type by its package and name
60func TypeByPackageName(pkgPath string, name string) Type {
61 initOnce.Do(discoverTypes)
62 pkgTypes := packages[pkgPath]
63 if pkgTypes == nil {
64 return nil
65 }
66 return Type2(pkgTypes[name])
67}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_array.go b/vendor/github.com/modern-go/reflect2/unsafe_array.go
new file mode 100644
index 0000000..76cbdba
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_array.go
@@ -0,0 +1,65 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// UnsafeArrayType is the unsafe.Pointer-based ArrayType implementation.
// It caches the element rtype, the *element rtype, and the element size
// so indexing can be done with raw pointer arithmetic.
8type UnsafeArrayType struct {
9 unsafeType
10 elemRType unsafe.Pointer
11 pElemRType unsafe.Pointer
12 elemSize uintptr
13 likePtr bool
14}
15
16func newUnsafeArrayType(cfg *frozenConfig, type1 reflect.Type) *UnsafeArrayType {
17 return &UnsafeArrayType{
18 unsafeType: *newUnsafeType(cfg, type1),
19 elemRType: unpackEFace(type1.Elem()).data,
20 pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
21 elemSize: type1.Elem().Size(),
22 likePtr: likePtrType(type1),
23 }
24}
25
26func (type2 *UnsafeArrayType) LikePtr() bool {
27 return type2.likePtr
28}
29
30func (type2 *UnsafeArrayType) Indirect(obj interface{}) interface{} {
31 objEFace := unpackEFace(obj)
32 assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
33 return type2.UnsafeIndirect(objEFace.data)
34}
35
// UnsafeIndirect re-boxes ptr as a value of this array type. For
// pointer-like arrays the data word is loaded through ptr; otherwise ptr
// itself becomes the data word.
36func (type2 *UnsafeArrayType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
37 if type2.likePtr {
38 return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
39 }
40 return packEFace(type2.rtype, ptr)
41}
42
// SetIndex assigns *elem to (*obj)[index] after verifying both argument
// types against the cached rtypes.
43func (type2 *UnsafeArrayType) SetIndex(obj interface{}, index int, elem interface{}) {
44 objEFace := unpackEFace(obj)
45 assertType("ArrayType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
46 elemEFace := unpackEFace(elem)
47 assertType("ArrayType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
48 type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
49}
50
// UnsafeSetIndex copies one element into slot index using the runtime's
// typed memmove (keeps GC write barriers correct).
51func (type2 *UnsafeArrayType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
52 elemPtr := arrayAt(obj, index, type2.elemSize, "i < s.Len")
53 typedmemmove(type2.elemRType, elemPtr, elem)
54}
55
// GetIndex returns a *ElemType pointing INTO the array (no copy), boxed
// in an interface{}.
56func (type2 *UnsafeArrayType) GetIndex(obj interface{}, index int) interface{} {
57 objEFace := unpackEFace(obj)
58 assertType("ArrayType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
59 elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
60 return packEFace(type2.pElemRType, elemPtr)
61}
62
63func (type2 *UnsafeArrayType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
64 return arrayAt(obj, index, type2.elemSize, "i < s.Len")
65}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_eface.go b/vendor/github.com/modern-go/reflect2/unsafe_eface.go
new file mode 100644
index 0000000..805010f
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_eface.go
@@ -0,0 +1,59 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// eface mirrors the runtime layout of an empty interface value: a type
// word (rtype) and a data word.
8type eface struct {
9 rtype unsafe.Pointer
10 data unsafe.Pointer
11}
12
// unpackEFace reinterprets an interface{} as its two raw words. The
// returned *eface points at the local copy of obj, so it is only valid
// while obj is alive.
13func unpackEFace(obj interface{}) *eface {
14 return (*eface)(unsafe.Pointer(&obj))
15}
16
// packEFace constructs an interface{} from a raw type word and data word.
17func packEFace(rtype unsafe.Pointer, data unsafe.Pointer) interface{} {
18 var i interface{}
19 e := (*eface)(unsafe.Pointer(&i))
20 e.rtype = rtype
21 e.data = data
22 return i
23}
24
// UnsafeEFaceType is the unsafe implementation for the empty-interface
// type (interface{}).
25type UnsafeEFaceType struct {
26 unsafeType
27}
28
29func newUnsafeEFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeEFaceType {
30 return &UnsafeEFaceType{
31 unsafeType: *newUnsafeType(cfg, type1),
32 }
33}
34
35func (type2 *UnsafeEFaceType) IsNil(obj interface{}) bool {
36 if obj == nil {
37 return true
38 }
39 objEFace := unpackEFace(obj)
40 assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
41 return type2.UnsafeIsNil(objEFace.data)
42}
43
// UnsafeIsNil treats ptr as *interface{} and reports whether the stored
// interface's data word is nil. Note: an interface holding a typed nil
// pointer has a nil data word, so it reads as nil here.
44func (type2 *UnsafeEFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
45 if ptr == nil {
46 return true
47 }
48 return unpackEFace(*(*interface{})(ptr)).data == nil
49}
50
51func (type2 *UnsafeEFaceType) Indirect(obj interface{}) interface{} {
52 objEFace := unpackEFace(obj)
53 assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
54 return type2.UnsafeIndirect(objEFace.data)
55}
56
// UnsafeIndirect loads the interface{} value that ptr points at.
57func (type2 *UnsafeEFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
58 return *(*interface{})(ptr)
59}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_field.go b/vendor/github.com/modern-go/reflect2/unsafe_field.go
new file mode 100644
index 0000000..5eb5313
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_field.go
@@ -0,0 +1,74 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// UnsafeStructField is the unsafe StructField implementation. It caches
// the field type's rtype and *FieldType rtype so Get/Set can operate on
// raw pointers via the field's byte offset.
8type UnsafeStructField struct {
9 reflect.StructField
10 structType *UnsafeStructType
11 rtype unsafe.Pointer
12 ptrRType unsafe.Pointer
13}
14
15func newUnsafeStructField(structType *UnsafeStructType, structField reflect.StructField) *UnsafeStructField {
16 return &UnsafeStructField{
17 StructField: structField,
18 rtype: unpackEFace(structField.Type).data,
19 ptrRType: unpackEFace(reflect.PtrTo(structField.Type)).data,
20 structType: structType,
21 }
22}
23
24func (field *UnsafeStructField) Offset() uintptr {
25 return field.StructField.Offset
26}
27
28func (field *UnsafeStructField) Name() string {
29 return field.StructField.Name
30}
31
32func (field *UnsafeStructField) PkgPath() string {
33 return field.StructField.PkgPath
34}
35
36func (field *UnsafeStructField) Type() Type {
37 return field.structType.cfg.Type2(field.StructField.Type)
38}
39
40func (field *UnsafeStructField) Tag() reflect.StructTag {
41 return field.StructField.Tag
42}
43
44func (field *UnsafeStructField) Index() []int {
45 return field.StructField.Index
46}
47
48func (field *UnsafeStructField) Anonymous() bool {
49 return field.StructField.Anonymous
50}
51
// Set stores *value into this field of *obj after checking both argument
// types against the cached rtypes.
52func (field *UnsafeStructField) Set(obj interface{}, value interface{}) {
53 objEFace := unpackEFace(obj)
54 assertType("StructField.SetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
55 valueEFace := unpackEFace(value)
56 assertType("StructField.SetIndex argument 2", field.ptrRType, valueEFace.rtype)
57 field.UnsafeSet(objEFace.data, valueEFace.data)
58}
59
// UnsafeSet copies the field value with the runtime's typed memmove so
// GC write barriers stay correct.
60func (field *UnsafeStructField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
61 fieldPtr := add(obj, field.StructField.Offset, "same as non-reflect &v.field")
62 typedmemmove(field.rtype, fieldPtr, value)
63}
64
// Get returns a *FieldType pointing INTO the struct (no copy), boxed in
// an interface{}.
65func (field *UnsafeStructField) Get(obj interface{}) interface{} {
66 objEFace := unpackEFace(obj)
67 assertType("StructField.GetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
68 value := field.UnsafeGet(objEFace.data)
69 return packEFace(field.ptrRType, value)
70}
71
72func (field *UnsafeStructField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
73 return add(obj, field.StructField.Offset, "same as non-reflect &v.field")
74}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_iface.go b/vendor/github.com/modern-go/reflect2/unsafe_iface.go
new file mode 100644
index 0000000..b601955
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_iface.go
@@ -0,0 +1,64 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// iface mirrors the runtime layout of a non-empty interface value: an
// itab pointer and a data word.
8type iface struct {
9 itab *itab
10 data unsafe.Pointer
11}
12
// itab mirrors the leading words of the runtime's itab; rtype is the
// dynamic type word (used below to re-box the value as an interface{}).
13type itab struct {
14 ignore unsafe.Pointer
15 rtype unsafe.Pointer
16}
17
// IFaceToEFace converts a non-empty interface value (pointed at by ptr)
// into an empty interface holding the same dynamic value; a nil
// interface stays nil.
18func IFaceToEFace(ptr unsafe.Pointer) interface{} {
19 iface := (*iface)(ptr)
20 if iface.itab == nil {
21 return nil
22 }
23 return packEFace(iface.itab.rtype, iface.data)
24}
25
// UnsafeIFaceType is the unsafe implementation for non-empty interface
// types.
26type UnsafeIFaceType struct {
27 unsafeType
28}
29
30func newUnsafeIFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeIFaceType {
31 return &UnsafeIFaceType{
32 unsafeType: *newUnsafeType(cfg, type1),
33 }
34}
35
36func (type2 *UnsafeIFaceType) Indirect(obj interface{}) interface{} {
37 objEFace := unpackEFace(obj)
38 assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
39 return type2.UnsafeIndirect(objEFace.data)
40}
41
42func (type2 *UnsafeIFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
43 return IFaceToEFace(ptr)
44}
45
46func (type2 *UnsafeIFaceType) IsNil(obj interface{}) bool {
47 if obj == nil {
48 return true
49 }
50 objEFace := unpackEFace(obj)
51 assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
52 return type2.UnsafeIsNil(objEFace.data)
53}
54
// UnsafeIsNil reports whether the interface value at ptr has a nil itab,
// i.e. the interface itself is nil (a typed nil pointer inside the
// interface does NOT count as nil here — the itab is non-nil).
55func (type2 *UnsafeIFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
56 if ptr == nil {
57 return true
58 }
59 iface := (*iface)(ptr)
60 if iface.itab == nil {
61 return true
62 }
63 return false
64}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go
new file mode 100644
index 0000000..b49f614
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go
@@ -0,0 +1,76 @@
1package reflect2
2
3import "unsafe"
4
// unsafe_New allocates a zeroed value of the given rtype on the heap
// (linked to reflect's internal allocator).
5//go:linkname unsafe_New reflect.unsafe_New
6func unsafe_New(rtype unsafe.Pointer) unsafe.Pointer
7
// typedmemmove copies a value of the given rtype from src to dst with
// GC write barriers.
8//go:linkname typedmemmove reflect.typedmemmove
9func typedmemmove(rtype unsafe.Pointer, dst, src unsafe.Pointer)
10
// unsafe_NewArray allocates a zeroed array of length elements of rtype.
11//go:linkname unsafe_NewArray reflect.unsafe_NewArray
12func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer
13
14// typedslicecopy copies a slice of elemType values from src to dst,
15// returning the number of elements copied.
16//go:linkname typedslicecopy reflect.typedslicecopy
17//go:noescape
18func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int
19
// mapassign performs m[key] = val on a raw map pointer.
20//go:linkname mapassign reflect.mapassign
21//go:noescape
22func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer, val unsafe.Pointer)
23
// mapaccess looks key up in m, returning a pointer to the element or nil
// if absent.
24//go:linkname mapaccess reflect.mapaccess
25//go:noescape
26func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
27
// mapiternext advances a map iteration; it.key becomes nil when done.
28//go:noescape
29//go:linkname mapiternext reflect.mapiternext
30func mapiternext(it *hiter)
31
// ifaceE2I converts the empty-interface src into the interface type
// rtype, storing the result at dst.
32//go:linkname ifaceE2I reflect.ifaceE2I
33func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer)
34
// A hash iteration structure.
// hiter must match the runtime's field-for-field; only key and value are
// read directly by this package, the rest exist to reserve the layout.
35// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
36// the layout of this structure.
37type hiter struct {
38 key unsafe.Pointer
39 value unsafe.Pointer
40 t unsafe.Pointer
41 h unsafe.Pointer
42 buckets unsafe.Pointer
43 bptr unsafe.Pointer
44 overflow *[]unsafe.Pointer
45 oldoverflow *[]unsafe.Pointer
46 startBucket uintptr
47 offset uint8
48 wrapped bool
49 B uint8
50 i uint8
51 bucket uintptr
52 checkBucket uintptr
53}
54
55// add returns p+x.
56//
57// The whySafe string is ignored, so that the function still inlines
58// as efficiently as p+x, but all call sites should use the string to
59// record why the addition is safe, which is to say why the addition
60// does not cause x to advance to the very end of p's allocation
61// and therefore point incorrectly at the next block in memory.
62func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
63 return unsafe.Pointer(uintptr(p) + x)
64}
65
66// arrayAt returns the i-th element of p,
67// an array whose elements are eltSize bytes wide.
68// The array pointed at by p must have at least i+1 elements:
69// it is invalid (but impossible to check here) to pass i >= len,
70// because then the result will point outside the array.
71// whySafe must explain why i < len. (Passing "i < len" is fine;
72// the benefit is to surface this assumption at the call site.)
73func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
74 return add(p, uintptr(i)*eltSize, "i < len")
75}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go
new file mode 100644
index 0000000..37872da
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go
@@ -0,0 +1,130 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// UnsafeMapType is the unsafe MapType implementation. Maps are handled
// as *map (a pointer to the map header word); pKeyRType/pElemRType cache
// the *KeyType and *ElemType rtypes used to box lookup results.
8type UnsafeMapType struct {
9 unsafeType
10 pKeyRType unsafe.Pointer
11 pElemRType unsafe.Pointer
12}
13
14func newUnsafeMapType(cfg *frozenConfig, type1 reflect.Type) MapType {
15 return &UnsafeMapType{
16 unsafeType: *newUnsafeType(cfg, type1),
17 pKeyRType: unpackEFace(reflect.PtrTo(type1.Key())).data,
18 pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
19 }
20}
21
22func (type2 *UnsafeMapType) IsNil(obj interface{}) bool {
23 if obj == nil {
24 return true
25 }
26 objEFace := unpackEFace(obj)
27 assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
28 return type2.UnsafeIsNil(objEFace.data)
29}
30
// UnsafeIsNil treats ptr as a *map and reports whether the map word it
// points at is nil.
31func (type2 *UnsafeMapType) UnsafeIsNil(ptr unsafe.Pointer) bool {
32 if ptr == nil {
33 return true
34 }
35 return *(*unsafe.Pointer)(ptr) == nil
36}
37
// LikePtr is true: a map value is represented by a single pointer word.
38func (type2 *UnsafeMapType) LikePtr() bool {
39 return true
40}
41
42func (type2 *UnsafeMapType) Indirect(obj interface{}) interface{} {
43 objEFace := unpackEFace(obj)
44 assertType("MapType.Indirect argument 1", type2.ptrRType, objEFace.rtype)
45 return type2.UnsafeIndirect(objEFace.data)
46}
47
48func (type2 *UnsafeMapType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
49 return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
50}
51
52func (type2 *UnsafeMapType) Key() Type {
53 return type2.cfg.Type2(type2.Type.Key())
54}
55
// MakeMap returns a *map boxed in an interface{}, pre-sized for cap
// entries.
56func (type2 *UnsafeMapType) MakeMap(cap int) interface{} {
57 return packEFace(type2.ptrRType, type2.UnsafeMakeMap(cap))
58}
59
60func (type2 *UnsafeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
61 m := makeMapWithSize(type2.rtype, cap)
62 return unsafe.Pointer(&m)
63}
64
// SetIndex sets m[*key] = *elem; obj is a *map, key and elem are
// pointers to the key and element values.
65func (type2 *UnsafeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
66 objEFace := unpackEFace(obj)
67 assertType("MapType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
68 keyEFace := unpackEFace(key)
69 assertType("MapType.SetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
70 elemEFace := unpackEFace(elem)
71 assertType("MapType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
72 type2.UnsafeSetIndex(objEFace.data, keyEFace.data, elemEFace.data)
73}
74
75func (type2 *UnsafeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
76 mapassign(type2.rtype, *(*unsafe.Pointer)(obj), key, elem)
77}
78
// TryGetIndex looks *key up in *obj, returning (boxed *elem, true) when
// present and (nil, false) when absent.
79func (type2 *UnsafeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
80 objEFace := unpackEFace(obj)
81 assertType("MapType.TryGetIndex argument 1", type2.ptrRType, objEFace.rtype)
82 keyEFace := unpackEFace(key)
83 assertType("MapType.TryGetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
84 elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
85 if elemPtr == nil {
86 return nil, false
87 }
88 return packEFace(type2.pElemRType, elemPtr), true
89}
90
// GetIndex is like TryGetIndex but always boxes the result; a missing
// key yields an interface holding a nil *ElemType.
91func (type2 *UnsafeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
92 objEFace := unpackEFace(obj)
93 assertType("MapType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
94 keyEFace := unpackEFace(key)
95 assertType("MapType.GetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
96 elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
97 return packEFace(type2.pElemRType, elemPtr)
98}
99
100func (type2 *UnsafeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
101 return mapaccess(type2.rtype, *(*unsafe.Pointer)(obj), key)
102}
103
// Iterate starts a raw map iteration over *obj.
// NOTE(review): UnsafeIterate is not defined in this file — presumably it
// lives elsewhere in the package alongside the mapiterinit linkname.
104func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator {
105 objEFace := unpackEFace(obj)
106 assertType("MapType.Iterate argument 1", type2.ptrRType, objEFace.rtype)
107 return type2.UnsafeIterate(objEFace.data)
108}
109
// UnsafeMapIterator walks a live runtime map iteration (hiter); the
// cached pointer rtypes box each key/elem pair in Next.
110type UnsafeMapIterator struct {
111 *hiter
112 pKeyRType unsafe.Pointer
113 pElemRType unsafe.Pointer
114}
115
// HasNext: the runtime sets hiter.key to nil when iteration is finished.
116func (iter *UnsafeMapIterator) HasNext() bool {
117 return iter.key != nil
118}
119
120func (iter *UnsafeMapIterator) Next() (interface{}, interface{}) {
121 key, elem := iter.UnsafeNext()
122 return packEFace(iter.pKeyRType, key), packEFace(iter.pElemRType, elem)
123}
124
// UnsafeNext returns the current key/value pointers, then advances the
// underlying runtime iterator.
125func (iter *UnsafeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
126 key := iter.key
127 elem := iter.value
128 mapiternext(iter.hiter)
129 return key, elem
130}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_ptr.go b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
new file mode 100644
index 0000000..8e5ec9c
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
@@ -0,0 +1,46 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// UnsafePtrType is the unsafe implementation for pointer types; a
// pointer value is a single word, so it is trivially "like pointer".
8type UnsafePtrType struct {
9 unsafeType
10}
11
12func newUnsafePtrType(cfg *frozenConfig, type1 reflect.Type) *UnsafePtrType {
13 return &UnsafePtrType{
14 unsafeType: *newUnsafeType(cfg, type1),
15 }
16}
17
18func (type2 *UnsafePtrType) IsNil(obj interface{}) bool {
19 if obj == nil {
20 return true
21 }
22 objEFace := unpackEFace(obj)
23 assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
24 return type2.UnsafeIsNil(objEFace.data)
25}
26
// UnsafeIsNil treats ptr as **T and reports whether the pointer it
// points at is nil.
27func (type2 *UnsafePtrType) UnsafeIsNil(ptr unsafe.Pointer) bool {
28 if ptr == nil {
29 return true
30 }
31 return *(*unsafe.Pointer)(ptr) == nil
32}
33
34func (type2 *UnsafePtrType) LikePtr() bool {
35 return true
36}
37
38func (type2 *UnsafePtrType) Indirect(obj interface{}) interface{} {
39 objEFace := unpackEFace(obj)
40 assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
41 return type2.UnsafeIndirect(objEFace.data)
42}
43
// UnsafeIndirect loads the pointer stored at ptr and boxes it as a value
// of this pointer type.
44func (type2 *UnsafePtrType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
45 return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
46}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_slice.go b/vendor/github.com/modern-go/reflect2/unsafe_slice.go
new file mode 100644
index 0000000..1c6d876
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_slice.go
@@ -0,0 +1,177 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
// sliceHeader is a safe version of SliceHeader used within this package.
8type sliceHeader struct {
9 Data unsafe.Pointer
10 Len int
11 Cap int
12}
13
// UnsafeSliceType is the unsafe SliceType implementation. Slices are
// handled as *slice (a pointer to the header); element access uses raw
// pointer arithmetic on the cached element size.
14type UnsafeSliceType struct {
15 unsafeType
16 elemRType unsafe.Pointer
17 pElemRType unsafe.Pointer
18 elemSize uintptr
19}
20
21func newUnsafeSliceType(cfg *frozenConfig, type1 reflect.Type) SliceType {
22 elemType := type1.Elem()
23 return &UnsafeSliceType{
24 unsafeType: *newUnsafeType(cfg, type1),
25 pElemRType: unpackEFace(reflect.PtrTo(elemType)).data,
26 elemRType: unpackEFace(elemType).data,
27 elemSize: elemType.Size(),
28 }
29}
30
31func (type2 *UnsafeSliceType) Set(obj interface{}, val interface{}) {
32 objEFace := unpackEFace(obj)
33 assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
34 valEFace := unpackEFace(val)
35 assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
36 type2.UnsafeSet(objEFace.data, valEFace.data)
37}
38
// UnsafeSet copies the whole slice header (data, len, cap) — a shallow
// assignment, the backing array is shared.
39func (type2 *UnsafeSliceType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
40 *(*sliceHeader)(ptr) = *(*sliceHeader)(val)
41}
42
43func (type2 *UnsafeSliceType) IsNil(obj interface{}) bool {
44 if obj == nil {
45 return true
46 }
47 objEFace := unpackEFace(obj)
48 assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
49 return type2.UnsafeIsNil(objEFace.data)
50}
51
// UnsafeIsNil reports nil by inspecting the header's Data word only.
52func (type2 *UnsafeSliceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
53 if ptr == nil {
54 return true
55 }
56 return (*sliceHeader)(ptr).Data == nil
57}
58
59func (type2 *UnsafeSliceType) SetNil(obj interface{}) {
60 objEFace := unpackEFace(obj)
61 assertType("SliceType.SetNil argument 1", type2.ptrRType, objEFace.rtype)
62 type2.UnsafeSetNil(objEFace.data)
63}
64
65func (type2 *UnsafeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
66 header := (*sliceHeader)(ptr)
67 header.Len = 0
68 header.Cap = 0
69 header.Data = nil
70}
71
72func (type2 *UnsafeSliceType) MakeSlice(length int, cap int) interface{} {
73 return packEFace(type2.ptrRType, type2.UnsafeMakeSlice(length, cap))
74}
75
// UnsafeMakeSlice allocates a zeroed backing array of cap elements and
// returns a pointer to a fresh header with the given length and cap.
76func (type2 *UnsafeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
77 header := &sliceHeader{unsafe_NewArray(type2.elemRType, cap), length, cap}
78 return unsafe.Pointer(header)
79}
80
81func (type2 *UnsafeSliceType) LengthOf(obj interface{}) int {
82 objEFace := unpackEFace(obj)
83 assertType("SliceType.Len argument 1", type2.ptrRType, objEFace.rtype)
84 return type2.UnsafeLengthOf(objEFace.data)
85}
86
87func (type2 *UnsafeSliceType) UnsafeLengthOf(obj unsafe.Pointer) int {
88 header := (*sliceHeader)(obj)
89 return header.Len
90}
91
92func (type2 *UnsafeSliceType) SetIndex(obj interface{}, index int, elem interface{}) {
93 objEFace := unpackEFace(obj)
94 assertType("SliceType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
95 elemEFace := unpackEFace(elem)
96 assertType("SliceType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
97 type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
98}
99
// UnsafeSetIndex copies *elem into slot index with the runtime's typed
// memmove (keeps GC write barriers correct). No bounds check is done.
100func (type2 *UnsafeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
101 header := (*sliceHeader)(obj)
102 elemPtr := arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
103 typedmemmove(type2.elemRType, elemPtr, elem)
104}
105
// GetIndex returns a *ElemType pointing INTO the backing array (no
// copy), boxed in an interface{}.
106func (type2 *UnsafeSliceType) GetIndex(obj interface{}, index int) interface{} {
107 objEFace := unpackEFace(obj)
108 assertType("SliceType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
109 elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
110 return packEFace(type2.pElemRType, elemPtr)
111}
112
113func (type2 *UnsafeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
114 header := (*sliceHeader)(obj)
115 return arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
116}
117
118func (type2 *UnsafeSliceType) Append(obj interface{}, elem interface{}) {
119 objEFace := unpackEFace(obj)
120 assertType("SliceType.Append argument 1", type2.ptrRType, objEFace.rtype)
121 elemEFace := unpackEFace(elem)
122 assertType("SliceType.Append argument 2", type2.pElemRType, elemEFace.rtype)
123 type2.UnsafeAppend(objEFace.data, elemEFace.data)
124}
125
// UnsafeAppend grows the slice by one and writes *elem into the new last
// slot, mutating the header in place.
126func (type2 *UnsafeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
127 header := (*sliceHeader)(obj)
128 oldLen := header.Len
129 type2.UnsafeGrow(obj, oldLen+1)
130 type2.UnsafeSetIndex(obj, oldLen, elem)
131}
132
133func (type2 *UnsafeSliceType) Cap(obj interface{}) int {
134 objEFace := unpackEFace(obj)
135 assertType("SliceType.Cap argument 1", type2.ptrRType, objEFace.rtype)
136 return type2.UnsafeCap(objEFace.data)
137}
138
139func (type2 *UnsafeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
140 return (*sliceHeader)(ptr).Cap
141}
142
143func (type2 *UnsafeSliceType) Grow(obj interface{}, newLength int) {
144 objEFace := unpackEFace(obj)
145 assertType("SliceType.Grow argument 1", type2.ptrRType, objEFace.rtype)
146 type2.UnsafeGrow(objEFace.data, newLength)
147}
148
// UnsafeGrow sets the slice length to newLength. Within capacity this is
// just a Len update; beyond capacity it allocates a larger backing array
// (calcNewCap policy), copies the old elements with typedslicecopy, and
// rewrites the header in place.
149func (type2 *UnsafeSliceType) UnsafeGrow(obj unsafe.Pointer, newLength int) {
150 header := (*sliceHeader)(obj)
151 if newLength <= header.Cap {
152 header.Len = newLength
153 return
154 }
155 newCap := calcNewCap(header.Cap, newLength)
156 newHeader := (*sliceHeader)(type2.UnsafeMakeSlice(header.Len, newCap))
157 typedslicecopy(type2.elemRType, *newHeader, *header)
158 header.Data = newHeader.Data
159 header.Cap = newHeader.Cap
160 header.Len = newLength
161}
162
// calcNewCap mirrors the runtime's append growth policy: double while
// below 1024, then grow by 25% until expectedCap is reached.
163func calcNewCap(cap int, expectedCap int) int {
164 if cap == 0 {
165 cap = expectedCap
166 } else {
167 for cap < expectedCap {
168 if cap < 1024 {
169 cap += cap
170 } else {
171 cap += cap / 4
172 }
173 }
174 }
175 return cap
176}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_struct.go b/vendor/github.com/modern-go/reflect2/unsafe_struct.go
new file mode 100644
index 0000000..804d916
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_struct.go
@@ -0,0 +1,59 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
8type UnsafeStructType struct {
9 unsafeType
10 likePtr bool
11}
12
13func newUnsafeStructType(cfg *frozenConfig, type1 reflect.Type) *UnsafeStructType {
14 return &UnsafeStructType{
15 unsafeType: *newUnsafeType(cfg, type1),
16 likePtr: likePtrType(type1),
17 }
18}
19
20func (type2 *UnsafeStructType) LikePtr() bool {
21 return type2.likePtr
22}
23
24func (type2 *UnsafeStructType) Indirect(obj interface{}) interface{} {
25 objEFace := unpackEFace(obj)
26 assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
27 return type2.UnsafeIndirect(objEFace.data)
28}
29
30func (type2 *UnsafeStructType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
31 if type2.likePtr {
32 return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
33 }
34 return packEFace(type2.rtype, ptr)
35}
36
37func (type2 *UnsafeStructType) FieldByName(name string) StructField {
38 structField, found := type2.Type.FieldByName(name)
39 if !found {
40 return nil
41 }
42 return newUnsafeStructField(type2, structField)
43}
44
45func (type2 *UnsafeStructType) Field(i int) StructField {
46 return newUnsafeStructField(type2, type2.Type.Field(i))
47}
48
49func (type2 *UnsafeStructType) FieldByIndex(index []int) StructField {
50 return newUnsafeStructField(type2, type2.Type.FieldByIndex(index))
51}
52
53func (type2 *UnsafeStructType) FieldByNameFunc(match func(string) bool) StructField {
54 structField, found := type2.Type.FieldByNameFunc(match)
55 if !found {
56 panic("field match condition not found in " + type2.Type.String())
57 }
58 return newUnsafeStructField(type2, structField)
59}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_type.go b/vendor/github.com/modern-go/reflect2/unsafe_type.go
new file mode 100644
index 0000000..1394171
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_type.go
@@ -0,0 +1,85 @@
1package reflect2
2
3import (
4 "reflect"
5 "unsafe"
6)
7
8type unsafeType struct {
9 safeType
10 rtype unsafe.Pointer
11 ptrRType unsafe.Pointer
12}
13
14func newUnsafeType(cfg *frozenConfig, type1 reflect.Type) *unsafeType {
15 return &unsafeType{
16 safeType: safeType{
17 Type: type1,
18 cfg: cfg,
19 },
20 rtype: unpackEFace(type1).data,
21 ptrRType: unpackEFace(reflect.PtrTo(type1)).data,
22 }
23}
24
25func (type2 *unsafeType) Set(obj interface{}, val interface{}) {
26 objEFace := unpackEFace(obj)
27 assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
28 valEFace := unpackEFace(val)
29 assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
30 type2.UnsafeSet(objEFace.data, valEFace.data)
31}
32
33func (type2 *unsafeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
34 typedmemmove(type2.rtype, ptr, val)
35}
36
37func (type2 *unsafeType) IsNil(obj interface{}) bool {
38 objEFace := unpackEFace(obj)
39 assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
40 return type2.UnsafeIsNil(objEFace.data)
41}
42
43func (type2 *unsafeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
44 return ptr == nil
45}
46
47func (type2 *unsafeType) UnsafeNew() unsafe.Pointer {
48 return unsafe_New(type2.rtype)
49}
50
51func (type2 *unsafeType) New() interface{} {
52 return packEFace(type2.ptrRType, type2.UnsafeNew())
53}
54
55func (type2 *unsafeType) PackEFace(ptr unsafe.Pointer) interface{} {
56 return packEFace(type2.ptrRType, ptr)
57}
58
59func (type2 *unsafeType) RType() uintptr {
60 return uintptr(type2.rtype)
61}
62
63func (type2 *unsafeType) Indirect(obj interface{}) interface{} {
64 objEFace := unpackEFace(obj)
65 assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
66 return type2.UnsafeIndirect(objEFace.data)
67}
68
69func (type2 *unsafeType) UnsafeIndirect(obj unsafe.Pointer) interface{} {
70 return packEFace(type2.rtype, obj)
71}
72
73func (type2 *unsafeType) LikePtr() bool {
74 return false
75}
76
77func assertType(where string, expectRType unsafe.Pointer, actualRType unsafe.Pointer) {
78 if expectRType != actualRType {
79 expectType := reflect.TypeOf(0)
80 (*iface)(unsafe.Pointer(&expectType)).data = expectRType
81 actualType := reflect.TypeOf(0)
82 (*iface)(unsafe.Pointer(&actualType)).data = actualRType
83 panic(where + ": expect " + expectType.String() + ", actual " + actualType.String())
84 }
85}
diff --git a/vendor/github.com/rs/xid/.appveyor.yml b/vendor/github.com/rs/xid/.appveyor.yml
new file mode 100644
index 0000000..c73bb33
--- /dev/null
+++ b/vendor/github.com/rs/xid/.appveyor.yml
@@ -0,0 +1,27 @@
1version: 1.0.0.{build}
2
3platform: x64
4
5branches:
6 only:
7 - master
8
9clone_folder: c:\gopath\src\github.com\rs\xid
10
11environment:
12 GOPATH: c:\gopath
13
14install:
15 - echo %PATH%
16 - echo %GOPATH%
17 - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
18 - go version
19 - go env
20 - go get -t .
21
22build_script:
23 - go build
24
25test_script:
26 - go test
27
diff --git a/vendor/github.com/rs/xid/.golangci.yml b/vendor/github.com/rs/xid/.golangci.yml
new file mode 100644
index 0000000..7929600
--- /dev/null
+++ b/vendor/github.com/rs/xid/.golangci.yml
@@ -0,0 +1,5 @@
1run:
2 tests: false
3
4output:
5 sort-results: true
diff --git a/vendor/github.com/rs/xid/.travis.yml b/vendor/github.com/rs/xid/.travis.yml
new file mode 100644
index 0000000..b37da15
--- /dev/null
+++ b/vendor/github.com/rs/xid/.travis.yml
@@ -0,0 +1,8 @@
1language: go
2go:
3- "1.9"
4- "1.10"
5- "master"
6matrix:
7 allow_failures:
8 - go: "master"
diff --git a/vendor/github.com/rs/xid/LICENSE b/vendor/github.com/rs/xid/LICENSE
new file mode 100644
index 0000000..47c5e9d
--- /dev/null
+++ b/vendor/github.com/rs/xid/LICENSE
@@ -0,0 +1,19 @@
1Copyright (c) 2015 Olivier Poitrey <[email protected]>
2
3Permission is hereby granted, free of charge, to any person obtaining a copy
4of this software and associated documentation files (the "Software"), to deal
5in the Software without restriction, including without limitation the rights
6to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7copies of the Software, and to permit persons to whom the Software is furnished
8to do so, subject to the following conditions:
9
10The above copyright notice and this permission notice shall be included in all
11copies or substantial portions of the Software.
12
13THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19THE SOFTWARE.
diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md
new file mode 100644
index 0000000..974e67d
--- /dev/null
+++ b/vendor/github.com/rs/xid/README.md
@@ -0,0 +1,119 @@
1# Globally Unique ID Generator
2
3[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid)
4
5Package xid is a globally unique id generator library, ready to safely be used directly in your server code.
6
7Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string:
8https://docs.mongodb.org/manual/reference/object-id/
9
10- 4-byte value representing the seconds since the Unix epoch,
11- 3-byte machine identifier,
12- 2-byte process id, and
13- 3-byte counter, starting with a random value.
14
15The binary representation of the id is compatible with Mongo 12 bytes Object IDs.
16The string representation is using base32 hex (w/o padding) for better space efficiency
17when stored in that form (20 bytes). The hex variant of base32 is used to retain the
18sortable property of the id.
19
20Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an
21issue when transported as a string between various systems. Base36 wasn't retained either
22because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned)
23and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long,
24all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`).
25
26UUIDs are 16 bytes (128 bits) and 36 chars as string representation. Twitter Snowflake
27ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central
28generator servers. xid stands in between with 12 bytes (96 bits) and a more compact
29URL-safe string representation (20 chars). No configuration or central generator server
30is required so it can be used directly in server's code.
31
32| Name | Binary Size | String Size | Features
33|-------------|-------------|----------------|----------------
34| [UUID] | 16 bytes | 36 chars | configuration free, not sortable
35| [shortuuid] | 16 bytes | 22 chars | configuration free, not sortable
36| [Snowflake] | 8 bytes | up to 20 chars | needs machine/DC configuration, needs central server, sortable
37| [MongoID] | 12 bytes | 24 chars | configuration free, sortable
38| xid | 12 bytes | 20 chars | configuration free, sortable
39
40[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier
41[shortuuid]: https://github.com/stochastic-technologies/shortuuid
42[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake
43[MongoID]: https://docs.mongodb.org/manual/reference/object-id/
44
45Features:
46
47- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake
48- Base32 hex encoded by default (20 chars when transported as printable string, still sortable)
 49- Non configured, you don't need to set a unique machine and/or data center id
50- K-ordered
51- Embedded time with 1 second precision
52- Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process
53- Lock-free (i.e.: unlike UUIDv1 and v2)
54
55Best used with [zerolog](https://github.com/rs/zerolog)'s
56[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler).
57
58Notes:
59
60- Xid is dependent on the system time, a monotonic counter and so is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most other UUID-like implementations are also not cryptographically secure. You should use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang), if you want a truly random ID generator.
61
62References:
63
64- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems
65- https://en.wikipedia.org/wiki/Universally_unique_identifier
66- https://blog.twitter.com/2010/announcing-snowflake
67- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid
68- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride
69- Rust port by [Jérôme Renard](https://github.com/jeromer/): https://github.com/jeromer/libxid
70- Ruby port by [Valar](https://github.com/valarpirai/): https://github.com/valarpirai/ruby_xid
71- Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid
72- Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid
73- PostgreSQL port by [Rasmus Holm](https://github.com/crholm): https://github.com/modfin/pg-xid
74- Swift port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/swift-xid
75- C++ port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/libxid
76
77## Install
78
79 go get github.com/rs/xid
80
81## Usage
82
83```go
84guid := xid.New()
85
86println(guid.String())
87// Output: 9m4e2mr0ui3e8a215n4g
88```
89
90Get `xid` embedded info:
91
92```go
93guid.Machine()
94guid.Pid()
95guid.Time()
96guid.Counter()
97```
98
99## Benchmark
100
101Benchmark against Go [Maxim Bublis](https://github.com/satori)'s [UUID](https://github.com/satori/go.uuid).
102
103```
104BenchmarkXID 20000000 91.1 ns/op 32 B/op 1 allocs/op
105BenchmarkXID-2 20000000 55.9 ns/op 32 B/op 1 allocs/op
106BenchmarkXID-4 50000000 32.3 ns/op 32 B/op 1 allocs/op
107BenchmarkUUIDv1 10000000 204 ns/op 48 B/op 1 allocs/op
108BenchmarkUUIDv1-2 10000000 160 ns/op 48 B/op 1 allocs/op
109BenchmarkUUIDv1-4 10000000 195 ns/op 48 B/op 1 allocs/op
110BenchmarkUUIDv4 1000000 1503 ns/op 64 B/op 2 allocs/op
111BenchmarkUUIDv4-2 1000000 1427 ns/op 64 B/op 2 allocs/op
112BenchmarkUUIDv4-4 1000000 1452 ns/op 64 B/op 2 allocs/op
113```
114
115Note: UUIDv1 requires a global lock, hence the performance degradation as we add more CPUs.
116
117## Licenses
118
119All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE).
diff --git a/vendor/github.com/rs/xid/error.go b/vendor/github.com/rs/xid/error.go
new file mode 100644
index 0000000..ea25374
--- /dev/null
+++ b/vendor/github.com/rs/xid/error.go
@@ -0,0 +1,11 @@
1package xid
2
3const (
4 // ErrInvalidID is returned when trying to unmarshal an invalid ID.
5 ErrInvalidID strErr = "xid: invalid ID"
6)
7
8// strErr allows declaring errors as constants.
9type strErr string
10
11func (err strErr) Error() string { return string(err) }
diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go
new file mode 100644
index 0000000..08351ff
--- /dev/null
+++ b/vendor/github.com/rs/xid/hostid_darwin.go
@@ -0,0 +1,9 @@
1// +build darwin
2
3package xid
4
5import "syscall"
6
7func readPlatformMachineID() (string, error) {
8 return syscall.Sysctl("kern.uuid")
9}
diff --git a/vendor/github.com/rs/xid/hostid_fallback.go b/vendor/github.com/rs/xid/hostid_fallback.go
new file mode 100644
index 0000000..7fbd3c0
--- /dev/null
+++ b/vendor/github.com/rs/xid/hostid_fallback.go
@@ -0,0 +1,9 @@
1// +build !darwin,!linux,!freebsd,!windows
2
3package xid
4
5import "errors"
6
7func readPlatformMachineID() (string, error) {
8 return "", errors.New("not implemented")
9}
diff --git a/vendor/github.com/rs/xid/hostid_freebsd.go b/vendor/github.com/rs/xid/hostid_freebsd.go
new file mode 100644
index 0000000..be25a03
--- /dev/null
+++ b/vendor/github.com/rs/xid/hostid_freebsd.go
@@ -0,0 +1,9 @@
1// +build freebsd
2
3package xid
4
5import "syscall"
6
7func readPlatformMachineID() (string, error) {
8 return syscall.Sysctl("kern.hostuuid")
9}
diff --git a/vendor/github.com/rs/xid/hostid_linux.go b/vendor/github.com/rs/xid/hostid_linux.go
new file mode 100644
index 0000000..837b204
--- /dev/null
+++ b/vendor/github.com/rs/xid/hostid_linux.go
@@ -0,0 +1,13 @@
1// +build linux
2
3package xid
4
5import "io/ioutil"
6
7func readPlatformMachineID() (string, error) {
8 b, err := ioutil.ReadFile("/etc/machine-id")
9 if err != nil || len(b) == 0 {
10 b, err = ioutil.ReadFile("/sys/class/dmi/id/product_uuid")
11 }
12 return string(b), err
13}
diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go
new file mode 100644
index 0000000..ec2593e
--- /dev/null
+++ b/vendor/github.com/rs/xid/hostid_windows.go
@@ -0,0 +1,38 @@
1// +build windows
2
3package xid
4
5import (
6 "fmt"
7 "syscall"
8 "unsafe"
9)
10
11func readPlatformMachineID() (string, error) {
12 // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go
13 var h syscall.Handle
14 err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h)
15 if err != nil {
16 return "", err
17 }
18 defer syscall.RegCloseKey(h)
19
20 const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16
21 const uuidLen = 36
22
23 var regBuf [syscallRegBufLen]uint16
24 bufLen := uint32(syscallRegBufLen)
25 var valType uint32
26 err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(&regBuf[0])), &bufLen)
27 if err != nil {
28 return "", err
29 }
30
31 hostID := syscall.UTF16ToString(regBuf[:])
32 hostIDLen := len(hostID)
33 if hostIDLen != uuidLen {
34 return "", fmt.Errorf("HostID incorrect: %q\n", hostID)
35 }
36
37 return hostID, nil
38}
diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go
new file mode 100644
index 0000000..fcd7a04
--- /dev/null
+++ b/vendor/github.com/rs/xid/id.go
@@ -0,0 +1,391 @@
1// Package xid is a globally unique id generator suited for web scale
2//
3// Xid is using Mongo Object ID algorithm to generate globally unique ids:
4// https://docs.mongodb.org/manual/reference/object-id/
5//
6// - 4-byte value representing the seconds since the Unix epoch,
7// - 3-byte machine identifier,
8// - 2-byte process id, and
9// - 3-byte counter, starting with a random value.
10//
11// The binary representation of the id is compatible with Mongo 12 bytes Object IDs.
12// The string representation is using base32 hex (w/o padding) for better space efficiency
13// when stored in that form (20 bytes). The hex variant of base32 is used to retain the
14// sortable property of the id.
15//
16// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an
17// issue when transported as a string between various systems. Base36 wasn't retained either
18// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned)
19// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long,
20// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`).
21//
22// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between
23// with 12 bytes with a more compact string representation ready for the web and no
24// required configuration or central generation server.
25//
26// Features:
27//
28// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake
29// - Base32 hex encoded by default (16 bytes storage when transported as printable string)
30// - Non configured, you don't need set a unique machine and/or data center id
31// - K-ordered
32// - Embedded time with 1 second precision
33// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process
34//
35// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler).
36//
37// References:
38//
39// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems
40// - https://en.wikipedia.org/wiki/Universally_unique_identifier
41// - https://blog.twitter.com/2010/announcing-snowflake
42package xid
43
44import (
45 "bytes"
46 "crypto/sha256"
47 "crypto/rand"
48 "database/sql/driver"
49 "encoding/binary"
50 "fmt"
51 "hash/crc32"
52 "io/ioutil"
53 "os"
54 "sort"
55 "sync/atomic"
56 "time"
57 "unsafe"
58)
59
60// Code inspired from mgo/bson ObjectId
61
62// ID represents a unique request id
63type ID [rawLen]byte
64
65const (
66 encodedLen = 20 // string encoded len
67 rawLen = 12 // binary raw len
68
69 // encoding stores a custom version of the base32 encoding with lower case
70 // letters.
71 encoding = "0123456789abcdefghijklmnopqrstuv"
72)
73
74var (
75 // objectIDCounter is atomically incremented when generating a new ObjectId. It's
76 // used as the counter part of an id. This id is initialized with a random value.
77 objectIDCounter = randInt()
78
79 // machineID is generated once and used in subsequent calls to the New* functions.
80 machineID = readMachineID()
81
82 // pid stores the current process id
83 pid = os.Getpid()
84
85 nilID ID
86
87 // dec is the decoding map for base32 encoding
88 dec [256]byte
89)
90
91func init() {
92 for i := 0; i < len(dec); i++ {
93 dec[i] = 0xFF
94 }
95 for i := 0; i < len(encoding); i++ {
96 dec[encoding[i]] = byte(i)
97 }
98
99 // If /proc/self/cpuset exists and is not /, we can assume that we are in a
100 // form of container and use the content of cpuset xor-ed with the PID in
101 // order get a reasonable machine global unique PID.
102 b, err := ioutil.ReadFile("/proc/self/cpuset")
103 if err == nil && len(b) > 1 {
104 pid ^= int(crc32.ChecksumIEEE(b))
105 }
106}
107
108// readMachineID generates a machine ID, derived from a platform-specific machine ID
109// value, or else the machine's hostname, or else a randomly-generated number.
110// It panics if all of these methods fail.
111func readMachineID() []byte {
112 id := make([]byte, 3)
113 hid, err := readPlatformMachineID()
114 if err != nil || len(hid) == 0 {
115 hid, err = os.Hostname()
116 }
117 if err == nil && len(hid) != 0 {
118 hw := sha256.New()
119 hw.Write([]byte(hid))
120 copy(id, hw.Sum(nil))
121 } else {
122 // Fallback to rand number if machine id can't be gathered
123 if _, randErr := rand.Reader.Read(id); randErr != nil {
124 panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr))
125 }
126 }
127 return id
128}
129
130// randInt generates a random uint32
131func randInt() uint32 {
132 b := make([]byte, 3)
133 if _, err := rand.Reader.Read(b); err != nil {
134 panic(fmt.Errorf("xid: cannot generate random number: %v;", err))
135 }
136 return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])
137}
138
139// New generates a globally unique ID
140func New() ID {
141 return NewWithTime(time.Now())
142}
143
144// NewWithTime generates a globally unique ID with the passed in time
145func NewWithTime(t time.Time) ID {
146 var id ID
147 // Timestamp, 4 bytes, big endian
148 binary.BigEndian.PutUint32(id[:], uint32(t.Unix()))
149 // Machine ID, 3 bytes
150 id[4] = machineID[0]
151 id[5] = machineID[1]
152 id[6] = machineID[2]
153 // Pid, 2 bytes, specs don't specify endianness, but we use big endian.
154 id[7] = byte(pid >> 8)
155 id[8] = byte(pid)
156 // Increment, 3 bytes, big endian
157 i := atomic.AddUint32(&objectIDCounter, 1)
158 id[9] = byte(i >> 16)
159 id[10] = byte(i >> 8)
160 id[11] = byte(i)
161 return id
162}
163
164// FromString reads an ID from its string representation
165func FromString(id string) (ID, error) {
166 i := &ID{}
167 err := i.UnmarshalText([]byte(id))
168 return *i, err
169}
170
171// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v).
172func (id ID) String() string {
173 text := make([]byte, encodedLen)
174 encode(text, id[:])
175 return *(*string)(unsafe.Pointer(&text))
176}
177
178// Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it.
179func (id ID) Encode(dst []byte) []byte {
180 encode(dst, id[:])
181 return dst
182}
183
184// MarshalText implements encoding/text TextMarshaler interface
185func (id ID) MarshalText() ([]byte, error) {
186 text := make([]byte, encodedLen)
187 encode(text, id[:])
188 return text, nil
189}
190
191// MarshalJSON implements encoding/json Marshaler interface
192func (id ID) MarshalJSON() ([]byte, error) {
193 if id.IsNil() {
194 return []byte("null"), nil
195 }
196 text := make([]byte, encodedLen+2)
197 encode(text[1:encodedLen+1], id[:])
198 text[0], text[encodedLen+1] = '"', '"'
199 return text, nil
200}
201
202// encode by unrolling the stdlib base32 algorithm + removing all safe checks
203func encode(dst, id []byte) {
204 _ = dst[19]
205 _ = id[11]
206
207 dst[19] = encoding[(id[11]<<4)&0x1F]
208 dst[18] = encoding[(id[11]>>1)&0x1F]
209 dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
210 dst[16] = encoding[id[10]>>3]
211 dst[15] = encoding[id[9]&0x1F]
212 dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F]
213 dst[13] = encoding[(id[8]>>2)&0x1F]
214 dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F]
215 dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F]
216 dst[10] = encoding[(id[6]>>1)&0x1F]
217 dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F]
218 dst[8] = encoding[id[5]>>3]
219 dst[7] = encoding[id[4]&0x1F]
220 dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F]
221 dst[5] = encoding[(id[3]>>2)&0x1F]
222 dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F]
223 dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F]
224 dst[2] = encoding[(id[1]>>1)&0x1F]
225 dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F]
226 dst[0] = encoding[id[0]>>3]
227}
228
229// UnmarshalText implements encoding/text TextUnmarshaler interface
230func (id *ID) UnmarshalText(text []byte) error {
231 if len(text) != encodedLen {
232 return ErrInvalidID
233 }
234 for _, c := range text {
235 if dec[c] == 0xFF {
236 return ErrInvalidID
237 }
238 }
239 if !decode(id, text) {
240 *id = nilID
241 return ErrInvalidID
242 }
243 return nil
244}
245
246// UnmarshalJSON implements encoding/json Unmarshaler interface
247func (id *ID) UnmarshalJSON(b []byte) error {
248 s := string(b)
249 if s == "null" {
250 *id = nilID
251 return nil
252 }
253 // Check the slice length to prevent panic on passing it to UnmarshalText()
254 if len(b) < 2 {
255 return ErrInvalidID
256 }
257 return id.UnmarshalText(b[1 : len(b)-1])
258}
259
260// decode by unrolling the stdlib base32 algorithm + customized safe check.
261func decode(id *ID, src []byte) bool {
262 _ = src[19]
263 _ = id[11]
264
265 id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4
266 // check the last byte
267 if encoding[(id[11]<<4)&0x1F] != src[19] {
268 return false
269 }
270 id[10] = dec[src[16]]<<3 | dec[src[17]]>>2
271 id[9] = dec[src[14]]<<5 | dec[src[15]]
272 id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3
273 id[7] = dec[src[11]]<<4 | dec[src[12]]>>1
274 id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4
275 id[5] = dec[src[8]]<<3 | dec[src[9]]>>2
276 id[4] = dec[src[6]]<<5 | dec[src[7]]
277 id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3
278 id[2] = dec[src[3]]<<4 | dec[src[4]]>>1
279 id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4
280 id[0] = dec[src[0]]<<3 | dec[src[1]]>>2
281 return true
282}
283
284// Time returns the timestamp part of the id.
285// It's a runtime error to call this method with an invalid id.
286func (id ID) Time() time.Time {
287 // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
288 secs := int64(binary.BigEndian.Uint32(id[0:4]))
289 return time.Unix(secs, 0)
290}
291
292// Machine returns the 3-byte machine id part of the id.
293// It's a runtime error to call this method with an invalid id.
294func (id ID) Machine() []byte {
295 return id[4:7]
296}
297
298// Pid returns the process id part of the id.
299// It's a runtime error to call this method with an invalid id.
300func (id ID) Pid() uint16 {
301 return binary.BigEndian.Uint16(id[7:9])
302}
303
304// Counter returns the incrementing value part of the id.
305// It's a runtime error to call this method with an invalid id.
306func (id ID) Counter() int32 {
307 b := id[9:12]
308 // Counter is stored as big-endian 3-byte value
309 return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
310}
311
312// Value implements the driver.Valuer interface.
313func (id ID) Value() (driver.Value, error) {
314 if id.IsNil() {
315 return nil, nil
316 }
317 b, err := id.MarshalText()
318 return string(b), err
319}
320
321// Scan implements the sql.Scanner interface.
322func (id *ID) Scan(value interface{}) (err error) {
323 switch val := value.(type) {
324 case string:
325 return id.UnmarshalText([]byte(val))
326 case []byte:
327 return id.UnmarshalText(val)
328 case nil:
329 *id = nilID
330 return nil
331 default:
332 return fmt.Errorf("xid: scanning unsupported type: %T", value)
333 }
334}
335
336// IsNil Returns true if this is a "nil" ID
337func (id ID) IsNil() bool {
338 return id == nilID
339}
340
341// Alias of IsNil
342func (id ID) IsZero() bool {
343 return id.IsNil()
344}
345
346// NilID returns a zero value for `xid.ID`.
347func NilID() ID {
348 return nilID
349}
350
351// Bytes returns the byte array representation of `ID`
352func (id ID) Bytes() []byte {
353 return id[:]
354}
355
356// FromBytes convert the byte array representation of `ID` back to `ID`
357func FromBytes(b []byte) (ID, error) {
358 var id ID
359 if len(b) != rawLen {
360 return id, ErrInvalidID
361 }
362 copy(id[:], b)
363 return id, nil
364}
365
366// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`.
367// The result will be 0 if two IDs are identical, -1 if current id is less than the other one,
368// and 1 if current id is greater than the other.
369func (id ID) Compare(other ID) int {
370 return bytes.Compare(id[:], other[:])
371}
372
373type sorter []ID
374
375func (s sorter) Len() int {
376 return len(s)
377}
378
379func (s sorter) Less(i, j int) bool {
380 return s[i].Compare(s[j]) < 0
381}
382
383func (s sorter) Swap(i, j int) {
384 s[i], s[j] = s[j], s[i]
385}
386
387// Sort sorts an array of IDs inplace.
388// It works by wrapping `[]ID` and use `sort.Sort`.
389func Sort(ids []ID) {
390 sort.Sort(sorter(ids))
391}
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..1fb13ab
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,4 @@
1logrus
2vendor
3
4.idea/
diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml
new file mode 100644
index 0000000..65dc285
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.golangci.yml
@@ -0,0 +1,40 @@
1run:
2 # do not run on test files yet
3 tests: false
4
5# all available settings of specific linters
6linters-settings:
7 errcheck:
8 # report about not checking of errors in type assetions: `a := b.(MyStruct)`;
9 # default is false: such cases aren't reported by default.
10 check-type-assertions: false
11
12 # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
13 # default is false: such cases aren't reported by default.
14 check-blank: false
15
16 lll:
17 line-length: 100
18 tab-width: 4
19
20 prealloc:
21 simple: false
22 range-loops: false
23 for-loops: false
24
25 whitespace:
26 multi-if: false # Enforces newlines (or comments) after every multi-line if statement
27 multi-func: false # Enforces newlines (or comments) after every multi-line function signature
28
29linters:
30 enable:
31 - megacheck
32 - govet
33 disable:
34 - maligned
35 - prealloc
36 disable-all: false
37 presets:
38 - bugs
39 - unused
40 fast: false
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..c1dbd5a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,15 @@
1language: go
2go_import_path: github.com/sirupsen/logrus
3git:
4 depth: 1
5env:
6 - GO111MODULE=on
7go: 1.15.x
8os: linux
9install:
10 - ./travis/install.sh
11script:
12 - cd ci
13 - go run mage.go -v -w ../ crossBuild
14 - go run mage.go -v -w ../ lint
15 - go run mage.go -v -w ../ test
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..7567f61
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,259 @@
1# 1.8.1
2Code quality:
3 * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer
4 * improve timestamp format documentation
5
6Fixes:
7 * fix race condition on logger hooks
8
9
10# 1.8.0
11
12Correct versioning number replacing v1.7.1.
13
14# 1.7.1
15
16Beware this release has introduced a new public API and its semver is therefore incorrect.
17
18Code quality:
19 * use go 1.15 in travis
20 * use magefile as task runner
21
22Fixes:
23 * small fixes about new go 1.13 error formatting system
24 * Fix for long time race condition with mutating data hooks
25
26Features:
27 * build support for zos
28
29# 1.7.0
30Fixes:
31 * the dependency toward a windows terminal library has been removed
32
33Features:
34 * a new buffer pool management API has been added
35 * a set of `<LogLevel>Fn()` functions have been added
36
37# 1.6.0
38Fixes:
39 * end of line cleanup
40 * revert the entry concurrency bug fix which leads to deadlock under some circumstances
41 * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
42
43Features:
44 * add an option to the `TextFormatter` to completely disable fields quoting
45
46# 1.5.0
47Code quality:
48 * add golangci linter run on travis
49
50Fixes:
51 * add mutex for hooks concurrent access on `Entry` data
52 * caller function field for go1.14
53 * fix build issue for gopherjs target
54
55Feature:
56 * add a hooks/writer sub-package whose goal is to split output on different streams depending on the trace level
57 * add a `DisableHTMLEscape` option in the `JSONFormatter`
58 * add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
59
60# 1.4.2
61 * Fixes build break for plan9, nacl, solaris
62# 1.4.1
63This new release introduces:
64 * Enhance TextFormatter to not print caller information when they are empty (#944)
65 * Remove dependency on golang.org/x/crypto (#932, #943)
66
67Fixes:
68 * Fix Entry.WithContext method to return a copy of the initial entry (#941)
69
70# 1.4.0
71This new release introduces:
72 * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
73 * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
74 * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
75
76Fixes:
77 * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
78 * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
79 * Fix infinite recursion on unknown `Level.String()` (#907)
80 * Fix race condition in `getCaller` (#916).
81
82
83# 1.3.0
84This new release introduces:
85 * Log, Logf, Logln functions for Logger and Entry that take a Level
86
87Fixes:
88 * Building prometheus node_exporter on AIX (#840)
89 * Race condition in TextFormatter (#468)
90 * Travis CI import path (#868)
91 * Remove coloured output on Windows (#862)
92 * Pointer to func as field in JSONFormatter (#870)
93 * Properly marshal Levels (#873)
94
95# 1.2.0
96This new release introduces:
97 * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
98 * A new trace level named `Trace` whose level is below `Debug`
99 * A configurable exit function to be called upon a Fatal trace
100 * The `Level` object now implements `encoding.TextUnmarshaler` interface
101
102# 1.1.1
103This is a bug fix release.
104 * fix the build break on Solaris
105 * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
106
107# 1.1.0
108This new release introduces:
109 * several fixes:
110 * a fix for a race condition on entry formatting
111 * proper cleanup of previously used entries before putting them back in the pool
112 * the extra new line at the end of message in text formatter has been removed
113 * a new global public API to check if a level is activated: IsLevelEnabled
114 * the following methods have been added to the Logger object
115 * IsLevelEnabled
116 * SetFormatter
117 * SetOutput
118 * ReplaceHooks
119 * introduction of go module
120 * an indent configuration for the json formatter
121 * output colour support for windows
122 * the field sort function is now configurable for text formatter
123 * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter
124
125# 1.0.6
126
127This new release introduces:
128 * a new api WithTime which allows to easily force the time of the log entry
129 which is mostly useful for logger wrapper
130 * a fix reverting the immutability of the entry given as parameter to the hooks
131 * a new configuration field of the json formatter in order to put all the fields
132 in a nested dictionary
133 * a new SetOutput method in the Logger
134 * a new configuration of the textformatter to configure the name of the default keys
135 * a new configuration of the text formatter to disable the level truncation
136
137# 1.0.5
138
139* Fix hooks race (#707)
140* Fix panic deadlock (#695)
141
142# 1.0.4
143
144* Fix race when adding hooks (#612)
145* Fix terminal check in AppEngine (#635)
146
147# 1.0.3
148
149* Replace example files with testable examples
150
151# 1.0.2
152
153* bug: quote non-string values in text formatter (#583)
154* Make (*Logger) SetLevel a public method
155
156# 1.0.1
157
158* bug: fix escaping in text formatter (#575)
159
160# 1.0.0
161
162* Officially changed name to lower-case
163* bug: colors on Windows 10 (#541)
164* bug: fix race in accessing level (#512)
165
166# 0.11.5
167
168* feature: add writer and writerlevel to entry (#372)
169
170# 0.11.4
171
172* bug: fix undefined variable on solaris (#493)
173
174# 0.11.3
175
176* formatter: configure quoting of empty values (#484)
177* formatter: configure quoting character (default is `"`) (#484)
178* bug: fix not importing io correctly in non-linux environments (#481)
179
180# 0.11.2
181
182* bug: fix windows terminal detection (#476)
183
184# 0.11.1
185
186* bug: fix tty detection with custom out (#471)
187
188# 0.11.0
189
190* performance: Use bufferpool to allocate (#370)
191* terminal: terminal detection for app-engine (#343)
192* feature: exit handler (#375)
193
194# 0.10.0
195
196* feature: Add a test hook (#180)
197* feature: `ParseLevel` is now case-insensitive (#326)
198* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
199* performance: avoid re-allocations on `WithFields` (#335)
200
201# 0.9.0
202
203* logrus/text_formatter: don't emit empty msg
204* logrus/hooks/airbrake: move out of main repository
205* logrus/hooks/sentry: move out of main repository
206* logrus/hooks/papertrail: move out of main repository
207* logrus/hooks/bugsnag: move out of main repository
208* logrus/core: run tests with `-race`
209* logrus/core: detect TTY based on `stderr`
210* logrus/core: support `WithError` on logger
211* logrus/core: Solaris support
212
213# 0.8.7
214
215* logrus/core: fix possible race (#216)
216* logrus/doc: small typo fixes and doc improvements
217
218
219# 0.8.6
220
221* hooks/raven: allow passing an initialized client
222
223# 0.8.5
224
225* logrus/core: revert #208
226
227# 0.8.4
228
229* formatter/text: fix data race (#218)
230
231# 0.8.3
232
233* logrus/core: fix entry log level (#208)
234* logrus/core: improve performance of text formatter by 40%
235* logrus/core: expose `LevelHooks` type
236* logrus/core: add support for DragonflyBSD and NetBSD
237* formatter/text: print structs more verbosely
238
239# 0.8.2
240
241* logrus: fix more Fatal family functions
242
243# 0.8.1
244
245* logrus: fix not exiting on `Fatalf` and `Fatalln`
246
247# 0.8.0
248
249* logrus: defaults to stderr instead of stdout
250* hooks/sentry: add special field for `*http.Request`
251* formatter/text: ignore Windows for colors
252
253# 0.7.3
254
255* formatter/\*: allow configuration of timestamp layout
256
257# 0.7.2
258
259* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
1The MIT License (MIT)
2
3Copyright (c) 2014 Simon Eskildsen
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in
13all copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
new file mode 100644
index 0000000..d1d4a85
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,515 @@
1# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus)
2
3Logrus is a structured logger for Go (golang), completely API compatible with
4the standard library logger.
5
6**Logrus is in maintenance-mode.** We will not be introducing new features. It's
7simply too hard to do in a way that won't break many people's projects, which is
8the last thing you want from your Logging library (again...).
9
10This does not mean Logrus is dead. Logrus will continue to be maintained for
11security, (backwards compatible) bug fixes, and performance (where we are
12limited by the interface).
13
14I believe Logrus' biggest contribution is to have played a part in today's
15widespread use of structured logging in Golang. There doesn't seem to be a
16reason to do a major, breaking iteration into Logrus V2, since the fantastic Go
17community has built those independently. Many fantastic alternatives have sprung
18up. Logrus would look like those, had it been re-designed with what we know
19about structured logging in Go today. Check out, for example,
20[Zerolog][zerolog], [Zap][zap], and [Apex][apex].
21
22[zerolog]: https://github.com/rs/zerolog
23[zap]: https://github.com/uber-go/zap
24[apex]: https://github.com/apex/log
25
26**Seeing weird case-sensitive problems?** It's in the past been possible to
27import Logrus as both upper- and lower-case. Due to the Go package environment,
28this caused issues in the community and we needed a standard. Some environments
29experienced problems with the upper-case variant, so the lower-case was decided.
30Everything using `logrus` will need to use the lower-case:
31`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
32
33To fix Glide, see [these
34comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
35For an in-depth explanation of the casing issue, see [this
36comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
37
38Nicely color-coded in development (when a TTY is attached, otherwise just
39plain text):
40
41![Colored](http://i.imgur.com/PY7qMwd.png)
42
43With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
44or Splunk:
45
46```text
47{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
48ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
49
50{"level":"warning","msg":"The group's number increased tremendously!",
51"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
52
53{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
54"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
55
56{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
57"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
58
59{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
60"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
61```
62
63With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
64attached, the output is compatible with the
65[logfmt](http://godoc.org/github.com/kr/logfmt) format:
66
67```text
68time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
69time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
70time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
71time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
72time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
73time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
74```
75To ensure this behaviour even if a TTY is attached, set your formatter as follows:
76
77```go
78 log.SetFormatter(&log.TextFormatter{
79 DisableColors: true,
80 FullTimestamp: true,
81 })
82```
83
84#### Logging Method Name
85
86If you wish to add the calling method as a field, instruct the logger via:
87```go
88log.SetReportCaller(true)
89```
90This adds the caller as 'method' like so:
91
92```json
93{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
94"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
95```
96
97```text
98time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
99```
100Note that this does add measurable overhead - the cost will depend on the version of Go, but is
101between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
102environment via benchmarks:
103```
104go test -bench=.*CallerTracing
105```
106
107
108#### Case-sensitivity
109
110The organization's name was changed to lower-case--and this will not be changed
111back. If you are getting import conflicts due to case sensitivity, please use
112the lower-case import: `github.com/sirupsen/logrus`.
113
114#### Example
115
116The simplest way to use Logrus is simply the package-level exported logger:
117
118```go
119package main
120
121import (
122 log "github.com/sirupsen/logrus"
123)
124
125func main() {
126 log.WithFields(log.Fields{
127 "animal": "walrus",
128 }).Info("A walrus appears")
129}
130```
131
132Note that it's completely api-compatible with the stdlib logger, so you can
133replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
134and you'll now have the flexibility of Logrus. You can customize it all you
135want:
136
137```go
138package main
139
140import (
141 "os"
142 log "github.com/sirupsen/logrus"
143)
144
145func init() {
146 // Log as JSON instead of the default ASCII formatter.
147 log.SetFormatter(&log.JSONFormatter{})
148
149 // Output to stdout instead of the default stderr
150 // Can be any io.Writer, see below for File example
151 log.SetOutput(os.Stdout)
152
153 // Only log the warning severity or above.
154 log.SetLevel(log.WarnLevel)
155}
156
157func main() {
158 log.WithFields(log.Fields{
159 "animal": "walrus",
160 "size": 10,
161 }).Info("A group of walrus emerges from the ocean")
162
163 log.WithFields(log.Fields{
164 "omg": true,
165 "number": 122,
166 }).Warn("The group's number increased tremendously!")
167
168 log.WithFields(log.Fields{
169 "omg": true,
170 "number": 100,
171 }).Fatal("The ice breaks!")
172
173 // A common pattern is to re-use fields between logging statements by re-using
174 // the logrus.Entry returned from WithFields()
175 contextLogger := log.WithFields(log.Fields{
176 "common": "this is a common field",
177 "other": "I also should be logged always",
178 })
179
180 contextLogger.Info("I'll be logged with common and other field")
181 contextLogger.Info("Me too")
182}
183```
184
185For more advanced usage such as logging to multiple locations from the same
186application, you can also create an instance of the `logrus` Logger:
187
188```go
189package main
190
191import (
192 "os"
193 "github.com/sirupsen/logrus"
194)
195
196// Create a new instance of the logger. You can have any number of instances.
197var log = logrus.New()
198
199func main() {
200 // The API for setting attributes is a little different than the package level
201 // exported logger. See Godoc.
202 log.Out = os.Stdout
203
204 // You could set this to any `io.Writer` such as a file
205 // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
206 // if err == nil {
207 // log.Out = file
208 // } else {
209 // log.Info("Failed to log to file, using default stderr")
210 // }
211
212 log.WithFields(logrus.Fields{
213 "animal": "walrus",
214 "size": 10,
215 }).Info("A group of walrus emerges from the ocean")
216}
217```
218
219#### Fields
220
221Logrus encourages careful, structured logging through logging fields instead of
222long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
223to send event %s to topic %s with key %d")`, you should log the much more
224discoverable:
225
226```go
227log.WithFields(log.Fields{
228 "event": event,
229 "topic": topic,
230 "key": key,
231}).Fatal("Failed to send event")
232```
233
234We've found this API forces you to think about logging in a way that produces
235much more useful logging messages. We've been in countless situations where just
236a single added field to a log statement that was already there would've saved us
237hours. The `WithFields` call is optional.
238
239In general, with Logrus using any of the `printf`-family functions should be
240seen as a hint you should add a field, however, you can still use the
241`printf`-family functions with Logrus.
242
243#### Default Fields
244
245Often it's helpful to have fields _always_ attached to log statements in an
246application or parts of one. For example, you may want to always log the
247`request_id` and `user_ip` in the context of a request. Instead of writing
248`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
249every line, you can create a `logrus.Entry` to pass around instead:
250
251```go
252requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
253requestLogger.Info("something happened on that request") // will log request_id and user_ip
254requestLogger.Warn("something not great happened")
255```
256
257#### Hooks
258
259You can add hooks for logging levels. For example to send errors to an exception
260tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
261multiple places simultaneously, e.g. syslog.
262
263Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
264`init`:
265
266```go
267import (
268 log "github.com/sirupsen/logrus"
269 "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
270 logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
271 "log/syslog"
272)
273
274func init() {
275
276 // Use the Airbrake hook to report errors that have Error severity or above to
277 // an exception tracker. You can create custom hooks, see the Hooks section.
278 log.AddHook(airbrake.NewHook(123, "xyz", "production"))
279
280 hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
281 if err != nil {
282 log.Error("Unable to connect to local syslog daemon")
283 } else {
284 log.AddHook(hook)
285 }
286}
287```
288Note: Syslog hook also supports connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
289
290A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
291
292
293#### Level logging
294
295Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
296
297```go
298log.Trace("Something very low level.")
299log.Debug("Useful debugging information.")
300log.Info("Something noteworthy happened!")
301log.Warn("You should probably take a look at this.")
302log.Error("Something failed but I'm not quitting.")
303// Calls os.Exit(1) after logging
304log.Fatal("Bye.")
305// Calls panic() after logging
306log.Panic("I'm bailing.")
307```
308
309You can set the logging level on a `Logger`, then it will only log entries with
310that severity or anything above it:
311
312```go
313// Will log anything that is info or above (warn, error, fatal, panic). Default.
314log.SetLevel(log.InfoLevel)
315```
316
317It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
318environment if your application has that.
319
320Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
321
322#### Entries
323
324Besides the fields added with `WithField` or `WithFields` some fields are
325automatically added to all logging events:
326
3271. `time`. The timestamp when the entry was created.
3282. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
329 the `WithFields` call. E.g. `Failed to send event.`
3303. `level`. The logging level. E.g. `info`.
331
332#### Environments
333
334Logrus has no notion of environment.
335
336If you wish for hooks and formatters to only be used in specific environments,
337you should handle that yourself. For example, if your application has a global
338variable `Environment`, which is a string representation of the environment you
339could do:
340
341```go
342import (
343 log "github.com/sirupsen/logrus"
344)
345
346func init() {
347 // do something here to set environment depending on an environment variable
348 // or command-line flag
349 if Environment == "production" {
350 log.SetFormatter(&log.JSONFormatter{})
351 } else {
352 // The TextFormatter is default, you don't actually have to do this.
353 log.SetFormatter(&log.TextFormatter{})
354 }
355}
356```
357
358This configuration is how `logrus` was intended to be used, but JSON in
359production is mostly only useful if you do log aggregation with tools like
360Splunk or Logstash.
361
362#### Formatters
363
364The built-in logging formatters are:
365
366* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
367 without colors.
368 * *Note:* to force colored output when there is no TTY, set the `ForceColors`
369 field to `true`. To force no colored output even if there is a TTY set the
370 `DisableColors` field to `true`. For Windows, see
371 [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
372 * When colors are enabled, levels are truncated to 4 characters by default. To disable
373 truncation set the `DisableLevelTruncation` field to `true`.
374 * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
375 * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
376* `logrus.JSONFormatter`. Logs fields as JSON.
377 * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
378
379Third party logging formatters:
380
381* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
382* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
383* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
384* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
385* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
386* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
387* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files.
388* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added.
389
390You can define your formatter by implementing the `Formatter` interface,
391requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
392`Fields` type (`map[string]interface{}`) with all your fields as well as the
393default ones (see Entries section above):
394
395```go
396type MyJSONFormatter struct {
397}
398
399log.SetFormatter(new(MyJSONFormatter))
400
401func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
402 // Note this doesn't include Time, Level and Message which are available on
403 // the Entry. Consult `godoc` on information about those fields or read the
404 // source of the official loggers.
405 serialized, err := json.Marshal(entry.Data)
406 if err != nil {
407 return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err)
408 }
409 return append(serialized, '\n'), nil
410}
411```
412
413#### Logger as an `io.Writer`
414
415Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
416
417```go
418w := logger.Writer()
419defer w.Close()
420
421srv := http.Server{
422 // create a stdlib log.Logger that writes to
423 // logrus.Logger.
424 ErrorLog: log.New(w, "", 0),
425}
426```
427
428Each line written to that writer will be printed the usual way, using formatters
429and hooks. The level for those entries is `info`.
430
431This means that we can override the standard library logger easily:
432
433```go
434logger := logrus.New()
435logger.Formatter = &logrus.JSONFormatter{}
436
437// Use logrus for standard log output
438// Note that `log` here references stdlib's log
439// Not logrus imported under the name `log`.
440log.SetOutput(logger.Writer())
441```
442
443#### Rotation
444
445Log rotation is not provided with Logrus. Log rotation should be done by an
446external program (like `logrotate(8)`) that can compress and delete old log
447entries. It should not be a feature of the application-level logger.
448
449#### Tools
450
451| Tool | Description |
452| ---- | ----------- |
453|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.|
454|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
455
456#### Testing
457
458Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
459
460* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook
461* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
462
463```go
464import(
465 "github.com/sirupsen/logrus"
466 "github.com/sirupsen/logrus/hooks/test"
467 "github.com/stretchr/testify/assert"
468 "testing"
469)
470
471func TestSomething(t*testing.T){
472 logger, hook := test.NewNullLogger()
473 logger.Error("Helloerror")
474
475 assert.Equal(t, 1, len(hook.Entries))
476 assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
477 assert.Equal(t, "Helloerror", hook.LastEntry().Message)
478
479 hook.Reset()
480 assert.Nil(t, hook.LastEntry())
481}
482```
483
484#### Fatal handlers
485
486Logrus can register one or more functions that will be called when any `fatal`
487level message is logged. The registered handlers will be executed before
488logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
489to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
490
491```
492...
493handler := func() {
494 // gracefully shutdown something...
495}
496logrus.RegisterExitHandler(handler)
497...
498```
499
500#### Thread safety
501
502By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
503If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
504
505Situation when locking is not needed includes:
506
507* You have no hooks registered, or hooks calling is already thread-safe.
508
509* Writing to logger.Out is already thread-safe, for example:
510
511 1) logger.Out is protected by locks.
512
513 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing)
514
515 (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..8fd189e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,76 @@
1package logrus
2
3// The following code was sourced and modified from the
4// https://github.com/tebeka/atexit package governed by the following license:
5//
6// Copyright (c) 2012 Miki Tebeka <[email protected]>.
7//
8// Permission is hereby granted, free of charge, to any person obtaining a copy of
9// this software and associated documentation files (the "Software"), to deal in
10// the Software without restriction, including without limitation the rights to
11// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
12// the Software, and to permit persons to whom the Software is furnished to do so,
13// subject to the following conditions:
14//
15// The above copyright notice and this permission notice shall be included in all
16// copies or substantial portions of the Software.
17//
18// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
20// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
21// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
22// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24
25import (
26 "fmt"
27 "os"
28)
29
30var handlers = []func(){}
31
// runHandler invokes a single exit handler, isolating the caller from any
// panic the handler raises: a recovered panic is reported on stderr instead
// of propagating, so the remaining handlers still get a chance to run.
func runHandler(handler func()) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", r)
		}
	}()
	handler()
}
41
42func runHandlers() {
43 for _, handler := range handlers {
44 runHandler(handler)
45 }
46}
47
48// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
49func Exit(code int) {
50 runHandlers()
51 os.Exit(code)
52}
53
// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
// any Fatal log entry is made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to shut down gracefully. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
//
// NOTE(review): the handlers slice is not guarded by a lock; register handlers
// during start-up, before logging from multiple goroutines — confirm against callers.
func RegisterExitHandler(handler func()) {
	handlers = append(handlers, handler)
}
65
// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
// any Fatal log entry is made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to shut down gracefully. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
//
// Unlike RegisterExitHandler, the handler is placed at the front of the list,
// so it runs before all previously registered handlers.
func DeferExitHandler(handler func()) {
	handlers = append([]func(){handler}, handlers...)
}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000..df9d65c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
1version: "{build}"
2platform: x64
3clone_folder: c:\gopath\src\github.com\sirupsen\logrus
4environment:
5 GOPATH: c:\gopath
6branches:
7 only:
8 - master
9install:
10 - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
11 - go version
12build_script:
13 - go get -t
14 - go test
diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go
new file mode 100644
index 0000000..c7787f7
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go
@@ -0,0 +1,43 @@
1package logrus
2
3import (
4 "bytes"
5 "sync"
6)
7
var (
	// bufferPool is the pool Entry.log borrows scratch buffers from when no
	// per-logger pool is configured; replaceable via SetBufferPool.
	bufferPool BufferPool
)

// BufferPool is the interface a custom buffer pool must satisfy to be
// installed with SetBufferPool.
type BufferPool interface {
	Put(*bytes.Buffer)
	Get() *bytes.Buffer
}

// defaultPool adapts a sync.Pool of *bytes.Buffer to the BufferPool interface.
type defaultPool struct {
	pool *sync.Pool
}
20
// Put returns a buffer to the underlying sync.Pool for reuse.
func (p *defaultPool) Put(buf *bytes.Buffer) {
	p.pool.Put(buf)
}
24
// Get fetches a buffer from the underlying sync.Pool; the pool's New func
// guarantees a non-nil *bytes.Buffer, so the type assertion cannot fail.
func (p *defaultPool) Get() *bytes.Buffer {
	return p.pool.Get().(*bytes.Buffer)
}
28
// SetBufferPool allows to replace the default logrus buffer pool
// to better meet the specific needs of an application.
// NOTE(review): the assignment is unsynchronized; call this during start-up,
// before concurrent logging begins — confirm against callers.
func SetBufferPool(bp BufferPool) {
	bufferPool = bp
}
34
// init installs the default sync.Pool-backed buffer pool so the package is
// usable with no explicit configuration.
func init() {
	SetBufferPool(&defaultPool{
		pool: &sync.Pool{
			New: func() interface{} {
				return new(bytes.Buffer)
			},
		},
	})
}
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000..da67aba
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
1/*
2Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
3
4
5The simplest way to use Logrus is through the package-level exported logger:
6
7 package main
8
9 import (
10 log "github.com/sirupsen/logrus"
11 )
12
13 func main() {
14 log.WithFields(log.Fields{
15 "animal": "walrus",
16 "number": 1,
17 "size": 10,
18 }).Info("A walrus appears")
19 }
20
21Output:
22 time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
23
24For a full guide visit https://github.com/sirupsen/logrus
25*/
26package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 0000000..71cdbbc
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,442 @@
1package logrus
2
3import (
4 "bytes"
5 "context"
6 "fmt"
7 "os"
8 "reflect"
9 "runtime"
10 "strings"
11 "sync"
12 "time"
13)
14
15var (
16
17 // qualified package name, cached at first use
18 logrusPackage string
19
20 // Positions in the call stack when tracing to report the calling method
21 minimumCallerDepth int
22
23 // Used for caller information initialisation
24 callerInitOnce sync.Once
25)
26
27const (
28 maximumCallerDepth int = 25
29 knownLogrusFrames int = 4
30)
31
32func init() {
33 // start at the bottom of the stack before the package-name cache is primed
34 minimumCallerDepth = 1
35}
36
37// Defines the key when adding errors using WithError.
38var ErrorKey = "error"
39
40// An entry is the final or intermediate Logrus logging entry. It contains all
41// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
42// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
43// reused and passed around as much as you wish to avoid field duplication.
44type Entry struct {
45 Logger *Logger
46
47 // Contains all the fields set by the user.
48 Data Fields
49
50 // Time at which the log entry was created
51 Time time.Time
52
53 // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
54 // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
55 Level Level
56
57 // Calling method, with package name
58 Caller *runtime.Frame
59
60 // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
61 Message string
62
63 // When formatter is called in entry.log(), a Buffer may be set to entry
64 Buffer *bytes.Buffer
65
66 // Contains the context set by the user. Useful for hook processing etc.
67 Context context.Context
68
69 // err may contain a field formatting error
70 err string
71}
72
// NewEntry returns a blank Entry bound to logger, with an empty field map
// pre-sized for the typical number of fields.
func NewEntry(logger *Logger) *Entry {
	return &Entry{
		Logger: logger,
		// Default is three fields, plus one optional.  Give a little extra room.
		Data: make(Fields, 6),
	}
}
80
// Dup returns a copy of the entry whose field map is shallow-copied, so the
// copy's Data can be mutated independently (field values themselves are
// shared). Level, Message, Caller and Buffer are not carried over; they are
// populated by Entry.log when the copy is actually fired.
func (entry *Entry) Dup() *Entry {
	data := make(Fields, len(entry.Data))
	for k, v := range entry.Data {
		data[k] = v
	}
	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
}
88
89// Returns the bytes representation of this entry from the formatter.
90func (entry *Entry) Bytes() ([]byte, error) {
91 return entry.Logger.Formatter.Format(entry)
92}
93
94// Returns the string representation from the reader and ultimately the
95// formatter.
96func (entry *Entry) String() (string, error) {
97 serialized, err := entry.Bytes()
98 if err != nil {
99 return "", err
100 }
101 str := string(serialized)
102 return str, nil
103}
104
105// Add an error as single field (using the key defined in ErrorKey) to the Entry.
106func (entry *Entry) WithError(err error) *Entry {
107 return entry.WithField(ErrorKey, err)
108}
109
// Add a context to the Entry. The field map is shallow-copied so the returned
// entry is independent of the receiver; the internal formatting-error state
// (err) is carried over unchanged.
func (entry *Entry) WithContext(ctx context.Context) *Entry {
	dataCopy := make(Fields, len(entry.Data))
	for k, v := range entry.Data {
		dataCopy[k] = v
	}
	return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
}
118
119// Add a single field to the Entry.
120func (entry *Entry) WithField(key string, value interface{}) *Entry {
121 return entry.WithFields(Fields{key: value})
122}
123
124// Add a map of fields to the Entry.
125func (entry *Entry) WithFields(fields Fields) *Entry {
126 data := make(Fields, len(entry.Data)+len(fields))
127 for k, v := range entry.Data {
128 data[k] = v
129 }
130 fieldErr := entry.err
131 for k, v := range fields {
132 isErrField := false
133 if t := reflect.TypeOf(v); t != nil {
134 switch {
135 case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func:
136 isErrField = true
137 }
138 }
139 if isErrField {
140 tmp := fmt.Sprintf("can not add field %q", k)
141 if fieldErr != "" {
142 fieldErr = entry.err + ", " + tmp
143 } else {
144 fieldErr = tmp
145 }
146 } else {
147 data[k] = v
148 }
149 }
150 return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
151}
152
// Overrides the time of the Entry. The field map is shallow-copied so the
// returned entry is independent of the receiver; context and the internal
// formatting-error state are carried over unchanged.
func (entry *Entry) WithTime(t time.Time) *Entry {
	dataCopy := make(Fields, len(entry.Data))
	for k, v := range entry.Data {
		dataCopy[k] = v
	}
	return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
}
161
// getPackageName trims a fully qualified function name (as produced by
// runtime.FuncForPC(...).Name()) down to its package path: everything up to
// the first '.' that follows the final '/'. If no such '.' exists the input
// is returned unchanged.
func getPackageName(f string) string {
	slash := strings.LastIndex(f, "/")
	if dot := strings.Index(f[slash+1:], "."); dot >= 0 {
		return f[:slash+1+dot]
	}
	return f
}
177
// getCaller retrieves the first stack frame that does not belong to the
// logrus package. On first use it discovers logrus's own package name (by
// locating this very function in the stack) and raises minimumCallerDepth to
// knownLogrusFrames; both are cached for later calls via callerInitOnce.
// Returns nil if no non-logrus frame is found within maximumCallerDepth.
func getCaller() *runtime.Frame {
	// cache this package's fully-qualified name
	callerInitOnce.Do(func() {
		pcs := make([]uintptr, maximumCallerDepth)
		_ = runtime.Callers(0, pcs)

		// dynamic get the package name and the minimum caller depth
		for i := 0; i < maximumCallerDepth; i++ {
			funcName := runtime.FuncForPC(pcs[i]).Name()
			if strings.Contains(funcName, "getCaller") {
				logrusPackage = getPackageName(funcName)
				break
			}
		}

		minimumCallerDepth = knownLogrusFrames
	})

	// Restrict the lookback frames to avoid runaway lookups
	pcs := make([]uintptr, maximumCallerDepth)
	depth := runtime.Callers(minimumCallerDepth, pcs)
	frames := runtime.CallersFrames(pcs[:depth])

	// Walk outward until the first frame whose package differs from logrus's.
	for f, again := frames.Next(); again; f, again = frames.Next() {
		pkg := getPackageName(f.Function)

		// If the caller isn't part of this package, we're done
		if pkg != logrusPackage {
			return &f //nolint:scopelint
		}
	}

	// if we got here, we failed to find the caller's context
	return nil
}
214
// HasCaller reports whether this entry carries caller information: the
// logger must be set, have ReportCaller enabled, and Caller must have been
// populated (done by Entry.log). Value receiver: safe on a copied Entry.
func (entry Entry) HasCaller() (has bool) {
	return entry.Logger != nil &&
		entry.Logger.ReportCaller &&
		entry.Caller != nil
}
220
// log is the single choke point for emitting an entry: it stamps time, level
// and message onto a copy of the entry, optionally records the caller, fires
// hooks, formats into a pooled buffer and writes the result. For PanicLevel
// (and below) it panics with the fired entry, giving Entry.Panic its panic
// semantics without an extra allocation.
func (entry *Entry) log(level Level, msg string) {
	var buffer *bytes.Buffer

	// Operate on a copy so the caller's entry can be reused concurrently.
	newEntry := entry.Dup()

	if newEntry.Time.IsZero() {
		newEntry.Time = time.Now()
	}

	newEntry.Level = level
	newEntry.Message = msg

	// Snapshot mutable logger configuration under the logger's lock.
	newEntry.Logger.mu.Lock()
	reportCaller := newEntry.Logger.ReportCaller
	bufPool := newEntry.getBufferPool()
	newEntry.Logger.mu.Unlock()

	if reportCaller {
		newEntry.Caller = getCaller()
	}

	newEntry.fireHooks()
	// Borrow a scratch buffer; the deferred func detaches it from the entry,
	// resets it and returns it to the pool even if write() panics.
	buffer = bufPool.Get()
	defer func() {
		newEntry.Buffer = nil
		buffer.Reset()
		bufPool.Put(buffer)
	}()
	buffer.Reset()
	newEntry.Buffer = buffer

	newEntry.write()

	newEntry.Buffer = nil

	// To avoid Entry#log() returning a value that only would make sense for
	// panic() to use in Entry#Panic(), we avoid the allocation by checking
	// directly here.
	if level <= PanicLevel {
		panic(newEntry)
	}
}
263
// getBufferPool returns the logger's per-instance buffer pool when one is
// configured, falling back to the package-level default pool.
func (entry *Entry) getBufferPool() (pool BufferPool) {
	if entry.Logger.BufferPool != nil {
		return entry.Logger.BufferPool
	}
	return bufferPool
}
270
// fireHooks runs every hook registered for the entry's level. The hook map
// is shallow-copied under the logger's lock so the hooks themselves execute
// without holding it; note the per-level slices are shared with the logger's
// map, so hooks should not be added concurrently with logging.
func (entry *Entry) fireHooks() {
	var tmpHooks LevelHooks
	entry.Logger.mu.Lock()
	tmpHooks = make(LevelHooks, len(entry.Logger.Hooks))
	for k, v := range entry.Logger.Hooks {
		tmpHooks[k] = v
	}
	entry.Logger.mu.Unlock()

	// Hook errors are reported to stderr rather than returned: logging must
	// not fail the caller.
	err := tmpHooks.Fire(entry.Level, entry)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
	}
}
285
// write formats the entry and writes it to the logger's output, holding the
// logger's mutex for the duration so format+write is atomic with respect to
// other entries. Formatter and writer errors are reported to stderr and
// otherwise swallowed.
func (entry *Entry) write() {
	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()
	serialized, err := entry.Logger.Formatter.Format(entry)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
		return
	}
	if _, err := entry.Logger.Out.Write(serialized); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
	}
}
298
299// Log will log a message at the level given as parameter.
300// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit.
301// For this behaviour Entry.Panic or Entry.Fatal should be used instead.
302func (entry *Entry) Log(level Level, args ...interface{}) {
303 if entry.Logger.IsLevelEnabled(level) {
304 entry.log(level, fmt.Sprint(args...))
305 }
306}
307
// Trace logs a message at level Trace.
func (entry *Entry) Trace(args ...interface{}) {
	entry.Log(TraceLevel, args...)
}

// Debug logs a message at level Debug.
func (entry *Entry) Debug(args ...interface{}) {
	entry.Log(DebugLevel, args...)
}

// Print is an alias for Info.
func (entry *Entry) Print(args ...interface{}) {
	entry.Info(args...)
}

// Info logs a message at level Info.
func (entry *Entry) Info(args ...interface{}) {
	entry.Log(InfoLevel, args...)
}

// Warn logs a message at level Warn.
func (entry *Entry) Warn(args ...interface{}) {
	entry.Log(WarnLevel, args...)
}

// Warning is an alias for Warn.
func (entry *Entry) Warning(args ...interface{}) {
	entry.Warn(args...)
}

// Error logs a message at level Error.
func (entry *Entry) Error(args ...interface{}) {
	entry.Log(ErrorLevel, args...)
}

// Fatal logs a message at level Fatal, then runs the registered exit
// handlers and terminates the process via Logger.Exit(1).
func (entry *Entry) Fatal(args ...interface{}) {
	entry.Log(FatalLevel, args...)
	entry.Logger.Exit(1)
}

// Panic logs a message at level Panic; Entry.log then panics with the fired
// entry (see the PanicLevel check at the end of Entry.log).
func (entry *Entry) Panic(args ...interface{}) {
	entry.Log(PanicLevel, args...)
}
344
345// Entry Printf family functions
346
// Logf logs a formatted message at the given level. The format arguments are
// only rendered (via fmt.Sprintf) when the level is enabled.
func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
	if entry.Logger.IsLevelEnabled(level) {
		entry.Log(level, fmt.Sprintf(format, args...))
	}
}
352
// Tracef logs a formatted message at level Trace.
func (entry *Entry) Tracef(format string, args ...interface{}) {
	entry.Logf(TraceLevel, format, args...)
}

// Debugf logs a formatted message at level Debug.
func (entry *Entry) Debugf(format string, args ...interface{}) {
	entry.Logf(DebugLevel, format, args...)
}

// Infof logs a formatted message at level Info.
func (entry *Entry) Infof(format string, args ...interface{}) {
	entry.Logf(InfoLevel, format, args...)
}

// Printf is an alias for Infof.
func (entry *Entry) Printf(format string, args ...interface{}) {
	entry.Infof(format, args...)
}

// Warnf logs a formatted message at level Warn.
func (entry *Entry) Warnf(format string, args ...interface{}) {
	entry.Logf(WarnLevel, format, args...)
}

// Warningf is an alias for Warnf.
func (entry *Entry) Warningf(format string, args ...interface{}) {
	entry.Warnf(format, args...)
}

// Errorf logs a formatted message at level Error.
func (entry *Entry) Errorf(format string, args ...interface{}) {
	entry.Logf(ErrorLevel, format, args...)
}

// Fatalf logs a formatted message at level Fatal, then runs the registered
// exit handlers and terminates the process via Logger.Exit(1).
func (entry *Entry) Fatalf(format string, args ...interface{}) {
	entry.Logf(FatalLevel, format, args...)
	entry.Logger.Exit(1)
}

// Panicf logs a formatted message at level Panic; Entry.log then panics with
// the fired entry.
func (entry *Entry) Panicf(format string, args ...interface{}) {
	entry.Logf(PanicLevel, format, args...)
}
389
390// Entry Println family functions
391
// Logln logs a message at the given level, formatting the arguments with
// fmt.Sprintln semantics (spaces between all operands) minus the trailing
// newline; see sprintlnn. Arguments are only rendered when the level is
// enabled.
func (entry *Entry) Logln(level Level, args ...interface{}) {
	if entry.Logger.IsLevelEnabled(level) {
		entry.Log(level, entry.sprintlnn(args...))
	}
}
397
// Traceln logs a Sprintln-formatted message at level Trace.
func (entry *Entry) Traceln(args ...interface{}) {
	entry.Logln(TraceLevel, args...)
}

// Debugln logs a Sprintln-formatted message at level Debug.
func (entry *Entry) Debugln(args ...interface{}) {
	entry.Logln(DebugLevel, args...)
}

// Infoln logs a Sprintln-formatted message at level Info.
func (entry *Entry) Infoln(args ...interface{}) {
	entry.Logln(InfoLevel, args...)
}

// Println is an alias for Infoln.
func (entry *Entry) Println(args ...interface{}) {
	entry.Infoln(args...)
}

// Warnln logs a Sprintln-formatted message at level Warn.
func (entry *Entry) Warnln(args ...interface{}) {
	entry.Logln(WarnLevel, args...)
}

// Warningln is an alias for Warnln.
func (entry *Entry) Warningln(args ...interface{}) {
	entry.Warnln(args...)
}

// Errorln logs a Sprintln-formatted message at level Error.
func (entry *Entry) Errorln(args ...interface{}) {
	entry.Logln(ErrorLevel, args...)
}

// Fatalln logs a Sprintln-formatted message at level Fatal, then runs the
// registered exit handlers and terminates the process via Logger.Exit(1).
func (entry *Entry) Fatalln(args ...interface{}) {
	entry.Logln(FatalLevel, args...)
	entry.Logger.Exit(1)
}

// Panicln logs a Sprintln-formatted message at level Panic; Entry.log then
// panics with the fired entry.
func (entry *Entry) Panicln(args ...interface{}) {
	entry.Logln(PanicLevel, args...)
}
434
435// Sprintlnn => Sprint no newline. This is to get the behavior of how
436// fmt.Sprintln where spaces are always added between operands, regardless of
437// their type. Instead of vendoring the Sprintln implementation to spare a
438// string allocation, we do the simplest thing.
439func (entry *Entry) sprintlnn(args ...interface{}) string {
440 msg := fmt.Sprintln(args...)
441 return msg[:len(msg)-1]
442}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 0000000..017c30c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,270 @@
1package logrus
2
3import (
4 "context"
5 "io"
6 "time"
7)
8
var (
	// std is the name of the standard logger in stdlib `log`
	std = New()
)

// StandardLogger returns the shared package-level logger that backs the
// package-level helper functions (Info, Warnf, WithFields, ...).
func StandardLogger() *Logger {
	return std
}
17
18// SetOutput sets the standard logger output.
19func SetOutput(out io.Writer) {
20 std.SetOutput(out)
21}
22
23// SetFormatter sets the standard logger formatter.
24func SetFormatter(formatter Formatter) {
25 std.SetFormatter(formatter)
26}
27
28// SetReportCaller sets whether the standard logger will include the calling
29// method as a field.
30func SetReportCaller(include bool) {
31 std.SetReportCaller(include)
32}
33
34// SetLevel sets the standard logger level.
35func SetLevel(level Level) {
36 std.SetLevel(level)
37}
38
39// GetLevel returns the standard logger level.
40func GetLevel() Level {
41 return std.GetLevel()
42}
43
44// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
45func IsLevelEnabled(level Level) bool {
46 return std.IsLevelEnabled(level)
47}
48
49// AddHook adds a hook to the standard logger hooks.
50func AddHook(hook Hook) {
51 std.AddHook(hook)
52}
53
54// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
55func WithError(err error) *Entry {
56 return std.WithField(ErrorKey, err)
57}
58
59// WithContext creates an entry from the standard logger and adds a context to it.
60func WithContext(ctx context.Context) *Entry {
61 return std.WithContext(ctx)
62}
63
64// WithField creates an entry from the standard logger and adds a field to
65// it. If you want multiple fields, use `WithFields`.
66//
67// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
68// or Panic on the Entry it returns.
69func WithField(key string, value interface{}) *Entry {
70 return std.WithField(key, value)
71}
72
73// WithFields creates an entry from the standard logger and adds multiple
74// fields to it. This is simply a helper for `WithField`, invoking it
75// once for each field.
76//
77// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
78// or Panic on the Entry it returns.
79func WithFields(fields Fields) *Entry {
80 return std.WithFields(fields)
81}
82
83// WithTime creates an entry from the standard logger and overrides the time of
84// logs generated with it.
85//
86// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
87// or Panic on the Entry it returns.
88func WithTime(t time.Time) *Entry {
89 return std.WithTime(t)
90}
91
92// Trace logs a message at level Trace on the standard logger.
93func Trace(args ...interface{}) {
94 std.Trace(args...)
95}
96
97// Debug logs a message at level Debug on the standard logger.
98func Debug(args ...interface{}) {
99 std.Debug(args...)
100}
101
102// Print logs a message at level Info on the standard logger.
103func Print(args ...interface{}) {
104 std.Print(args...)
105}
106
107// Info logs a message at level Info on the standard logger.
108func Info(args ...interface{}) {
109 std.Info(args...)
110}
111
112// Warn logs a message at level Warn on the standard logger.
113func Warn(args ...interface{}) {
114 std.Warn(args...)
115}
116
117// Warning logs a message at level Warn on the standard logger.
118func Warning(args ...interface{}) {
119 std.Warning(args...)
120}
121
122// Error logs a message at level Error on the standard logger.
123func Error(args ...interface{}) {
124 std.Error(args...)
125}
126
127// Panic logs a message at level Panic on the standard logger.
128func Panic(args ...interface{}) {
129 std.Panic(args...)
130}
131
132// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
133func Fatal(args ...interface{}) {
134 std.Fatal(args...)
135}
136
137// TraceFn logs a message from a func at level Trace on the standard logger.
138func TraceFn(fn LogFunction) {
139 std.TraceFn(fn)
140}
141
142// DebugFn logs a message from a func at level Debug on the standard logger.
143func DebugFn(fn LogFunction) {
144 std.DebugFn(fn)
145}
146
147// PrintFn logs a message from a func at level Info on the standard logger.
148func PrintFn(fn LogFunction) {
149 std.PrintFn(fn)
150}
151
152// InfoFn logs a message from a func at level Info on the standard logger.
153func InfoFn(fn LogFunction) {
154 std.InfoFn(fn)
155}
156
157// WarnFn logs a message from a func at level Warn on the standard logger.
158func WarnFn(fn LogFunction) {
159 std.WarnFn(fn)
160}
161
162// WarningFn logs a message from a func at level Warn on the standard logger.
163func WarningFn(fn LogFunction) {
164 std.WarningFn(fn)
165}
166
167// ErrorFn logs a message from a func at level Error on the standard logger.
168func ErrorFn(fn LogFunction) {
169 std.ErrorFn(fn)
170}
171
172// PanicFn logs a message from a func at level Panic on the standard logger.
173func PanicFn(fn LogFunction) {
174 std.PanicFn(fn)
175}
176
177// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1.
178func FatalFn(fn LogFunction) {
179 std.FatalFn(fn)
180}
181
182// Tracef logs a message at level Trace on the standard logger.
183func Tracef(format string, args ...interface{}) {
184 std.Tracef(format, args...)
185}
186
187// Debugf logs a message at level Debug on the standard logger.
188func Debugf(format string, args ...interface{}) {
189 std.Debugf(format, args...)
190}
191
192// Printf logs a message at level Info on the standard logger.
193func Printf(format string, args ...interface{}) {
194 std.Printf(format, args...)
195}
196
197// Infof logs a message at level Info on the standard logger.
198func Infof(format string, args ...interface{}) {
199 std.Infof(format, args...)
200}
201
202// Warnf logs a message at level Warn on the standard logger.
203func Warnf(format string, args ...interface{}) {
204 std.Warnf(format, args...)
205}
206
207// Warningf logs a message at level Warn on the standard logger.
208func Warningf(format string, args ...interface{}) {
209 std.Warningf(format, args...)
210}
211
212// Errorf logs a message at level Error on the standard logger.
213func Errorf(format string, args ...interface{}) {
214 std.Errorf(format, args...)
215}
216
217// Panicf logs a message at level Panic on the standard logger.
218func Panicf(format string, args ...interface{}) {
219 std.Panicf(format, args...)
220}
221
222// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
223func Fatalf(format string, args ...interface{}) {
224 std.Fatalf(format, args...)
225}
226
227// Traceln logs a message at level Trace on the standard logger.
228func Traceln(args ...interface{}) {
229 std.Traceln(args...)
230}
231
232// Debugln logs a message at level Debug on the standard logger.
233func Debugln(args ...interface{}) {
234 std.Debugln(args...)
235}
236
237// Println logs a message at level Info on the standard logger.
238func Println(args ...interface{}) {
239 std.Println(args...)
240}
241
242// Infoln logs a message at level Info on the standard logger.
243func Infoln(args ...interface{}) {
244 std.Infoln(args...)
245}
246
247// Warnln logs a message at level Warn on the standard logger.
248func Warnln(args ...interface{}) {
249 std.Warnln(args...)
250}
251
252// Warningln logs a message at level Warn on the standard logger.
253func Warningln(args ...interface{}) {
254 std.Warningln(args...)
255}
256
257// Errorln logs a message at level Error on the standard logger.
258func Errorln(args ...interface{}) {
259 std.Errorln(args...)
260}
261
262// Panicln logs a message at level Panic on the standard logger.
263func Panicln(args ...interface{}) {
264 std.Panicln(args...)
265}
266
267// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
268func Fatalln(args ...interface{}) {
269 std.Fatalln(args...)
270}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..4088837
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,78 @@
1package logrus
2
3import "time"
4
5// Default key names for the default fields
6const (
7 defaultTimestampFormat = time.RFC3339
8 FieldKeyMsg = "msg"
9 FieldKeyLevel = "level"
10 FieldKeyTime = "time"
11 FieldKeyLogrusError = "logrus_error"
12 FieldKeyFunc = "func"
13 FieldKeyFile = "file"
14)
15
16// The Formatter interface is used to implement a custom Formatter. It takes an
17// `Entry`. It exposes all the fields, including the default ones:
18//
19// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
20// * `entry.Data["time"]`. The timestamp.
21// * `entry.Data["level"]. The level the entry was logged at.
22//
23// Any additional fields added with `WithField` or `WithFields` are also in
24// `entry.Data`. Format is expected to return an array of bytes which are then
25// logged to `logger.Out`.
26type Formatter interface {
27 Format(*Entry) ([]byte, error)
28}
29
30// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
31// dumping it. If this code wasn't there doing:
32//
33// logrus.WithField("level", 1).Info("hello")
34//
35// Would just silently drop the user provided level. Instead with this code
36// it'll logged as:
37//
38// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
39//
40// It's not exported because it's still using Data in an opinionated way. It's to
41// avoid code duplication between the two default formatters.
42func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
43 timeKey := fieldMap.resolve(FieldKeyTime)
44 if t, ok := data[timeKey]; ok {
45 data["fields."+timeKey] = t
46 delete(data, timeKey)
47 }
48
49 msgKey := fieldMap.resolve(FieldKeyMsg)
50 if m, ok := data[msgKey]; ok {
51 data["fields."+msgKey] = m
52 delete(data, msgKey)
53 }
54
55 levelKey := fieldMap.resolve(FieldKeyLevel)
56 if l, ok := data[levelKey]; ok {
57 data["fields."+levelKey] = l
58 delete(data, levelKey)
59 }
60
61 logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
62 if l, ok := data[logrusErrKey]; ok {
63 data["fields."+logrusErrKey] = l
64 delete(data, logrusErrKey)
65 }
66
67 // If reportCaller is not set, 'func' will not conflict.
68 if reportCaller {
69 funcKey := fieldMap.resolve(FieldKeyFunc)
70 if l, ok := data[funcKey]; ok {
71 data["fields."+funcKey] = l
72 }
73 fileKey := fieldMap.resolve(FieldKeyFile)
74 if l, ok := data[fileKey]; ok {
75 data["fields."+fileKey] = l
76 }
77 }
78}
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
1package logrus
2
3// A hook to be fired when logging on the logging levels returned from
4// `Levels()` on your implementation of the interface. Note that this is not
5// fired in a goroutine or a channel with workers, you should handle such
6// functionality yourself if your call is non-blocking and you don't wish for
7// the logging calls for levels returned from `Levels()` to block.
8type Hook interface {
9 Levels() []Level
10 Fire(*Entry) error
11}
12
13// Internal type for storing the hooks on a logger instance.
14type LevelHooks map[Level][]Hook
15
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
// The hook is registered once per level reported by its Levels() method.
func (hooks LevelHooks) Add(hook Hook) {
	for _, level := range hook.Levels() {
		hooks[level] = append(hooks[level], hook)
	}
}
23
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry. Hooks run in registration order; the
// first error aborts the remaining hooks and is returned to the caller.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
	for _, hook := range hooks[level] {
		if err := hook.Fire(entry); err != nil {
			return err
		}
	}

	return nil
}
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..c96dc56
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,128 @@
1package logrus
2
3import (
4 "bytes"
5 "encoding/json"
6 "fmt"
7 "runtime"
8)
9
// fieldKey identifies one of the formatter's default output fields.
type fieldKey string

// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string

// resolve returns the user-configured replacement name for key, falling
// back to the key itself when no mapping exists (a nil map is fine).
func (f FieldMap) resolve(key fieldKey) string {
	mapped, ok := f[key]
	if !ok {
		return string(key)
	}
	return mapped
}
22
// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	// The format to use is the same than for time.Format or time.Parse from the standard
	// library.
	// The standard Library already provides a set of predefined format.
	// When empty, Format falls back to defaultTimestampFormat.
	TimestampFormat string

	// DisableTimestamp allows disabling automatic timestamps in output
	DisableTimestamp bool

	// DisableHTMLEscape allows disabling html escaping in output
	DisableHTMLEscape bool

	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
	DataKey string

	// FieldMap allows users to customize the names of keys for default fields.
	// As an example:
	// formatter := &JSONFormatter{
	//   	FieldMap: FieldMap{
	// 		 FieldKeyTime:  "@timestamp",
	// 		 FieldKeyLevel: "@level",
	// 		 FieldKeyMsg:   "@message",
	// 		 FieldKeyFunc:  "@caller",
	//    },
	// }
	FieldMap FieldMap

	// CallerPrettyfier can be set by the user to modify the content
	// of the function and file keys in the json data when ReportCaller is
	// activated. If any of the returned value is the empty string the
	// corresponding key will be removed from json fields.
	CallerPrettyfier func(*runtime.Frame) (function string, file string)

	// PrettyPrint will indent all json logs
	PrettyPrint bool
}
61
// Format renders a single log entry as one JSON object (Encode appends a
// trailing newline).
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
	// Copy the user fields so the entry's own map is never mutated; +4
	// leaves room for the default time/msg/level/error keys.
	data := make(Fields, len(entry.Data)+4)
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}

	// Optionally nest all user fields under a single DataKey entry.
	if f.DataKey != "" {
		newData := make(Fields, 4)
		newData[f.DataKey] = data
		data = newData
	}

	// Rename user fields that would collide with the default keys.
	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = defaultTimestampFormat
	}

	if entry.err != "" {
		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
	}
	if !f.DisableTimestamp {
		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
	}
	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
	if entry.HasCaller() {
		funcVal := entry.Caller.Function
		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
		if f.CallerPrettyfier != nil {
			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
		}
		// An empty string from CallerPrettyfier drops that key entirely.
		if funcVal != "" {
			data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
		}
		if fileVal != "" {
			data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
		}
	}

	// Reuse the entry's scratch buffer when one was supplied.
	var b *bytes.Buffer
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	encoder := json.NewEncoder(b)
	encoder.SetEscapeHTML(!f.DisableHTMLEscape)
	if f.PrettyPrint {
		encoder.SetIndent("", "  ")
	}
	if err := encoder.Encode(data); err != nil {
		return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
	}

	return b.Bytes(), nil
}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000..5ff0aef
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,417 @@
1package logrus
2
3import (
4 "context"
5 "io"
6 "os"
7 "sync"
8 "sync/atomic"
9 "time"
10)
11
// LogFunction For big messages, it can be more efficient to pass a function
// and only call it if the log level is actually enabled rather than
// generating the log message and then checking if the level is enabled
type LogFunction func() []interface{}

type Logger struct {
	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
	// file, or leave it default which is `os.Stderr`. You can also set this to
	// something more adventurous, such as logging to Kafka.
	Out io.Writer
	// Hooks for the logger instance. These allow firing events based on logging
	// levels and log entries. For example, to send errors to an error tracking
	// service, log to StatsD or dump the core on fatal errors.
	Hooks LevelHooks
	// All log entries pass through the formatter before logged to Out. The
	// included formatters are `TextFormatter` and `JSONFormatter` for which
	// TextFormatter is the default. In development (when a TTY is attached) it
	// logs with colors, but to a file it wouldn't. You can easily implement your
	// own that implements the `Formatter` interface, see the `README` or included
	// formatters for examples.
	Formatter Formatter

	// Flag for whether to log caller info (off by default)
	ReportCaller bool

	// The logging level the logger should log at. This is typically (and defaults
	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
	// logged. Accessed atomically via level()/SetLevel.
	Level Level
	// Used to sync writing to the log. Locking is enabled by Default
	mu MutexWrap
	// Reusable empty entry
	entryPool sync.Pool
	// Function to exit the application, defaults to `os.Exit()`
	ExitFunc exitFunc
	// The buffer pool used to format the log. If it is nil, the default global
	// buffer pool will be used.
	BufferPool BufferPool
}
51
// exitFunc is the signature of the function used to terminate the process.
type exitFunc func(int)

// MutexWrap is a sync.Mutex that can be permanently switched off for
// callers that guarantee their own write safety (see Logger.SetNoLock).
type MutexWrap struct {
	lock     sync.Mutex
	disabled bool
}

// Lock acquires the underlying mutex unless locking has been disabled.
func (mw *MutexWrap) Lock() {
	if mw.disabled {
		return
	}
	mw.lock.Lock()
}

// Unlock releases the underlying mutex unless locking has been disabled.
func (mw *MutexWrap) Unlock() {
	if mw.disabled {
		return
	}
	mw.lock.Unlock()
}

// Disable turns all subsequent Lock/Unlock calls into no-ops.
func (mw *MutexWrap) Disable() {
	mw.disabled = true
}
74
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
//    var log = &logrus.Logger{
//      Out: os.Stderr,
//      Formatter: new(logrus.TextFormatter),
//      Hooks: make(logrus.LevelHooks),
//      Level: logrus.DebugLevel,
//    }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
	return &Logger{
		Out:          os.Stderr,
		Formatter:    new(TextFormatter),
		Hooks:        make(LevelHooks),
		Level:        InfoLevel,
		ExitFunc:     os.Exit,
		ReportCaller: false,
	}
}

// newEntry fetches a pooled entry when one is available, otherwise
// allocates a fresh one.
func (logger *Logger) newEntry() *Entry {
	entry, ok := logger.entryPool.Get().(*Entry)
	if ok {
		return entry
	}
	return NewEntry(logger)
}

// releaseEntry returns an entry to the pool, clearing its Data map so
// fields never leak between log calls.
func (logger *Logger) releaseEntry(entry *Entry) {
	entry.Data = map[string]interface{}{}
	logger.entryPool.Put(entry)
}
110
// WithField allocates a new entry and adds a field to it.
// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to
// this new returned entry.
// If you want multiple fields, use `WithFields`.
// NOTE(review): the pooled entry is released on return; this assumes
// entry.WithField returns a distinct Entry — confirm against entry.go.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithField(key, value)
}

// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithFields(fields)
}

// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithError(err)
}

// Add a context to the log entry.
func (logger *Logger) WithContext(ctx context.Context) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithContext(ctx)
}

// Overrides the time of the log entry.
func (logger *Logger) WithTime(t time.Time) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithTime(t)
}
150
// Logf logs a formatted message at the given level. When the level is
// disabled no work happens at all — not even pooled-entry retrieval.
func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
		entry.Logf(level, format, args...)
		logger.releaseEntry(entry)
	}
}

// Tracef logs a formatted message at TraceLevel.
func (logger *Logger) Tracef(format string, args ...interface{}) {
	logger.Logf(TraceLevel, format, args...)
}

// Debugf logs a formatted message at DebugLevel.
func (logger *Logger) Debugf(format string, args ...interface{}) {
	logger.Logf(DebugLevel, format, args...)
}

// Infof logs a formatted message at InfoLevel.
func (logger *Logger) Infof(format string, args ...interface{}) {
	logger.Logf(InfoLevel, format, args...)
}

// Printf forwards to entry.Printf; unlike the leveled helpers it performs
// no level check at this layer.
func (logger *Logger) Printf(format string, args ...interface{}) {
	entry := logger.newEntry()
	entry.Printf(format, args...)
	logger.releaseEntry(entry)
}

// Warnf logs a formatted message at WarnLevel.
func (logger *Logger) Warnf(format string, args ...interface{}) {
	logger.Logf(WarnLevel, format, args...)
}

// Warningf is an alias for Warnf.
func (logger *Logger) Warningf(format string, args ...interface{}) {
	logger.Warnf(format, args...)
}

// Errorf logs a formatted message at ErrorLevel.
func (logger *Logger) Errorf(format string, args ...interface{}) {
	logger.Logf(ErrorLevel, format, args...)
}

// Fatalf logs a formatted message at FatalLevel, then terminates the
// process via logger.Exit(1).
func (logger *Logger) Fatalf(format string, args ...interface{}) {
	logger.Logf(FatalLevel, format, args...)
	logger.Exit(1)
}

// Panicf logs a formatted message at PanicLevel.
func (logger *Logger) Panicf(format string, args ...interface{}) {
	logger.Logf(PanicLevel, format, args...)
}
197
// Log will log a message at the level given as parameter.
// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit.
// For this behaviour Logger.Panic or Logger.Fatal should be used instead.
func (logger *Logger) Log(level Level, args ...interface{}) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
		entry.Log(level, args...)
		logger.releaseEntry(entry)
	}
}

// LogFn logs the values produced by fn at the given level. fn is invoked
// only when the level is enabled — the point of the LogFunction
// indirection for expensive messages.
func (logger *Logger) LogFn(level Level, fn LogFunction) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
		entry.Log(level, fn()...)
		logger.releaseEntry(entry)
	}
}

// Trace logs args at TraceLevel.
func (logger *Logger) Trace(args ...interface{}) {
	logger.Log(TraceLevel, args...)
}

// Debug logs args at DebugLevel.
func (logger *Logger) Debug(args ...interface{}) {
	logger.Log(DebugLevel, args...)
}

// Info logs args at InfoLevel.
func (logger *Logger) Info(args ...interface{}) {
	logger.Log(InfoLevel, args...)
}

// Print forwards to entry.Print with no level check at this layer.
func (logger *Logger) Print(args ...interface{}) {
	entry := logger.newEntry()
	entry.Print(args...)
	logger.releaseEntry(entry)
}

// Warn logs args at WarnLevel.
func (logger *Logger) Warn(args ...interface{}) {
	logger.Log(WarnLevel, args...)
}

// Warning is an alias for Warn.
func (logger *Logger) Warning(args ...interface{}) {
	logger.Warn(args...)
}

// Error logs args at ErrorLevel.
func (logger *Logger) Error(args ...interface{}) {
	logger.Log(ErrorLevel, args...)
}

// Fatal logs args at FatalLevel, then terminates via logger.Exit(1).
func (logger *Logger) Fatal(args ...interface{}) {
	logger.Log(FatalLevel, args...)
	logger.Exit(1)
}

// Panic logs args at PanicLevel.
func (logger *Logger) Panic(args ...interface{}) {
	logger.Log(PanicLevel, args...)
}
255
// TraceFn logs fn's result at TraceLevel (fn runs only when enabled).
func (logger *Logger) TraceFn(fn LogFunction) {
	logger.LogFn(TraceLevel, fn)
}

// DebugFn logs fn's result at DebugLevel (fn runs only when enabled).
func (logger *Logger) DebugFn(fn LogFunction) {
	logger.LogFn(DebugLevel, fn)
}

// InfoFn logs fn's result at InfoLevel (fn runs only when enabled).
func (logger *Logger) InfoFn(fn LogFunction) {
	logger.LogFn(InfoLevel, fn)
}

// PrintFn always invokes fn and forwards its result to entry.Print,
// with no level check at this layer.
func (logger *Logger) PrintFn(fn LogFunction) {
	entry := logger.newEntry()
	entry.Print(fn()...)
	logger.releaseEntry(entry)
}

// WarnFn logs fn's result at WarnLevel (fn runs only when enabled).
func (logger *Logger) WarnFn(fn LogFunction) {
	logger.LogFn(WarnLevel, fn)
}

// WarningFn is an alias for WarnFn.
func (logger *Logger) WarningFn(fn LogFunction) {
	logger.WarnFn(fn)
}

// ErrorFn logs fn's result at ErrorLevel (fn runs only when enabled).
func (logger *Logger) ErrorFn(fn LogFunction) {
	logger.LogFn(ErrorLevel, fn)
}

// FatalFn logs fn's result at FatalLevel, then terminates via logger.Exit(1).
func (logger *Logger) FatalFn(fn LogFunction) {
	logger.LogFn(FatalLevel, fn)
	logger.Exit(1)
}

// PanicFn logs fn's result at PanicLevel.
func (logger *Logger) PanicFn(fn LogFunction) {
	logger.LogFn(PanicLevel, fn)
}
294
// Logln logs args at the given level using Println-style spacing.
func (logger *Logger) Logln(level Level, args ...interface{}) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
		entry.Logln(level, args...)
		logger.releaseEntry(entry)
	}
}

// Traceln logs args at TraceLevel with Println-style spacing.
func (logger *Logger) Traceln(args ...interface{}) {
	logger.Logln(TraceLevel, args...)
}

// Debugln logs args at DebugLevel with Println-style spacing.
func (logger *Logger) Debugln(args ...interface{}) {
	logger.Logln(DebugLevel, args...)
}

// Infoln logs args at InfoLevel with Println-style spacing.
func (logger *Logger) Infoln(args ...interface{}) {
	logger.Logln(InfoLevel, args...)
}

// Println forwards to entry.Println with no level check at this layer.
func (logger *Logger) Println(args ...interface{}) {
	entry := logger.newEntry()
	entry.Println(args...)
	logger.releaseEntry(entry)
}

// Warnln logs args at WarnLevel with Println-style spacing.
func (logger *Logger) Warnln(args ...interface{}) {
	logger.Logln(WarnLevel, args...)
}

// Warningln is an alias for Warnln.
func (logger *Logger) Warningln(args ...interface{}) {
	logger.Warnln(args...)
}

// Errorln logs args at ErrorLevel with Println-style spacing.
func (logger *Logger) Errorln(args ...interface{}) {
	logger.Logln(ErrorLevel, args...)
}

// Fatalln logs args at FatalLevel, then terminates via logger.Exit(1).
func (logger *Logger) Fatalln(args ...interface{}) {
	logger.Logln(FatalLevel, args...)
	logger.Exit(1)
}

// Panicln logs args at PanicLevel.
func (logger *Logger) Panicln(args ...interface{}) {
	logger.Logln(PanicLevel, args...)
}
341
342func (logger *Logger) Exit(code int) {
343 runHandlers()
344 if logger.ExitFunc == nil {
345 logger.ExitFunc = os.Exit
346 }
347 logger.ExitFunc(code)
348}
349
// SetNoLock disables the logger's internal mutex. When the output file is
// opened with appending mode, it's safe to write concurrently to it
// (within 4k message on Linux), so users can opt out of locking.
func (logger *Logger) SetNoLock() {
	logger.mu.Disable()
}

// level reads the current level with an atomic load, so it is safe to
// call concurrently with SetLevel.
func (logger *Logger) level() Level {
	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}

// SetLevel sets the logger level.
func (logger *Logger) SetLevel(level Level) {
	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}

// GetLevel returns the logger level.
func (logger *Logger) GetLevel() Level {
	return logger.level()
}

// AddHook adds a hook to the logger hooks.
func (logger *Logger) AddHook(hook Hook) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.Hooks.Add(hook)
}

// IsLevelEnabled checks if the log level of the logger is greater than the level param
func (logger *Logger) IsLevelEnabled(level Level) bool {
	return logger.level() >= level
}

// SetFormatter sets the logger formatter.
func (logger *Logger) SetFormatter(formatter Formatter) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.Formatter = formatter
}

// SetOutput sets the logger output.
func (logger *Logger) SetOutput(output io.Writer) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.Out = output
}

// SetReportCaller toggles inclusion of caller information in entries.
func (logger *Logger) SetReportCaller(reportCaller bool) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.ReportCaller = reportCaller
}

// ReplaceHooks replaces the logger hooks and returns the old ones
func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
	logger.mu.Lock()
	oldHooks := logger.Hooks
	logger.Hooks = hooks
	logger.mu.Unlock()
	return oldHooks
}

// SetBufferPool sets the logger buffer pool.
func (logger *Logger) SetBufferPool(pool BufferPool) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.BufferPool = pool
}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..2f16224
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,186 @@
1package logrus
2
3import (
4 "fmt"
5 "log"
6 "strings"
7)
8
9// Fields type, used to pass to `WithFields`.
10type Fields map[string]interface{}
11
12// Level type
13type Level uint32
14
15// Convert the Level to a string. E.g. PanicLevel becomes "panic".
16func (level Level) String() string {
17 if b, err := level.MarshalText(); err == nil {
18 return string(b)
19 } else {
20 return "unknown"
21 }
22}
23
24// ParseLevel takes a string level and returns the Logrus log level constant.
25func ParseLevel(lvl string) (Level, error) {
26 switch strings.ToLower(lvl) {
27 case "panic":
28 return PanicLevel, nil
29 case "fatal":
30 return FatalLevel, nil
31 case "error":
32 return ErrorLevel, nil
33 case "warn", "warning":
34 return WarnLevel, nil
35 case "info":
36 return InfoLevel, nil
37 case "debug":
38 return DebugLevel, nil
39 case "trace":
40 return TraceLevel, nil
41 }
42
43 var l Level
44 return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
45}
46
47// UnmarshalText implements encoding.TextUnmarshaler.
48func (level *Level) UnmarshalText(text []byte) error {
49 l, err := ParseLevel(string(text))
50 if err != nil {
51 return err
52 }
53
54 *level = l
55
56 return nil
57}
58
// MarshalText implements encoding.TextMarshaler. Note that WarnLevel
// marshals as "warning", not "warn"; unknown levels yield an error.
func (level Level) MarshalText() ([]byte, error) {
	switch level {
	case TraceLevel:
		return []byte("trace"), nil
	case DebugLevel:
		return []byte("debug"), nil
	case InfoLevel:
		return []byte("info"), nil
	case WarnLevel:
		return []byte("warning"), nil
	case ErrorLevel:
		return []byte("error"), nil
	case FatalLevel:
		return []byte("fatal"), nil
	case PanicLevel:
		return []byte("panic"), nil
	}

	return nil, fmt.Errorf("not a valid logrus level %d", level)
}
79
// A constant exposing all logging levels, ordered from most severe
// (PanicLevel) to least severe (TraceLevel).
var AllLevels = []Level{
	PanicLevel,
	FatalLevel,
	ErrorLevel,
	WarnLevel,
	InfoLevel,
	DebugLevel,
	TraceLevel,
}

// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
// Numeric values increase with verbosity (PanicLevel == 0).
const (
	// PanicLevel level, highest level of severity. Logs and then calls panic with the
	// message passed to Debug, Info, ...
	PanicLevel Level = iota
	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
	// logging level is set to Panic.
	FatalLevel
	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
	// Commonly used for hooks to send errors to an error tracking service.
	ErrorLevel
	// WarnLevel level. Non-critical entries that deserve eyes.
	WarnLevel
	// InfoLevel level. General operational entries about what's going on inside the
	// application.
	InfoLevel
	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
	DebugLevel
	// TraceLevel level. Designates finer-grained informational events than the Debug.
	TraceLevel
)

// Won't compile if StdLogger can't be realized by a log.Logger
var (
	_ StdLogger = &log.Logger{}
	_ StdLogger = &Entry{}
	_ StdLogger = &Logger{}
)
120
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
	Print(...interface{})
	Printf(string, ...interface{})
	Println(...interface{})

	Fatal(...interface{})
	Fatalf(string, ...interface{})
	Fatalln(...interface{})

	Panic(...interface{})
	Panicf(string, ...interface{})
	Panicln(...interface{})
}

// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
	WithField(key string, value interface{}) *Entry
	WithFields(fields Fields) *Entry
	WithError(err error) *Entry

	Debugf(format string, args ...interface{})
	Infof(format string, args ...interface{})
	Printf(format string, args ...interface{})
	Warnf(format string, args ...interface{})
	Warningf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Panicf(format string, args ...interface{})

	Debug(args ...interface{})
	Info(args ...interface{})
	Print(args ...interface{})
	Warn(args ...interface{})
	Warning(args ...interface{})
	Error(args ...interface{})
	Fatal(args ...interface{})
	Panic(args ...interface{})

	Debugln(args ...interface{})
	Infoln(args ...interface{})
	Println(args ...interface{})
	Warnln(args ...interface{})
	Warningln(args ...interface{})
	Errorln(args ...interface{})
	Fatalln(args ...interface{})
	Panicln(args ...interface{})

	// Candidate additions kept commented out so the interface stays
	// backward-compatible for existing implementers:
	// IsDebugEnabled() bool
	// IsInfoEnabled() bool
	// IsWarnEnabled() bool
	// IsErrorEnabled() bool
	// IsFatalEnabled() bool
	// IsPanicEnabled() bool
}

// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
// here for consistency. Do not use. Use Logger or Entry instead.
type Ext1FieldLogger interface {
	FieldLogger
	Tracef(format string, args ...interface{})
	Trace(args ...interface{})
	Traceln(args ...interface{})
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000..2403de9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
// +build appengine

package logrus

import (
	"io"
)

// checkIfTerminal always reports true on App Engine, where no real
// terminal detection is available.
func checkIfTerminal(w io.Writer) bool {
	return true
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
new file mode 100644
index 0000000..4997899
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -0,0 +1,13 @@
// +build darwin dragonfly freebsd netbsd openbsd
// +build !js

package logrus

import "golang.org/x/sys/unix"

// ioctlReadTermios is the termios-read ioctl used on BSD-derived systems.
const ioctlReadTermios = unix.TIOCGETA

// isTerminal reports whether fd refers to a terminal: the termios ioctl
// succeeds only on tty file descriptors.
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
	return err == nil
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
new file mode 100644
index 0000000..ebdae3e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
@@ -0,0 +1,7 @@
// +build js

package logrus

// isTerminal always reports false under js, which has no terminals.
func isTerminal(fd int) bool {
	return false
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
new file mode 100644
index 0000000..97af92c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
@@ -0,0 +1,11 @@
// +build js nacl plan9

package logrus

import (
	"io"
)

// checkIfTerminal always reports false on platforms with no terminal
// detection support (js, nacl, plan9).
func checkIfTerminal(w io.Writer) bool {
	return false
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000..3293fb3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,17 @@
1// +build !appengine,!js,!windows,!nacl,!plan9
2
3package logrus
4
5import (
6 "io"
7 "os"
8)
9
10func checkIfTerminal(w io.Writer) bool {
11 switch v := w.(type) {
12 case *os.File:
13 return isTerminal(int(v.Fd()))
14 default:
15 return false
16 }
17}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
new file mode 100644
index 0000000..f6710b3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
@@ -0,0 +1,11 @@
package logrus

import (
	"golang.org/x/sys/unix"
)

// isTerminal reports whether the given file descriptor is a terminal,
// using the Solaris termio ioctl (TCGETA).
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
	return err == nil
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
new file mode 100644
index 0000000..04748b8
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -0,0 +1,13 @@
// +build linux aix zos
// +build !js

package logrus

import "golang.org/x/sys/unix"

// ioctlReadTermios is the termios-read ioctl on Linux-like systems.
const ioctlReadTermios = unix.TCGETS

// isTerminal reports whether fd refers to a terminal: the termios ioctl
// succeeds only on tty file descriptors.
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
	return err == nil
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 0000000..2879eb5
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,27 @@
// +build !appengine,!js,windows

package logrus

import (
	"io"
	"os"

	"golang.org/x/sys/windows"
)

// checkIfTerminal reports whether w is a Windows console. As a side
// effect it enables ANSI color output by setting
// ENABLE_VIRTUAL_TERMINAL_PROCESSING on the console handle; if either
// console-mode call fails the writer is treated as a non-terminal.
func checkIfTerminal(w io.Writer) bool {
	switch v := w.(type) {
	case *os.File:
		handle := windows.Handle(v.Fd())
		var mode uint32
		if err := windows.GetConsoleMode(handle, &mode); err != nil {
			return false
		}
		mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
		if err := windows.SetConsoleMode(handle, mode); err != nil {
			return false
		}
		return true
	}
	return false
}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..be2c6ef
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,339 @@
1package logrus
2
3import (
4 "bytes"
5 "fmt"
6 "os"
7 "runtime"
8 "sort"
9 "strconv"
10 "strings"
11 "sync"
12 "time"
13 "unicode/utf8"
14)
15
// ANSI color codes used by printColored.
const (
	red    = 31
	yellow = 33
	blue   = 36
	gray   = 37
)

// baseTimestamp anchors the elapsed-seconds timestamps printed when
// FullTimestamp is disabled.
var baseTimestamp time.Time

func init() {
	baseTimestamp = time.Now()
}
28
// TextFormatter formats logs into text
type TextFormatter struct {
	// Set to true to bypass checking for a TTY before outputting colors.
	ForceColors bool

	// Force disabling colors.
	DisableColors bool

	// Force quoting of all values
	ForceQuote bool

	// DisableQuote disables quoting for all values.
	// DisableQuote will have a lower priority than ForceQuote.
	// If both of them are set to true, quote will be forced on all values.
	DisableQuote bool

	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
	EnvironmentOverrideColors bool

	// Disable timestamp logging. useful when output is redirected to logging
	// system that already adds timestamps.
	DisableTimestamp bool

	// Enable logging the full timestamp when a TTY is attached instead of just
	// the time passed since beginning of execution.
	FullTimestamp bool

	// TimestampFormat to use for display when a full timestamp is printed.
	// The format to use is the same than for time.Format or time.Parse from the standard
	// library.
	// The standard Library already provides a set of predefined format.
	TimestampFormat string

	// The fields are sorted by default for a consistent output. For applications
	// that log extremely frequently and don't use the JSON formatter this may not
	// be desired.
	DisableSorting bool

	// The keys sorting function, when uninitialized it uses sort.Strings.
	SortingFunc func([]string)

	// Disables the truncation of the level text to 4 characters.
	DisableLevelTruncation bool

	// PadLevelText Adds padding the level text so that all the levels output at the same length
	// PadLevelText is a superset of the DisableLevelTruncation option
	PadLevelText bool

	// QuoteEmptyFields will wrap empty fields in quotes if true
	QuoteEmptyFields bool

	// Whether the logger's out is to a terminal; set once by init().
	isTerminal bool

	// FieldMap allows users to customize the names of keys for default fields.
	// As an example:
	// formatter := &TextFormatter{
	//     FieldMap: FieldMap{
	//         FieldKeyTime:  "@timestamp",
	//         FieldKeyLevel: "@level",
	//         FieldKeyMsg:   "@message"}}
	FieldMap FieldMap

	// CallerPrettyfier can be set by the user to modify the content
	// of the function and file keys in the data when ReportCaller is
	// activated. If any of the returned value is the empty string the
	// corresponding key will be removed from fields.
	CallerPrettyfier func(*runtime.Frame) (function string, file string)

	// terminalInitOnce guards the one-time init() performed on first Format.
	terminalInitOnce sync.Once

	// The max length of the level text, generated dynamically on init
	levelTextMaxLength int
}
103
// init runs once per formatter (via terminalInitOnce): it detects whether
// output goes to a terminal and computes the widest level name for
// PadLevelText.
func (f *TextFormatter) init(entry *Entry) {
	if entry.Logger != nil {
		f.isTerminal = checkIfTerminal(entry.Logger.Out)
	}
	// Get the max length of the level text
	for _, level := range AllLevels {
		levelTextLength := utf8.RuneCount([]byte(level.String()))
		if levelTextLength > f.levelTextMaxLength {
			f.levelTextMaxLength = levelTextLength
		}
	}
}

// isColored decides whether to emit ANSI colors: ForceColors or a
// non-Windows terminal enables them, CLICOLOR/CLICOLOR_FORCE may override
// when EnvironmentOverrideColors is set, and DisableColors always wins.
func (f *TextFormatter) isColored() bool {
	isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))

	if f.EnvironmentOverrideColors {
		switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
		case ok && force != "0":
			isColored = true
		case ok && force == "0", os.Getenv("CLICOLOR") == "0":
			isColored = false
		}
	}

	return isColored && !f.DisableColors
}
131
// Format renders a single log entry as "key=value" text, either colored
// (printColored) or as a flat key=value list, terminated by a newline.
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
	// Copy user fields so the entry's own map is never mutated.
	data := make(Fields)
	for k, v := range entry.Data {
		data[k] = v
	}
	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}

	var funcVal, fileVal string

	// fixedKeys lists the default keys (time/level/msg/...) first, then —
	// in the non-colored path — the user keys.
	fixedKeys := make([]string, 0, 4+len(data))
	if !f.DisableTimestamp {
		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
	}
	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
	if entry.Message != "" {
		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
	}
	if entry.err != "" {
		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
	}
	if entry.HasCaller() {
		if f.CallerPrettyfier != nil {
			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
		} else {
			funcVal = entry.Caller.Function
			fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
		}

		// An empty string from CallerPrettyfier drops that key.
		if funcVal != "" {
			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
		}
		if fileVal != "" {
			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
		}
	}

	// Sorting: default is sort.Strings over user keys only; a custom
	// SortingFunc sorts the combined list when not colored, or just the
	// user keys when colored (printColored handles defaults itself).
	if !f.DisableSorting {
		if f.SortingFunc == nil {
			sort.Strings(keys)
			fixedKeys = append(fixedKeys, keys...)
		} else {
			if !f.isColored() {
				fixedKeys = append(fixedKeys, keys...)
				f.SortingFunc(fixedKeys)
			} else {
				f.SortingFunc(keys)
			}
		}
	} else {
		fixedKeys = append(fixedKeys, keys...)
	}

	// Reuse the entry's scratch buffer when one was supplied.
	var b *bytes.Buffer
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	f.terminalInitOnce.Do(func() { f.init(entry) })

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = defaultTimestampFormat
	}
	if f.isColored() {
		f.printColored(b, entry, keys, data, timestampFormat)
	} else {

		for _, key := range fixedKeys {
			var value interface{}
			switch {
			case key == f.FieldMap.resolve(FieldKeyTime):
				value = entry.Time.Format(timestampFormat)
			case key == f.FieldMap.resolve(FieldKeyLevel):
				value = entry.Level.String()
			case key == f.FieldMap.resolve(FieldKeyMsg):
				value = entry.Message
			case key == f.FieldMap.resolve(FieldKeyLogrusError):
				value = entry.err
			case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
				value = funcVal
			case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
				value = fileVal
			default:
				value = data[key]
			}
			f.appendKeyValue(b, key, value)
		}
	}

	b.WriteByte('\n')
	return b.Bytes(), nil
}
231
// printColored writes the entry in the colored, human-oriented layout:
// LEVEL[timestamp]caller message followed by colorized key=value pairs.
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
	var levelColor int
	switch entry.Level {
	case DebugLevel, TraceLevel:
		levelColor = gray
	case WarnLevel:
		levelColor = yellow
	case ErrorLevel, FatalLevel, PanicLevel:
		levelColor = red
	case InfoLevel:
		levelColor = blue
	default:
		levelColor = blue
	}

	levelText := strings.ToUpper(entry.Level.String())
	if !f.DisableLevelTruncation && !f.PadLevelText {
		// Safe: every built-in level name ("info", "panic", ...) is at
		// least 4 characters long.
		levelText = levelText[0:4]
	}
	if f.PadLevelText {
		// Generates the format string used in the next line, for example "%-6s" or "%-7s".
		// Based on the max level text length.
		formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s"
		// Formats the level text by appending spaces up to the max length, for example:
		// - "INFO   "
		// - "WARNING"
		levelText = fmt.Sprintf(formatString, levelText)
	}

	// Remove a single newline if it already exists in the message to keep
	// the behavior of logrus text_formatter the same as the stdlib log package
	entry.Message = strings.TrimSuffix(entry.Message, "\n")

	caller := ""
	if entry.HasCaller() {
		funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)

		if f.CallerPrettyfier != nil {
			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
		}

		// Omit whichever part the prettyfier blanked out.
		if fileVal == "" {
			caller = funcVal
		} else if funcVal == "" {
			caller = fileVal
		} else {
			caller = fileVal + " " + funcVal
		}
	}

	switch {
	case f.DisableTimestamp:
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
	case !f.FullTimestamp:
		// Seconds elapsed since process start (baseTimestamp).
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
	default:
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
	}
	for _, k := range keys {
		v := data[k]
		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
		f.appendValue(b, v)
	}
}
297
298func (f *TextFormatter) needsQuoting(text string) bool {
299 if f.ForceQuote {
300 return true
301 }
302 if f.QuoteEmptyFields && len(text) == 0 {
303 return true
304 }
305 if f.DisableQuote {
306 return false
307 }
308 for _, ch := range text {
309 if !((ch >= 'a' && ch <= 'z') ||
310 (ch >= 'A' && ch <= 'Z') ||
311 (ch >= '0' && ch <= '9') ||
312 ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
313 return true
314 }
315 }
316 return false
317}
318
319func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
320 if b.Len() > 0 {
321 b.WriteByte(' ')
322 }
323 b.WriteString(key)
324 b.WriteByte('=')
325 f.appendValue(b, value)
326}
327
328func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
329 stringVal, ok := value.(string)
330 if !ok {
331 stringVal = fmt.Sprint(value)
332 }
333
334 if !f.needsQuoting(stringVal) {
335 b.WriteString(stringVal)
336 } else {
337 b.WriteString(fmt.Sprintf("%q", stringVal))
338 }
339}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 0000000..074fd4b
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,102 @@
1package logrus
2
3import (
4 "bufio"
5 "io"
6 "runtime"
7 "strings"
8)
9
// Writer returns an io.PipeWriter that writes to the logger at the
// info log level. See WriterLevel for details.
func (logger *Logger) Writer() *io.PipeWriter {
	return logger.WriterLevel(InfoLevel)
}
14
// WriterLevel returns an io.Writer that can be used to write arbitrary text to
// the logger at the given log level. Each line written to the writer will be
// printed in the usual way using formatters and hooks. The writer is part of an
// io.Pipe and it is the caller's responsibility to close the writer when done.
// This can be used to override the standard library logger easily.
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
	return NewEntry(logger).WriterLevel(level)
}
23
// Writer returns an io.PipeWriter that writes to the logger at the
// info log level. See WriterLevel for details.
func (entry *Entry) Writer() *io.PipeWriter {
	return entry.WriterLevel(InfoLevel)
}
28
29// WriterLevel returns an io.Writer that writes to the logger at the given log level
30func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
31 reader, writer := io.Pipe()
32
33 var printFunc func(args ...interface{})
34
35 // Determine which log function to use based on the specified log level
36 switch level {
37 case TraceLevel:
38 printFunc = entry.Trace
39 case DebugLevel:
40 printFunc = entry.Debug
41 case InfoLevel:
42 printFunc = entry.Info
43 case WarnLevel:
44 printFunc = entry.Warn
45 case ErrorLevel:
46 printFunc = entry.Error
47 case FatalLevel:
48 printFunc = entry.Fatal
49 case PanicLevel:
50 printFunc = entry.Panic
51 default:
52 printFunc = entry.Print
53 }
54
55 // Start a new goroutine to scan the input and write it to the logger using the specified print function.
56 // It splits the input into chunks of up to 64KB to avoid buffer overflows.
57 go entry.writerScanner(reader, printFunc)
58
59 // Set a finalizer function to close the writer when it is garbage collected
60 runtime.SetFinalizer(writer, writerFinalizer)
61
62 return writer
63}
64
65// writerScanner scans the input from the reader and writes it to the logger
66func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
67 scanner := bufio.NewScanner(reader)
68
69 // Set the buffer size to the maximum token size to avoid buffer overflows
70 scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
71
72 // Define a split function to split the input into chunks of up to 64KB
73 chunkSize := bufio.MaxScanTokenSize // 64KB
74 splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
75 if len(data) >= chunkSize {
76 return chunkSize, data[:chunkSize], nil
77 }
78
79 return bufio.ScanLines(data, atEOF)
80 }
81
82 // Use the custom split function to split the input
83 scanner.Split(splitFunc)
84
85 // Scan the input and write it to the logger using the specified print function
86 for scanner.Scan() {
87 printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
88 }
89
90 // If there was an error while scanning the input, log an error
91 if err := scanner.Err(); err != nil {
92 entry.Errorf("Error while reading from Writer: %s", err)
93 }
94
95 // Close the reader when we are done
96 reader.Close()
97}
98
// writerFinalizer is a finalizer that closes the given writer when it
// is garbage collected, unblocking the goroutine reading from the pipe.
func writerFinalizer(writer *io.PipeWriter) {
	writer.Close()
}