vendor vendors

Adrien Delorme 2019-10-14 16:21:52 +02:00
parent 2b0e0d4eab
commit 8b1fcbb3bd
485 changed files with 99277 additions and 45870 deletions

go.mod (59 lines changed)

@ -5,24 +5,23 @@ require (
github.com/1and1/oneandone-cloudserver-sdk-go v1.0.1
github.com/Azure/azure-sdk-for-go v30.0.0+incompatible
github.com/Azure/go-autorest v12.0.0+incompatible
github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 // indirect
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290
github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022
github.com/NaverCloudPlatform/ncloud-sdk-go v0.0.0-20180110055012-c2e73f942591
github.com/PuerkitoBio/goquery v1.5.0 // indirect
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/Telmate/proxmox-api-go v0.0.0-20190815172943-ef9222844e60
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect
github.com/agext/levenshtein v1.2.2 // indirect
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70
github.com/antchfx/htmlquery v1.0.0 // indirect
github.com/antchfx/xmlquery v1.0.0 // indirect
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd // indirect
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607 // indirect
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6 // indirect
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 // indirect
github.com/approvals/go-approval-tests v0.0.0-20160714161514-ad96e53bea43
github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go v1.24.1
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect
github.com/biogo/hts v0.0.0-20160420073057-50da7d4131a3
github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae
github.com/cheggaaa/pb v1.0.27
@ -38,7 +37,7 @@ require (
github.com/docker/docker v0.0.0-20180422163414-57142e89befe // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/dylanmei/iso8601 v0.1.0 // indirect
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08
github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1
github.com/exoscale/egoscale v0.18.1
github.com/fatih/camelcase v1.0.0
github.com/fatih/structtag v1.0.0
@ -48,28 +47,31 @@ require (
github.com/gocolly/colly v1.2.0
github.com/gofrs/flock v0.7.1
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/golang/example v0.0.0-20170904185048-46695d81d1fa // indirect
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
github.com/google/go-cmp v0.3.0
github.com/google/go-querystring v1.0.0 // indirect
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
github.com/google/uuid v1.0.0
github.com/google/uuid v1.1.1
github.com/gophercloud/gophercloud v0.2.0
github.com/gophercloud/utils v0.0.0-20190124192022-a5c25e7a53a6
github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
github.com/gorilla/websocket v0.0.0-20170319172727-a91eba7f9777 // indirect
github.com/gorilla/websocket v1.4.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
github.com/hashicorp/consul v1.4.0
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de
github.com/hashicorp/go-checkpoint v0.5.0
github.com/hashicorp/go-cleanhttp v0.5.0
github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da
github.com/hashicorp/go-getter v1.4.0
github.com/hashicorp/go-msgpack v0.5.4 // indirect
github.com/hashicorp/go-multierror v1.0.0
github.com/hashicorp/go-oracle-terraform v0.0.0-20181016190316-007121241b79
github.com/hashicorp/go-retryablehttp v0.5.2 // indirect
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect
github.com/hashicorp/go-rootcerts v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.1
github.com/hashicorp/go-version v1.2.0
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/hcl/v2 v2.0.0
github.com/hashicorp/serf v0.8.2 // indirect
github.com/hashicorp/vault v1.1.0
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
@ -80,50 +82,49 @@ require (
github.com/joyent/triton-go v0.0.0-20180116165742-545edbe0d564
github.com/json-iterator/go v1.1.6 // indirect
github.com/jtolds/gls v4.2.1+incompatible // indirect
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
github.com/kennygrant/sanitize v1.2.4 // indirect
github.com/klauspost/compress v0.0.0-20160131094358-f86d2e6d8a77 // indirect
github.com/klauspost/cpuid v0.0.0-20160106104451-349c67577817 // indirect
github.com/klauspost/crc32 v0.0.0-20160114101742-999f3125931f // indirect
github.com/klauspost/pgzip v0.0.0-20151221113845-47f36e165cec
github.com/kr/fs v0.0.0-20131111012553-2788f0dbd169 // indirect
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/linode/linodego v0.7.1
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c // indirect
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 // indirect
github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939
github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b
github.com/mattn/go-colorable v0.1.1 // indirect
github.com/mattn/go-tty v0.0.0-20190424173100-523744f04859
github.com/miekg/dns v1.1.1 // indirect
github.com/mitchellh/cli v1.0.0
github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7
github.com/mitchellh/go-homedir v1.0.0
github.com/mitchellh/go-homedir v1.1.0
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
github.com/mitchellh/iochan v1.0.0
github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc
github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784
github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4
github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51
github.com/mitchellh/reflectwalk v1.0.0
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/moul/anonuuid v0.0.0-20160222162117-609b752a95ef // indirect
github.com/moul/gotty-client v0.0.0-20180327180212-b26a57ebc215 // indirect
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 // indirect
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
github.com/olekukonko/tablewriter v0.0.0-20180105111133-96aac992fc8b
github.com/onsi/ginkgo v1.7.0 // indirect
github.com/onsi/gomega v1.4.3 // indirect
github.com/onsi/ginkgo v1.10.2 // indirect
github.com/onsi/gomega v1.7.0 // indirect
github.com/oracle/oci-go-sdk v1.8.0
github.com/outscale/osc-go v0.0.1
github.com/packer-community/winrmcp v0.0.0-20180921204643-0fd363d6159a
github.com/pierrec/lz4 v2.0.5+incompatible
github.com/pkg/errors v0.8.1
github.com/pkg/sftp v0.0.0-20160118190721-e84cc8c755ca
github.com/posener/complete v1.1.1
github.com/posener/complete v1.2.1
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible
github.com/renstrom/fuzzysearch v0.0.0-20160331204855-2d205ac6ec17 // indirect
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 // indirect
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/scaleway/scaleway-cli v0.0.0-20180921094345-7b12c9699d70
github.com/shirou/gopsutil v2.18.12+incompatible
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect
@ -133,17 +134,19 @@ require (
github.com/temoto/robotstxt v1.1.1 // indirect
github.com/tencentcloud/tencentcloud-sdk-go v3.0.71+incompatible
github.com/ucloud/ucloud-sdk-go v0.8.7
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1
github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5
github.com/ulikunitz/xz v0.5.5
github.com/vmware/govmomi v0.0.0-20170707011325-c2105a174311
github.com/xanzy/go-cloudstack v0.0.0-20190526095453-42f262b63ed0
github.com/yandex-cloud/go-genproto v0.0.0-20190916101622-7617782d381e
github.com/yandex-cloud/go-sdk v0.0.0-20190916101744-c781afa45829
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5
github.com/zclconf/go-cty v1.1.0
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
golang.org/x/net v0.0.0-20190620200207-3b0461eec859
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0
google.golang.org/api v0.9.0
google.golang.org/grpc v1.21.1
gopkg.in/h2non/gock.v1 v1.0.12 // indirect

go.sum (124 lines changed)

@ -19,8 +19,8 @@ github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 h1:pSm8mp0T2OH2CP
github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290 h1:K9I21XUHNbYD3GNMmJBN0UKJCpdP+glftwNZ7Bo8kqY=
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8=
github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
github.com/NaverCloudPlatform/ncloud-sdk-go v0.0.0-20180110055012-c2e73f942591 h1:/P9HCl71+Eh6vDbKNyRu+rpIIR70UCZWNOGexVV3e6k=
github.com/NaverCloudPlatform/ncloud-sdk-go v0.0.0-20180110055012-c2e73f942591/go.mod h1:EHGzQGbwozJBj/4qj3WGrTJ0FqjgOTOxLQ0VNWvPn08=
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
@ -31,22 +31,31 @@ github.com/Telmate/proxmox-api-go v0.0.0-20190815172943-ef9222844e60 h1:iEmbIRk4
github.com/Telmate/proxmox-api-go v0.0.0-20190815172943-ef9222844e60/go.mod h1:OGWyIMJ87/k/GCz8CGiWB2HOXsOVDM6Lpe/nFPkC4IQ=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=
github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e h1:/8wOj52pewmIX/8d5eVO3t7Rr3astkBI/ruyg4WNqRo=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f h1:jI4DIE5Vf4oRaHfthB0oRhU+yuYuoOTurDzwAlskP00=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70 h1:FrF4uxA24DF3ARNXVbUin3wa5fDLaB1Cy8mKks/LRz4=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/antchfx/htmlquery v1.0.0 h1:O5IXz8fZF3B3MW+B33MZWbTHBlYmcfw0BAxgErHuaMA=
github.com/antchfx/htmlquery v1.0.0/go.mod h1:MS9yksVSQXls00iXkiMqXr0J+umL/AmxXKuP28SUJM8=
github.com/antchfx/xmlquery v1.0.0 h1:YuEPqexGG2opZKNc9JU3Zw6zFXwC47wNcy6/F8oKsrM=
github.com/antchfx/xmlquery v1.0.0/go.mod h1:/+CnyD/DzHRnv2eRxrVbieRU/FIF6N0C+7oTtyUtCKk=
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd h1:S3Fr6QnkpW9VRjiEY4psQHhhbbahASuNVj52YIce7lI=
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607 h1:BFFG6KP8ASFBg2ptWsJn8p8RDufBjBDKIxLU7BTYGOM=
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e h1:ptBAamGVd6CfRsUtyHD+goy2JGhv1QC32v3gqM8mYAM=
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0 h1:JaCC8jz0zdMLk2m+qCCVLLLM/PL93p84w4pK3aJWj60=
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6 h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I=
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
github.com/approvals/go-approval-tests v0.0.0-20160714161514-ad96e53bea43 h1:ePCAQPf5tUc5IMcUvu6euhSGna7jzs7eiXtJXHig6Zc=
github.com/approvals/go-approval-tests v0.0.0-20160714161514-ad96e53bea43/go.mod h1:S6puKjZ9ZeqUPBv2hEBnMZGcM2J6mOsDRQcmxkMAND0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@ -61,6 +70,8 @@ github.com/aws/aws-sdk-go v1.24.1 h1:B2NRyTV1/+h+Dg8Bh7vnuvW6QZz/NBL+uzgC2uILDMI
github.com/aws/aws-sdk-go v1.24.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/azr/flock v0.0.0-20190823144736-958d66434653 h1:2H3Cu0cbG8iszfcgnANwC/cm0YkPJIQvaJ9/tSpwh9o=
github.com/azr/flock v0.0.0-20190823144736-958d66434653/go.mod h1:EI7lzWWilX2K3ZMZ7Ta+E4DZtWzMC2tbn3cM3oVPuAU=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
@ -101,8 +112,8 @@ github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dylanmei/iso8601 v0.1.0 h1:812NGQDBcqquTfH5Yeo7lwR0nzx/cKdsmf3qMjPURUI=
github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ=
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08 h1:0bp6/GrNOrTDtSXe9YYGCwf8jp5Fb/b+4a6MTRm4qzY=
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08/go.mod h1:VBVDFSBXCIW8JaHQpI8lldSKfYaLMzP9oyq6IJ4fhzY=
github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1 h1:r1oACdS2XYiAWcfF8BJXkoU8l1J71KehGR+d99yWEDA=
github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y=
github.com/exoscale/egoscale v0.18.1 h1:1FNZVk8jHUx0AvWhOZxLEDNlacTU0chMXUUNkm9EZaI=
github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE=
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
@ -119,6 +130,8 @@ github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gocolly/colly v1.2.0 h1:qRz9YAn8FIH0qzgNUw+HT9UN7wm1oF9OBAilwEWpyrI=
@ -130,11 +143,14 @@ github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
github.com/golang/example v0.0.0-20170904185048-46695d81d1fa h1:iqCQC2Z53KkwGgTN9szyL4q0OQHmuNjeoNnMT6lk66k=
github.com/golang/example v0.0.0-20170904185048-46695d81d1fa/go.mod h1:tO/5UvQ/uKigUjQBPqzstj6uxd3fUIjddi19DxGJeWg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
@ -159,19 +175,19 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 h1:JM174NTeGNJ2m/oLH3UOWOvWQQKd+BoL3hcSCUWFLt0=
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gophercloud/gophercloud v0.2.0 h1:lD2Bce2xBAMNNcFZ0dObTpXkGLlVIb33RPVUNVpw6ic=
github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gophercloud/utils v0.0.0-20190124192022-a5c25e7a53a6 h1:Cw/B8Bu7Rryomxf7bjc8zNfIyLgjxsDd91n0eGRWpuo=
github.com/gophercloud/utils v0.0.0-20190124192022-a5c25e7a53a6/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw=
github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01 h1:OgCNGSnEalfkRpn//WGJHhpo7fkP+LhTpvEITZ7CkK4=
github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v0.0.0-20170319172727-a91eba7f9777 h1:JIM+OacoOJRU30xpjMf8sulYqjr0ViA3WDrTX6j/yDI=
github.com/gorilla/websocket v0.0.0-20170319172727-a91eba7f9777/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg=
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
@ -180,24 +196,26 @@ github.com/hashicorp/consul v1.4.0 h1:PQTW4xCuAExEiSbhrsFsikzbW5gVBoi74BjUvYFyKH
github.com/hashicorp/consul v1.4.0/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4=
github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=
github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg=
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da h1:HAasZmyRrb7/paYuww5RfVwY3wkFpsbMNYwBxOSZquY=
github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY=
github.com/hashicorp/go-getter v1.4.0 h1:ENHNi8494porjD0ZhIrjlAHnveSFhY7hvOJrV/fsKkw=
github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.4 h1:SFT72YqIkOcLdWJUYcriVX7hbrZpwc/f7h8aW2NUqrA=
github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-oracle-terraform v0.0.0-20181016190316-007121241b79 h1:RKu7yAXZTaQsxj1K9GDsh+QVw0+Wu1SWHxtbFN0n+hE=
github.com/hashicorp/go-oracle-terraform v0.0.0-20181016190316-007121241b79/go.mod h1:09jT3Y/OIsjTjQ2+3bkVNPDKqWcGIYYvjB2BEKVUdvc=
github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4=
github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E=
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg=
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
@ -217,6 +235,9 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/hcl/v2 v2.0.0 h1:efQznTz+ydmQXq3BOnRa3AXzvCeTq1P4dKj/z5GLlY8=
github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
@ -249,6 +270,8 @@ github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpR
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o=
github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@ -272,19 +295,24 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE=
github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY=
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c h1:FMUOnVGy8nWk1cvlMCAoftRItQGMxI0vzJ3dQjeZTCE=
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c/go.mod h1:mf8fjOu33zCqxUjuiU3I8S1lJMyEAlH+0F2+M5xl3hE=
github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg=
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939 h1:cRFHA33ER97Xy5jmjS519OXCS/yE3AT3zdbQAg0Z53g=
github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939/go.mod h1:CfZSN7zwz5gJiFhZJz49Uzk7mEBHIceWmbFmYx7Hf7E=
github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b h1:/1RFh2SLCJ+tEnT73+Fh5R2AO89sQqs8ba7o+hx1G0Y=
github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY=
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-tty v0.0.0-20190424173100-523744f04859 h1:smQbSzmT3EHl4EUwtFwFGmGIpiYgIiiPeVv1uguIQEE=
@ -298,20 +326,26 @@ github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7 h1:PXPMDtfqV+rZJsh
github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7/go.mod h1:g7SZj7ABpStq3tM4zqHiVEG5un/DZ1+qJJKO7qx1EvU=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed h1:FI2NIv6fpef6BQl2u3IZX/Cj20tfypRF4yd+uaHOMtI=
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc h1:5T6hzGUO5OrL6MdYXYoLQtRWJDDgjdlOVBn9mIqGY1g=
github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557 h1:w1QuuAA2km2Hax+EPamrq5ZRBeaNv2vsjvgB4an0zoU=
github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557/go.mod h1:QuAqW7/z+iv6aWFJdrA8kCbsF0OOJVKCICqTcYBexuY=
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784 h1:+DAetXqxv/mSyCkE9KBIYOZs9b68y7SUaDCxQMRjA68=
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo=
github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4 h1:jw9tsdJ1FQmUkyTXdIF/nByTX+mMnnp16glnvGZMsC4=
github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4/go.mod h1:YYMf4xtQnR8LRC0vKi3afvQ5QwRPQ17zjcpkBCufb+I=
github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51 h1:eD92Am0Qf3rqhsOeA1zwBHSfRkoHrt4o6uORamdmJP8=
github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo=
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@ -329,10 +363,10 @@ github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH
github.com/olekukonko/tablewriter v0.0.0-20180105111133-96aac992fc8b h1:LGItPaClbzopugAomw5VFKnG3h1dUr9QW5KOU+m8gu0=
github.com/olekukonko/tablewriter v0.0.0-20180105111133-96aac992fc8b/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/oracle/oci-go-sdk v1.8.0 h1:4SO45bKV0I3/Mn1os3ANDZmV0eSE5z5CLdSUIkxtyzs=
github.com/oracle/oci-go-sdk v1.8.0/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
@ -354,6 +388,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=
github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E=
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible h1:ZoVHH6voxW9Onzo6z2yLtocVoN6mBocyDoqoyAMHokE=
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible/go.mod h1:T3/WrziK7fYH3C8ilAFAHe99R452/IzIG3YYkqaOFeQ=
github.com/renstrom/fuzzysearch v0.0.0-20160331204855-2d205ac6ec17 h1:4qPms2txLWMLXKzqlnYSulKRS4cS9aYgPtAEpUelQok=
@ -370,6 +406,8 @@ github.com/scaleway/scaleway-cli v0.0.0-20180921094345-7b12c9699d70 h1:DaqC32ZwO
github.com/scaleway/scaleway-cli v0.0.0-20180921094345-7b12c9699d70/go.mod h1:XjlXWPd6VONhsRSEuzGkV8mzRpH7ou1cdLV7IKJk96s=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM=
github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U=
@ -382,6 +420,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -397,10 +436,11 @@ github.com/tencentcloud/tencentcloud-sdk-go v3.0.71+incompatible h1:9sIWfe6ZC7xo
github.com/tencentcloud/tencentcloud-sdk-go v3.0.71+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4=
github.com/ucloud/ucloud-sdk-go v0.8.7 h1:BmXOb5RivI0Uu4oZRpjI6SQ9/y7n/H9wxTGR1txIE8o=
github.com/ucloud/ucloud-sdk-go v0.8.7/go.mod h1:lM6fpI8y6iwACtlbHUav823/uKPdXsNBlnBpRF2fj3c=
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1 h1:U6ufy3mLDgg9RYupntOvAF7xCmNNquyKaYaaVHo1Nnk=
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5 h1:cMjKdf4PxEBN9K5HaD9UMW8gkTbM0kMzkTa9SJe0WNQ=
github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/vmware/govmomi v0.0.0-20170707011325-c2105a174311 h1:s5pyxd5S6wRs2WpEE0xRfWUF46Wbz44h203KnbX0ecI=
github.com/vmware/govmomi v0.0.0-20170707011325-c2105a174311/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
github.com/xanzy/go-cloudstack v0.0.0-20190526095453-42f262b63ed0 h1:NJrcIkdzq0C3I8ypAZwFE9RHtGbfp+mJvqIcoFATZuk=
@ -409,6 +449,8 @@ github.com/yandex-cloud/go-genproto v0.0.0-20190916101622-7617782d381e h1:hzwq5G
github.com/yandex-cloud/go-genproto v0.0.0-20190916101622-7617782d381e/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
github.com/yandex-cloud/go-sdk v0.0.0-20190916101744-c781afa45829 h1:2FGwbx03GpP1Ulzg/L46tSoKh9t4yg8BhMKQl/Ff1x8=
github.com/yandex-cloud/go-sdk v0.0.0-20190916101744-c781afa45829/go.mod h1:Eml0jFLU4VVHgIN8zPHMuNwZXVzUMILyO6lQZSfz854=
github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw=
github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
@ -420,10 +462,14 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@ -435,6 +481,7 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -477,14 +524,18 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M=
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
@ -506,6 +557,7 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=


@ -5,7 +5,7 @@ import (
"fmt"
"github.com/ChrisTrenkamp/goxpath/internal/execxp"
"github.com/ChrisTrenkamp/goxpath/internal/parser"
"github.com/ChrisTrenkamp/goxpath/parser"
"github.com/ChrisTrenkamp/goxpath/tree"
)


@ -3,7 +3,7 @@ package execxp
import (
"encoding/xml"
"github.com/ChrisTrenkamp/goxpath/internal/parser"
"github.com/ChrisTrenkamp/goxpath/parser"
"github.com/ChrisTrenkamp/goxpath/tree"
)


@ -3,9 +3,9 @@ package findutil
import (
"encoding/xml"
"github.com/ChrisTrenkamp/goxpath/internal/parser/pathexpr"
"github.com/ChrisTrenkamp/goxpath/internal/xconst"
"github.com/ChrisTrenkamp/goxpath/parser/pathexpr"
"github.com/ChrisTrenkamp/goxpath/tree"
"github.com/ChrisTrenkamp/goxpath/xconst"
)
const (


@ -6,15 +6,14 @@ import (
"strconv"
"strings"
"github.com/ChrisTrenkamp/goxpath/internal/parser"
"github.com/ChrisTrenkamp/goxpath/internal/parser/findutil"
"github.com/ChrisTrenkamp/goxpath/internal/parser/intfns"
"github.com/ChrisTrenkamp/goxpath/internal/xconst"
"github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil"
"github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns"
"github.com/ChrisTrenkamp/goxpath/internal/xsort"
"github.com/ChrisTrenkamp/goxpath/internal/lexer"
"github.com/ChrisTrenkamp/goxpath/internal/parser/pathexpr"
"github.com/ChrisTrenkamp/goxpath/lexer"
"github.com/ChrisTrenkamp/goxpath/parser"
"github.com/ChrisTrenkamp/goxpath/parser/pathexpr"
"github.com/ChrisTrenkamp/goxpath/tree"
"github.com/ChrisTrenkamp/goxpath/xconst"
)
type xpFilt struct {


@ -3,7 +3,7 @@ package lexer
import (
"fmt"
"github.com/ChrisTrenkamp/goxpath/internal/xconst"
"github.com/ChrisTrenkamp/goxpath/xconst"
)
func absLocPathState(l *Lexer) stateFn {


@ -1,6 +1,6 @@
package parser
import "github.com/ChrisTrenkamp/goxpath/internal/lexer"
import "github.com/ChrisTrenkamp/goxpath/lexer"
//NodeType enumerations
const (


@ -3,7 +3,7 @@ package parser
import (
"fmt"
"github.com/ChrisTrenkamp/goxpath/internal/lexer"
"github.com/ChrisTrenkamp/goxpath/lexer"
)
type stateType int

vendor/github.com/agext/levenshtein/.gitignore (generated, vendored, new file; 53 lines added)

@ -0,0 +1,53 @@
# Ignore docs files
_gh_pages
_site
# Ignore temporary files
README.html
coverage.out
.tmp
# Numerous always-ignore extensions
*.diff
*.err
*.log
*.orig
*.rej
*.swo
*.swp
*.vi
*.zip
*~
# OS or Editor folders
._*
.cache
.DS_Store
.idea
.project
.settings
.tmproj
*.esproj
*.sublime-project
*.sublime-workspace
nbproject
Thumbs.db
# Komodo
.komodotools
*.komodoproject
# SCSS-Lint
scss-lint-report.xml
# grunt-contrib-sass cache
.sass-cache
# Jekyll metadata
docs/.jekyll-metadata
# Folders to ignore
.build
.test
bower_components
node_modules

vendor/github.com/agext/levenshtein/.travis.yml (generated, vendored, new file; 25 lines added)

@ -0,0 +1,25 @@
language: go
sudo: false
matrix:
  fast_finish: true
  include:
    - go: 1.11.x
      env: TEST_METHOD=goveralls
    - go: 1.10.x
    - go: tip
    - go: 1.9.x
    - go: 1.8.x
    - go: 1.7.x
    - go: 1.6.x
    - go: 1.5.x
  allow_failures:
    - go: tip
    - go: 1.9.x
    - go: 1.8.x
    - go: 1.7.x
    - go: 1.6.x
    - go: 1.5.x
script: ./test.sh $TEST_METHOD
notifications:
  email:
    on_success: never

vendor/github.com/agext/levenshtein/DCO (generated, vendored, new file; 36 lines added)

@ -0,0 +1,36 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.


@ -1,4 +1,3 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@ -187,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Microsoft Corporation
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

vendor/github.com/agext/levenshtein/MAINTAINERS (generated, vendored, new file; 1 line added)

@ -0,0 +1 @@
Alex Bucataru <alex@alrux.com> (@AlexBucataru)

vendor/github.com/agext/levenshtein/NOTICE (generated, vendored, new file; 5 lines added)

@ -0,0 +1,5 @@
Alrux Go EXTensions (AGExt) - package levenshtein
Copyright 2016 ALRUX Inc.
This product includes software developed at ALRUX Inc.
(http://www.alrux.com/).

vendor/github.com/agext/levenshtein/README.md (generated, vendored, new file; 38 lines added)

@ -0,0 +1,38 @@
# A Go package for calculating the Levenshtein distance between two strings
[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein) 
[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein)
[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein)
[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein)
This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org).
## Project Status
v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on an "AS IS" basis.
This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.
## Overview
The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
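For example, a minimal sketch using the default parameters (pass `nil` for the `*Params` argument, as the package documentation describes):

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// Three unit-cost edits turn "kitten" into "sitting":
	// two substitutions and one insertion.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// Similarity normalizes the distance into the range 0..1,
	// where 1 means the strings are identical; here it is roughly 0.57.
	fmt.Printf("%.2f\n", levenshtein.Similarity("kitten", "sitting", nil))
}
```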
## Installation
```
go get github.com/agext/levenshtein
```
## License
Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.

vendor/github.com/agext/levenshtein/go.mod (generated, vendored, new file; 1 line added)

@ -0,0 +1 @@
module github.com/agext/levenshtein

vendor/github.com/agext/levenshtein/levenshtein.go (generated, vendored, new file; 290 lines added)

@ -0,0 +1,290 @@
// Copyright 2016 ALRUX Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure.
The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
*/
package levenshtein
// Calculate determines the Levenshtein distance between two strings, using
// the given costs for each edit operation. It returns the distance along with
// the lengths of the longest common prefix and suffix.
//
// If maxCost is non-zero, the calculation stops as soon as the distance is determined
// to be greater than maxCost. Therefore, any return value higher than maxCost is a
// lower bound for the actual distance.
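// For example, with unit costs and no maxCost threshold:
//
//	dist, prefixLen, suffixLen := Calculate([]rune("kitten"), []rune("sitting"), 0, 1, 1, 1)
//	// dist == 3, prefixLen == 0, suffixLen == 0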
func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) {
l1, l2 := len(str1), len(str2)
// trim common prefix, if any, as it doesn't affect the distance
for ; prefixLen < l1 && prefixLen < l2; prefixLen++ {
if str1[prefixLen] != str2[prefixLen] {
break
}
}
str1, str2 = str1[prefixLen:], str2[prefixLen:]
l1 -= prefixLen
l2 -= prefixLen
// trim common suffix, if any, as it doesn't affect the distance
for 0 < l1 && 0 < l2 {
if str1[l1-1] != str2[l2-1] {
str1, str2 = str1[:l1], str2[:l2]
break
}
l1--
l2--
suffixLen++
}
// if the first string is empty, the distance is the length of the second string times the cost of insertion
if l1 == 0 {
dist = l2 * insCost
return
}
// if the second string is empty, the distance is the length of the first string times the cost of deletion
if l2 == 0 {
dist = l1 * delCost
return
}
// variables used in inner "for" loops
var y, dy, c, l int
// if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited'
if maxCost > 0 {
if subCost < delCost+insCost {
if maxCost >= l1*subCost+(l2-l1)*insCost {
maxCost = 0
}
} else {
if maxCost >= l1*delCost+l2*insCost {
maxCost = 0
}
}
}
if maxCost > 0 {
// prefer the longer string first, to minimize time;
// a swap also transposes the meanings of insertion and deletion.
if l1 < l2 {
str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
}
// the length differential times cost of deletion is a lower bound for the cost;
// if it is higher than the maxCost, there is no point going into the main calculation.
if dist = (l1 - l2) * delCost; dist > maxCost {
return
}
d := make([]int, l1+1)
// offset and length of d in the current row
doff, dlen := 0, 1
for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ {
d[y] = dy
y++
dy = y * delCost
}
// fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
for x := 0; x < l2; x++ {
dy, d[doff] = d[doff], d[doff]+insCost
for d[doff] > maxCost && dlen > 0 {
if str1[doff] != str2[x] {
dy += subCost
}
doff++
dlen--
if c = d[doff] + insCost; c < dy {
dy = c
}
dy, d[doff] = d[doff], dy
}
for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy {
if str1[y] != str2[x] {
dy += subCost
}
if c = d[y] + delCost; c < dy {
dy = c
}
y++
if c = d[y] + insCost; c < dy {
dy = c
}
}
if y < l1 {
if str1[y] != str2[x] {
dy += subCost
}
if c = d[y] + delCost; c < dy {
dy = c
}
for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy {
y++
dlen++
}
}
// fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
if dlen == 0 {
dist = maxCost + 1
return
}
}
if doff+dlen-1 < l1 {
dist = maxCost + 1
return
}
dist = d[l1]
} else {
// ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate whether it is
// worth implementing the diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space
// http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html
// prefer the shorter string first, to minimize space; time is O(l1*l2) anyway;
// a swap also transposes the meanings of insertion and deletion.
if l1 > l2 {
str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
}
d := make([]int, l1+1)
for y = 1; y <= l1; y++ {
d[y] = y * delCost
}
for x := 0; x < l2; x++ {
dy, d[0] = d[0], d[0]+insCost
for y = 0; y < l1; dy, d[y] = d[y], dy {
if str1[y] != str2[x] {
dy += subCost
}
if c = d[y] + delCost; c < dy {
dy = c
}
y++
if c = d[y] + insCost; c < dy {
dy = c
}
}
}
dist = d[l1]
}
return
}
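// A minimal usage sketch, assuming the default unit costs defined in params.go:
//
//	dist, prefixLen, suffixLen := Calculate([]rune("kitten"), []rune("sitting"), 0, 1, 1, 1)
//	// dist == 3, prefixLen == 0, suffixLen == 0
//
//	// With maxCost below the true distance, only a lower bound comes back:
//	bound, _, _ := Calculate([]rune("kitten"), []rune("sitting"), 2, 1, 1, 1)
//	// bound > 2, not necessarily the exact distance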
// Distance returns the Levenshtein distance between str1 and str2, using the
// default or provided cost values. Pass nil for the third argument to use the
// default cost of 1 for all three operations, with no maximum.
func Distance(str1, str2 string, p *Params) int {
if p == nil {
p = defaultParams
}
dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost)
return dist
}
// Similarity returns a score in the range of 0..1 for how similar the two strings are.
// A score of 1 means the strings are identical, and 0 means they have nothing in common.
//
// A nil third argument uses the default cost of 1 for all three operations.
//
// If a non-zero MinScore value is provided in the parameters, scores lower than it
// will be returned as 0.
func Similarity(str1, str2 string, p *Params) float64 {
return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus
}
// Match returns a similarity score adjusted by the same method as proposed by Winkler for
// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their
// similarity score is already over a threshold.
//
// The score is in the range of 0..1, with 1 meaning the strings are identical,
// and 0 meaning they have nothing in common.
//
// A nil third argument uses the default cost of 1 for all three operations, a maximum common-prefix
// length of 4 for the bonus, a scaling factor of 0.1, and a bonus threshold of 0.7.
//
// If a non-zero MinScore value is provided in the parameters, scores lower than it
// will be returned as 0.
func Match(str1, str2 string, p *Params) float64 {
s1, s2 := []rune(str1), []rune(str2)
l1, l2 := len(s1), len(s2)
// two empty strings are identical; shortcut also avoids divByZero issues later on.
if l1 == 0 && l2 == 0 {
return 1
}
if p == nil {
p = defaultParams
}
// a min over 1 can never be satisfied, so the score is 0.
if p.minScore > 1 {
return 0
}
insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0
if l1 > l2 {
l1, l2, insCost, delCost = l2, l1, delCost, insCost
}
if p.subCost < delCost+insCost {
maxDist = l1*p.subCost + (l2-l1)*insCost
} else {
maxDist = l1*delCost + l2*insCost
}
// a zero min is always satisfied, so no need to set a max cost.
if p.minScore > 0 {
// if p.minScore is lower than p.bonusThreshold, we can use a simplified formula
// for the max cost, because a sim score below min cannot receive a bonus.
if p.minScore < p.bonusThreshold {
// round down the max - a cost equal to a rounded up max would already be under min.
max = int((1 - p.minScore) * float64(maxDist))
} else {
// p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim)
// p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist))
// p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist
// 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist
// (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist
max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale))
}
}
dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost)
if max > 0 && dist > max {
return 0
}
sim := 1 - float64(dist)/float64(maxDist)
if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 {
if pl > p.bonusPrefix {
pl = p.bonusPrefix
}
sim += float64(pl) * p.bonusScale * (1 - sim)
}
if sim < p.minScore {
return 0
}
return sim
}
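A minimal sketch of the three exported metrics with default parameters; the strings are illustrative and the commented results follow from the definitions above:

package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// A nil Params means unit costs, no maximum cost and no minimum score.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// Similarity normalizes the distance into 0..1 (here 1 - 3/7).
	fmt.Println(levenshtein.Similarity("kitten", "sitting", nil))

	// Match adds a Winkler-style prefix bonus once the similarity clears
	// the default 0.7 bonus threshold.
	fmt.Println(levenshtein.Match("program", "programme", nil))
}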

vendor/github.com/agext/levenshtein/params.go generated vendored Normal file

@ -0,0 +1,152 @@
// Copyright 2016 ALRUX Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package levenshtein
// Params represents a set of parameter values for the various formulas involved
// in the calculation of the Levenshtein string metrics.
type Params struct {
insCost int
subCost int
delCost int
maxCost int
minScore float64
bonusPrefix int
bonusScale float64
bonusThreshold float64
}
var (
defaultParams = NewParams()
)
// NewParams creates a new set of parameters and initializes it with the default values.
func NewParams() *Params {
return &Params{
insCost: 1,
subCost: 1,
delCost: 1,
maxCost: 0,
minScore: 0,
bonusPrefix: 4,
bonusScale: .1,
bonusThreshold: .7,
}
}
// Clone returns a pointer to a copy of the receiver parameter set, or of a new
// default parameter set if the receiver is nil.
func (p *Params) Clone() *Params {
if p == nil {
return NewParams()
}
return &Params{
insCost: p.insCost,
subCost: p.subCost,
delCost: p.delCost,
maxCost: p.maxCost,
minScore: p.minScore,
bonusPrefix: p.bonusPrefix,
bonusScale: p.bonusScale,
bonusThreshold: p.bonusThreshold,
}
}
// InsCost overrides the default value of 1 for the cost of insertion.
// The new value must be zero or positive.
func (p *Params) InsCost(v int) *Params {
if v >= 0 {
p.insCost = v
}
return p
}
// SubCost overrides the default value of 1 for the cost of substitution.
// The new value must be zero or positive.
func (p *Params) SubCost(v int) *Params {
if v >= 0 {
p.subCost = v
}
return p
}
// DelCost overrides the default value of 1 for the cost of deletion.
// The new value must be zero or positive.
func (p *Params) DelCost(v int) *Params {
if v >= 0 {
p.delCost = v
}
return p
}
// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost.
// The calculation of Distance() stops when the result is guaranteed to exceed
// this maximum, returning a lower-bound rather than exact value.
// The new value must be zero or positive.
func (p *Params) MaxCost(v int) *Params {
if v >= 0 {
p.maxCost = v
}
return p
}
// MinScore overrides the default value of 0 for the minimum similarity score.
// Scores below this threshold are returned as 0 by Similarity() and Match().
// The new value must be zero or positive. Note that a minimum greater than 1
// can never be satisfied, resulting in a score of 0 for any pair of strings.
func (p *Params) MinScore(v float64) *Params {
if v >= 0 {
p.minScore = v
}
return p
}
// BonusPrefix overrides the default value for the maximum length of
// common prefix to be considered for bonus by Match().
// The new value must be zero or positive.
func (p *Params) BonusPrefix(v int) *Params {
if v >= 0 {
p.bonusPrefix = v
}
return p
}
// BonusScale overrides the default value for the scaling factor used by Match()
// in calculating the bonus.
// The new value must be zero or positive. To guarantee that the similarity score
// remains in the interval 0..1, this scaling factor is not allowed to exceed
// 1 / BonusPrefix.
func (p *Params) BonusScale(v float64) *Params {
if v >= 0 {
p.bonusScale = v
}
// the bonus cannot exceed (1-sim), or the score may become greater than 1.
if float64(p.bonusPrefix)*p.bonusScale > 1 {
p.bonusScale = 1 / float64(p.bonusPrefix)
}
return p
}
// BonusThreshold overrides the default value for the minimum similarity score
// for which Match() can assign a bonus.
// The new value must be zero or positive. Note that a threshold greater than 1
// effectively makes Match() become the equivalent of Similarity().
func (p *Params) BonusThreshold(v float64) *Params {
if v >= 0 {
p.bonusThreshold = v
}
return p
}
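The setters return the receiver, so a parameter set can be built by chaining; a brief sketch (the values are illustrative):

package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// Penalize substitutions and deletions twice as much as insertions,
	// and round any similarity below 0.8 down to 0.
	p := levenshtein.NewParams().SubCost(2).DelCost(2).MinScore(0.8)
	fmt.Println(levenshtein.Similarity("levenshtein", "levenstein", p))
}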

vendor/github.com/agext/levenshtein/test.sh generated vendored Normal file

@ -0,0 +1,10 @@
#!/bin/bash
set -ev
if [[ "$1" == "goveralls" ]]; then
echo "Testing with goveralls..."
go get github.com/mattn/goveralls
$HOME/gopath/bin/goveralls -service=travis-ci
else
echo "Testing with go test..."
go test -v ./...
fi


@ -12,15 +12,23 @@ import (
"strings"
)
// headerSorter defines the key-value structure for storing the sorted data in signHeader.
type headerSorter struct {
Keys []string
Vals []string
}
// signHeader signs the header and sets it as the authorization header.
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
// Get the final authorization string
authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + conn.getSignedStr(req, canonicalizedResource)
// Give the parameter "Authorization" value
req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
}
func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) string {
// Collect the "x-oss-"-prefixed headers of the request
temp := make(map[string]string)
for k, v := range req.Header {
@ -30,16 +38,17 @@ func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
}
hs := newHeaderSorter(temp)
// Sort the temp by the ascending order
hs.Sort()
// Get the canonicalizedOSSHeaders
canonicalizedOSSHeaders := ""
for i := range hs.Keys {
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
}
// Give other parameters values
// When signing a URL, the date holds the expiration time
date := req.Header.Get(HTTPHeaderDate)
contentType := req.Header.Get(HTTPHeaderContentType)
contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
@ -49,14 +58,10 @@ func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
io.WriteString(h, signStr)
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
return signedStr
}
// newHeaderSorter is a helper for signHeader; it builds a headerSorter from a header map.
func newHeaderSorter(m map[string]string) *headerSorter {
hs := &headerSorter{
Keys: make([]string, 0, len(m)),
@ -70,22 +75,22 @@ func newHeaderSorter(m map[string]string) *headerSorter {
return hs
}
// Sort sorts the headers by key in ascending order (helper for signHeader).
func (hs *headerSorter) Sort() {
sort.Sort(hs)
}
// Len returns the number of header entries, implementing sort.Interface.
func (hs *headerSorter) Len() int {
return len(hs.Vals)
}
// Less reports whether the key at index i sorts before the key at index j, implementing sort.Interface.
func (hs *headerSorter) Less(i, j int) bool {
return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
}
// Swap swaps the entries at indexes i and j, implementing sort.Interface.
func (hs *headerSorter) Swap(i, j int) {
hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
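As a standalone illustration of what the sorted headers feed into, here is a sketch of the canonical x-oss- header string that getSignedStr assembles (the header names and values are hypothetical):

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Only x-oss-* headers participate, sorted by key in ascending order.
	headers := map[string]string{
		"x-oss-meta-author": "alice",
		"x-oss-acl":         "private",
	}
	keys := make([]string, 0, len(headers))
	for k := range headers {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var canonical strings.Builder
	for _, k := range keys {
		// Each entry contributes "key:value\n" to the string to be signed.
		canonical.WriteString(k + ":" + headers[k] + "\n")
	}
	fmt.Print(canonical.String()) // x-oss-acl:private\nx-oss-meta-author:alice\n
}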


@ -5,14 +5,16 @@ import (
"crypto/md5"
"encoding/base64"
"encoding/xml"
"fmt"
"hash"
"hash/crc64"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
// Bucket implements the operations on objects.
@ -21,16 +23,15 @@ type Bucket struct {
BucketName string
}
// PutObject creates a new object. If the object already exists, it is overwritten.
//
// objectKey the object key in UTF-8 encoding. The length must be between 1 and 1023 bytes, and it cannot start with "/" or "\".
// reader an io.Reader instance providing the data to upload.
// options the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding,
// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details.
// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error {
opts := addContentType(options, objectKey)
@ -48,14 +49,13 @@ func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Op
return err
}
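// A hedged usage sketch (assumes a Client created with oss.New and a bucket
// obtained via client.Bucket; all names are illustrative):
//
//	bucket, _ := client.Bucket("my-bucket")
//	err := bucket.PutObject("folder/hello.txt", strings.NewReader("hello OSS"))
//	if err != nil {
//		// handle the error
//	}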
// PutObjectFromFile creates a new object whose content is read from a local file.
//
// objectKey the object key.
// filePath the local file path to upload.
// options the options for uploading the object. Refer to the parameter options in PutObject for more details.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error {
fd, err := os.Open(filePath)
@ -79,14 +79,13 @@ func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Op
return err
}
// DoPutObject does the actual upload work.
//
// request the request instance for uploading an object.
// options the options for uploading an object.
//
// Response the response from OSS.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) {
isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType)
@ -96,7 +95,8 @@ func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*
listener := getProgressListener(options)
resp, err := bucket.do("PUT", request.ObjectKey, "", "", options, request.Reader, listener)
params := map[string]interface{}{}
resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener)
if err != nil {
return nil, err
}
@ -113,60 +113,64 @@ func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*
return resp, err
}
// GetObject downloads the object.
//
// objectKey the object key.
// options the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch,
// IfNoneMatch, AcceptEncoding. For more details, please check out:
// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
//
// io.ReadCloser the reader instance for reading the downloaded data. The caller must close it after use. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) {
result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
if err != nil {
return nil, err
}
return result.Response.Body, nil
return result.Response, nil
}
// GetObjectToFile downloads the object into a local file.
//
// objectKey the object key to download.
// filePath the local file to store the object data.
// options the options for downloading the object. Refer to the parameter options in method GetObject for more details.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error {
tempFilePath := filePath + TempFileSuffix
// Calls the API to actually download the object. Returns the result instance.
result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
if err != nil {
return err
}
defer result.Response.Body.Close()
defer result.Response.Close()
// If the local file does not exist, create a new one. If it exists, overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
if err != nil {
return err
}
// Copy the data to the local file path.
_, err = io.Copy(fd, result.Response.Body)
fd.Close()
if err != nil {
return err
}
// Compare the CRC values
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
if bucket.getConfig().IsEnableCRC && !hasRange {
encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil)
acceptEncoding := ""
if encodeOpt != nil {
acceptEncoding = encodeOpt.(string)
}
if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
result.Response.ClientCRC = result.ClientCRC.Sum64()
err = checkCRC(result.Response, "GetObjectToFile")
if err != nil {
@ -178,17 +182,17 @@ func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Opti
return os.Rename(tempFilePath, filePath)
}
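// A hedged sketch of the flow above (paths illustrative); the CRC check runs
// automatically unless a Range option or gzip AcceptEncoding is in effect:
//
//	err := bucket.GetObjectToFile("folder/hello.txt", "/tmp/hello.txt")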
// DoGetObject is the actual API that gets the object. It's the internal function called by other public APIs.
//
// request the request to download the object.
// options the options for downloading the file. Check out the parameter options in method GetObject.
//
// GetObjectResult the result instance of getting the object.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
resp, err := bucket.do("GET", request.ObjectKey, "", "", options, nil, nil)
params, _ := getRawParams(options)
resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil)
if err != nil {
return nil, err
}
@ -197,7 +201,7 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
Response: resp,
}
// CRC
var crcCalc hash.Hash64
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
if bucket.getConfig().IsEnableCRC && !hasRange {
@ -206,32 +210,32 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
result.ClientCRC = crcCalc
}
// Progress
listener := getProgressListener(options)
contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))
resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
return result, nil
}
// CopyObject copies the object inside the bucket.
//
// srcObjectKey the source object key.
// destObjectKey the target object key.
// options options for copying an object. You can specify the conditions of the copy. The valid conditions are CopySourceIfMatch,
// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince and MetadataDirective.
// You can also specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires,
// ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details:
// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
var out CopyObjectResult
options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
resp, err := bucket.do("PUT", destObjectKey, "", "", options, nil, nil)
params := map[string]interface{}{}
resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil)
if err != nil {
return out, err
}
@ -241,29 +245,28 @@ func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...O
return out, err
}
// CopyObjectTo copies the object to another bucket.
//
// srcObjectKey the source object key. The source bucket is Bucket.BucketName.
// destBucketName the target bucket name.
// destObjectKey the target object name.
// options copy options. Check out the parameter options in function CopyObject for more details.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
}
// CopyObjectFrom copies an object from another bucket into the current bucket.
//
// srcBucketName the source bucket name.
// srcObjectKey the source object key.
// destObjectKey the target object key. The target bucket name is Bucket.BucketName.
// options copy options. Check out the parameter options in function CopyObject.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
destBucketName := bucket.BucketName
@ -284,7 +287,8 @@ func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, op
if err != nil {
return out, err
}
resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, "", "", headers, nil, 0, nil)
params := map[string]interface{}{}
resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil)
if err != nil {
return out, err
}
@ -294,22 +298,21 @@ func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, op
return out, err
}
// AppendObject uploads data by appending it to an existing or new object.
//
// The parameter appendPosition specifies the position in the target object at which to append. For the first append (to a non-existing object)
// appendPosition must be 0; for subsequent appends it is the current object length.
// For example, if the first AppendObject call uses position 0 and uploads 65536 bytes, then the second call's position is 65536.
// The response header x-oss-next-append-position returned by each successful request also gives the next call's append position, so the caller need not maintain this information.
//
// objectKey the target object to append to.
// reader an io.Reader instance providing the data to append.
// appendPosition the start position of the append.
// options the options for the first append, such as CacheControl, ContentDisposition, ContentEncoding,
// Expires, ServerSideEncryption, ObjectACL.
//
// int64 the next append position; it's valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
request := &AppendObjectRequest{
@ -319,21 +322,25 @@ func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosi
}
result, err := bucket.DoAppendObject(request, options)
if err != nil {
return appendPosition, err
}
return result.NextPosition, err
}
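// A hedged sketch of chained appends (key and data illustrative): the first
// call starts at position 0 and each successful call returns the next one:
//
//	next, err := bucket.AppendObject("app.log", strings.NewReader("line 1\n"), 0)
//	if err == nil {
//		next, err = bucket.AppendObject("app.log", strings.NewReader("line 2\n"), next)
//	}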
// DoAppendObject is the actual API that does the object append.
//
// request the request object for appending the object.
// options the options for appending the object.
//
// AppendObjectResult the result object for the append.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
params := "append&position=" + strconv.FormatInt(request.Position, 10)
params := map[string]interface{}{}
params["append"] = nil
params["position"] = strconv.FormatInt(request.Position, 10)
headers := make(map[string]string)
opts := addContentType(options, request.ObjectKey)
@ -348,7 +355,7 @@ func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Opti
listener := getProgressListener(options)
handleOptions(headers, opts)
resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, params, headers,
resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers,
request.Reader, initCRC, listener)
if err != nil {
return nil, err
@ -371,15 +378,15 @@ func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Opti
return result, nil
}
// DeleteObject deletes the object.
//
// objectKey the object key to delete.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DeleteObject(objectKey string) error {
resp, err := bucket.do("DELETE", objectKey, "", "", nil, nil, nil)
params := map[string]interface{}{}
resp, err := bucket.do("DELETE", objectKey, params, nil, nil, nil)
if err != nil {
return err
}
@ -387,14 +394,14 @@ func (bucket Bucket) DeleteObject(objectKey string) error {
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// DeleteObjects deletes multiple objects.
//
// objectKeys the object keys to delete.
// options the options for deleting objects.
// The supported option is DeleteObjectsQuiet, which means the request does not return an error even if a deletion fails (not recommended). By default it's not used.
//
// DeleteObjectsResult the result object (only populated in non-quiet mode).
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
out := DeleteObjectsResult{}
@ -404,7 +411,6 @@ func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (Dele
}
isQuiet, _ := findOption(options, deleteObjectsQuiet, false)
dxml.Quiet = isQuiet.(bool)
encode := "&encoding-type=url"
bs, err := xml.Marshal(dxml)
if err != nil {
@ -418,7 +424,12 @@ func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (Dele
sum := md5.Sum(bs)
b64 := base64.StdEncoding.EncodeToString(sum[:])
options = append(options, ContentMD5(b64))
resp, err := bucket.do("POST", "", "delete"+encode, "delete", options, buffer, nil)
params := map[string]interface{}{}
params["delete"] = nil
params["encoding-type"] = "url"
resp, err := bucket.do("POST", "", params, options, buffer, nil)
if err != nil {
return out, err
}
@ -432,54 +443,58 @@ func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (Dele
return out, err
}
// IsObjectExist checks if the object exists.
//
// objectKey the object key to check.
//
// bool true if the object exists, false otherwise. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
listRes, err := bucket.ListObjects(Prefix(objectKey), MaxKeys(1))
if err != nil {
return false, err
}
if len(listRes.Objects) == 1 && listRes.Objects[0].Key == objectKey {
_, err := bucket.GetObjectMeta(objectKey)
if err == nil {
return true, nil
}
return false, nil
switch err.(type) {
case ServiceError:
if err.(ServiceError).StatusCode == 404 {
return false, nil
}
}
return false, err
}
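// A hedged sketch (key illustrative): a 404 from the HEAD request maps to
// (false, nil), while any other service error surfaces to the caller:
//
//	exists, err := bucket.IsObjectExist("maybe-missing.txt")
//	if err == nil && !exists {
//		// the object is definitely absent
//	}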
// ListObjects lists the objects under the current bucket.
//
// options the filters for listing objects.
// They can specify a prefix filter on object keys, the max keys to return, a key marker and a delimiter for grouping object names.
// The key marker means the returned objects' keys must be greater than it in lexicographic order.
//
// For example, if the bucket has 8 objects my-object-1, my-object-11, my-object-2, my-object-21,
// my-object-22, my-object-3, my-object-31 and my-object-32: with the prefix my-object-2 (and no other filters) it returns
// the three objects my-object-2, my-object-21 and my-object-22; with the marker my-object-22 (and no other filters) it returns
// the three objects my-object-3, my-object-31 and my-object-32; with max keys 5 it returns at most 5 objects.
// The three filters can be combined to implement filtering and paging.
// If the prefix is a folder name, the call lists all files under that folder, including files under its subfolders.
// If the delimiter '/' is also specified, only that folder's files are returned (no subfolders' files), and the direct subfolders are returned in the CommonPrefixes property.
// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi and fun/movie/007.avi, a prefix of "fun/" returns all three objects,
// while adding the delimiter '/' returns only "fun/test.jpg" as a file and "fun/movie/" as a common prefix.
//
// For common usage scenarios, check out sample/list_object.go.
//
// ListObjectsResult the return value after the operation succeeds (only valid when error is nil).
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
var out ListObjectsResult
options = append(options, EncodingType("url"))
params, err := handleParams(options)
params, err := getRawParams(options)
if err != nil {
return out, err
}
resp, err := bucket.do("GET", "", params, "", nil, nil, nil)
resp, err := bucket.do("GET", "", params, options, nil, nil)
if err != nil {
return out, err
}
@ -494,14 +509,13 @@ func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
return out, err
}
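// A hedged sketch of the folder-style listing described above (names
// illustrative; CommonPrefixes is the grouping property mentioned in the doc
// comment):
//
//	res, err := bucket.ListObjects(oss.Prefix("fun/"), oss.Delimiter("/"))
//	if err == nil {
//		for _, object := range res.Objects {
//			fmt.Println("file:", object.Key)
//		}
//		for _, dir := range res.CommonPrefixes {
//			fmt.Println("subfolder:", dir)
//		}
//	}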
// SetObjectMeta sets the metadata of the object.
//
// objectKey the object key.
// options options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
// ServerSideEncryption, and custom metadata (Meta).
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
options = append(options, MetadataDirective(MetaReplace))
@ -509,18 +523,18 @@ func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
return err
}
// GetObjectDetailedMeta gets the object's detailed metadata.
//
// objectKey the object key.
// options the constraints of the object. The metadata is returned only when the object meets these requirements; otherwise an error is returned. The valid options are IfModifiedSince, IfUnmodifiedSince,
// IfMatch and IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
//
// http.Header the object metadata; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
resp, err := bucket.do("HEAD", objectKey, "", "", options, nil, nil)
params := map[string]interface{}{}
resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
if err != nil {
return nil, err
}
@ -529,19 +543,21 @@ func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option)
return resp.Headers, nil
}
// GetObjectMeta gets object metadata.
//
// GetObjectMeta is more lightweight than GetObjectDetailedMeta as it only returns basic metadata, including ETag,
// size and LastModified. The size information is in the HTTP header Content-Length.
//
// objectKey the object key.
//
// http.Header the object metadata; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil)
func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) {
params := map[string]interface{}{}
params["objectMeta"] = nil
resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
if err != nil {
return nil, err
}
@ -550,26 +566,27 @@ func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
return resp.Headers, nil
}
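// A hedged sketch (key illustrative): the basic metadata arrives as plain
// HTTP headers:
//
//	meta, err := bucket.GetObjectMeta("folder/hello.txt")
//	if err == nil {
//		fmt.Println("size:", meta.Get("Content-Length"))
//	}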
// SetObjectACL updates the object's ACL.
//
// Only the bucket's owner is allowed to update an object's ACL, whose priority is higher than the bucket's ACL.
// For example, if the bucket ACL is private and an object's ACL is public-read-write,
// the object's ACL wins and all users can read and write that object.
// When an object's ACL has never been set, the bucket's ACL is used as the object's ACL.
//
// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object;
// object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects,
// CompleteMultipartUpload and CopyObject on the target object.
//
// objectKey the target object key (to set the ACL on).
// objectACL the object ACL. Valid values are ACLPrivate, ACLPublicRead and ACLPublicReadWrite.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
options := []Option{ObjectACL(objectACL)}
resp, err := bucket.do("PUT", objectKey, "acl", "acl", options, nil, nil)
params := map[string]interface{}{}
params["acl"] = nil
resp, err := bucket.do("PUT", objectKey, params, options, nil, nil)
if err != nil {
return err
}
@ -577,17 +594,18 @@ func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetObjectACL gets the object's ACL.
//
// objectKey the object key to get the ACL of.
//
// GetObjectACLResult the result object when error is nil. GetObjectACLResult.Acl is the object ACL.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
var out GetObjectACLResult
resp, err := bucket.do("GET", objectKey, "acl", "acl", nil, nil, nil)
params := map[string]interface{}{}
params["acl"] = nil
resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
if err != nil {
return out, err
}
@ -597,8 +615,320 @@ func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error)
return out, err
}
// PutSymlink creates a symlink that points to an existing object.
//
// A symlink cannot point to another symlink.
// When creating a symlink, OSS does not check whether the target object exists or whether it is itself a symlink,
// nor does it check the caller's permission on the target object. All these checks are deferred to the actual GetObject call made via the symlink.
// If the symlink key already exists and the caller has write permission, the existing object is overwritten.
// If x-oss-meta- headers are specified, they are added as the metadata of the symlink object.
//
// symObjectKey the symlink object's key.
// targetObjectKey the target object key to point to.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
params := map[string]interface{}{}
params["symlink"] = nil
resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetSymlink gets the symlink object with the specified key.
// If the symlink object does not exist, a 404 error is returned.
//
// objectKey the symlink object's key.
//
// error it's nil if no error, otherwise it's an error object.
// When error is nil, the target object key is in the X-Oss-Symlink-Target header of the returned headers.
//
func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
params := map[string]interface{}{}
params["symlink"] = nil
resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget)
targetObjectKey, err = url.QueryUnescape(targetObjectKey)
if err != nil {
return resp.Headers, err
}
resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey)
return resp.Headers, err
}
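// A hedged sketch (keys illustrative): create a symlink, then read its target
// back from the X-Oss-Symlink-Target header noted above:
//
//	if err := bucket.PutSymlink("link.txt", "target.txt"); err == nil {
//		headers, err := bucket.GetSymlink("link.txt")
//		if err == nil {
//			fmt.Println("points to:", headers.Get("X-Oss-Symlink-Target"))
//		}
//	}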
// RestoreObject restores the object from archive storage.
//
// An archive object is in cold status by default and cannot be accessed.
// When restore is called on a cold object, it becomes available for access after some time.
// If restore is called again while the object is being restored, the server does nothing extra and returns success.
// By default, the restored object is accessible for one day, after which it becomes unavailable again.
// If RestoreObject is called again after the object has been restored, its access time is extended by one more day, up to 7 days at most.
//
// objectKey the object key to restore.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) RestoreObject(objectKey string) error {
params := map[string]interface{}{}
params["restore"] = nil
resp, err := bucket.do("POST", objectKey, params, nil, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
}
// SignURL signs a URL. Users can access the object directly with this URL without needing the access key.
//
// objectKey the target object to sign.
// method the HTTP method to be authorized.
// expiredInSec the validity period of the signed URL, in seconds.
// options the options for the signed URL.
//
// string the signed URL, when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
if expiredInSec < 0 {
return "", fmt.Errorf("invalid expires: %d, expires must bigger than 0", expiredInSec)
}
expiration := time.Now().Unix() + expiredInSec
params, err := getRawParams(options)
if err != nil {
return "", err
}
headers := make(map[string]string)
err = handleOptions(headers, options)
if err != nil {
return "", err
}
return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
}
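// A hedged sketch of the pre-signing flow (key and expiry illustrative;
// oss.HTTPGet is assumed to be the HTTPMethod constant for GET):
//
//	signedURL, err := bucket.SignURL("folder/hello.txt", oss.HTTPGet, 60)
//	if err == nil {
//		body, err := bucket.GetObjectWithURL(signedURL)
//		if err == nil {
//			defer body.Close()
//			// read the object data from body
//		}
//	}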
// PutObjectWithURL uploads an object with a signed URL. If the object exists, it is overwritten.
// It does not derive the mimetype from the object key name.
//
// signedURL the signed URL.
// reader an io.Reader instance providing the data to upload.
// options the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding,
// Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
//
// error it's nil if no error, otherwise it's an error object.
//
//
func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
if err != nil {
return err
}
defer resp.Body.Close()
return err
}
// PutObjectFromFileWithURL uploads an object from a local file with a signed URL.
// It does not derive the mimetype from the object key name or the local file name.
//
// signedURL the signed URL.
// filePath the local file path of the data to upload.
// options options for uploading; same as the options in the PutObject function.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
fd, err := os.Open(filePath)
if err != nil {
return err
}
defer fd.Close()
resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options)
if err != nil {
return err
}
defer resp.Body.Close()
return err
}
// DoPutObjectWithURL is the actual API that does the upload with a signed URL (internal to the SDK).
//
// signedURL the signed URL.
// reader io.Reader the read instance for getting the data to upload.
// options options for uploading.
//
// Response the response object which contains the HTTP response.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
listener := getProgressListener(options)
params := map[string]interface{}{}
resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
if err != nil {
return nil, err
}
if bucket.getConfig().IsEnableCRC {
err = checkCRC(resp, "DoPutObjectWithURL")
if err != nil {
return resp, err
}
}
err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
return resp, err
}
// GetObjectWithURL downloads the object and returns the reader instance, with the signed URL.
//
// signedURL the signed URL.
// options options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch,
// IfNoneMatch, AcceptEncoding. For more information, check out the following link:
// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
//
// io.ReadCloser the reader object for reading the data from the response. It must be closed after use. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) {
result, err := bucket.DoGetObjectWithURL(signedURL, options)
if err != nil {
return nil, err
}
return result.Response, nil
}
// GetObjectToFileWithURL downloads the object into a local file with the signed URL.
//
// signedURL the signed URL
// filePath the local file path to download to.
// options the options for downloading object. Check out the parameter options in function GetObject for the reference.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error {
tempFilePath := filePath + TempFileSuffix
// Get the object's content
result, err := bucket.DoGetObjectWithURL(signedURL, options)
if err != nil {
return err
}
defer result.Response.Close()
// If the file does not exist, create one. If exists, then overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
if err != nil {
return err
}
// Save the data to the file.
_, err = io.Copy(fd, result.Response.Body)
fd.Close()
if err != nil {
return err
}
// Compare the CRC value. If CRC values do not match, return error.
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil)
acceptEncoding := ""
if encodeOpt != nil {
acceptEncoding = encodeOpt.(string)
}
if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
result.Response.ClientCRC = result.ClientCRC.Sum64()
err = checkCRC(result.Response, "GetObjectToFileWithURL")
if err != nil {
os.Remove(tempFilePath)
return err
}
}
return os.Rename(tempFilePath, filePath)
}
// DoGetObjectWithURL is the actual API that downloads the file with the signed URL.
//
// signedURL the signed URL.
// options the options for getting object. Check out parameter options in GetObject for the reference.
//
// GetObjectResult the result object when the error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
params, _ := getRawParams(options)
resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil)
if err != nil {
return nil, err
}
result := &GetObjectResult{
Response: resp,
}
// CRC
var crcCalc hash.Hash64
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
if bucket.getConfig().IsEnableCRC && !hasRange {
crcCalc = crc64.New(crcTable())
result.ServerCRC = resp.ServerCRC
result.ClientCRC = crcCalc
}
// Progress
listener := getProgressListener(options)
contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
return result, nil
}
//
// ProcessObject applies a process to the specified image object.
//
// The supported processes include resize, rotate, crop, watermark, format,
// udf, customized style, etc.
//
// objectKey object key to process.
// process process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) ProcessObject(objectKey string, process string) (ProcessObjectResult, error) {
var out ProcessObjectResult
params := map[string]interface{}{}
params["x-oss-process"] = nil
processData := fmt.Sprintf("%v=%v", "x-oss-process", process)
data := strings.NewReader(processData)
resp, err := bucket.do("POST", objectKey, params, nil, data, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = jsonUnmarshal(resp.Body, &out)
return out, err
}
// Private
func (bucket Bucket) do(method, objectName, urlParams, subResource string, options []Option,
func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
data io.Reader, listener ProgressListener) (*Response, error) {
headers := make(map[string]string)
err := handleOptions(headers, options)
@ -606,7 +936,17 @@ func (bucket Bucket) do(method, objectName, urlParams, subResource string, optio
return nil, err
}
return bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
urlParams, subResource, headers, data, 0, listener)
params, headers, data, 0, listener)
}
func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
data io.Reader, listener ProgressListener) (*Response, error) {
headers := make(map[string]string)
err := handleOptions(headers, options)
if err != nil {
return nil, err
}
return bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener)
}
func (bucket Bucket) getConfig() *Config {


@ -11,70 +11,68 @@ import (
"time"
)
//
// Client is the SDK's entry point. It implements bucket-related operations such as creating or deleting a bucket
// and setting/getting the bucket ACL, lifecycle, referer, logging and website configuration.
// Object (file) upload and download are done through Bucket.
// Users use oss.New to create a Client instance.
//
type (
// Client OSS client
Client struct {
Config *Config // OSS client configuration
Conn *Conn // Send HTTP request
HTTPClient *http.Client // http.Client to use; if nil, the Client makes its own
}
// ClientOption client option such as UseCname, Timeout, SecurityToken.
ClientOption func(*Client)
)
// New creates a new client.
//
// endpoint the OSS datacenter endpoint such as http://oss-cn-hangzhou.aliyuncs.com .
// accessKeyId the access key ID.
// accessKeySecret the access key secret.
// options client options such as UseCname, Timeout, SecurityToken.
//
// Client the new client instance; it's valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
// Configuration
config := getDefaultOssConfig()
config.Endpoint = endpoint
config.AccessKeyID = accessKeyID
config.AccessKeySecret = accessKeySecret
// URL parse
url := &urlMaker{}
url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
// HTTP connect
conn := &Conn{config: config, url: url}
// OSS client
client := &Client{
config,
conn,
Config: config,
Conn: conn,
}
// Client options parse
for _, option := range options {
option(client)
}
// create http connect
err := conn.init(config, url)
// Create HTTP connection
err := conn.init(config, url, client.HTTPClient)
return client, err
}
// Bucket gets the bucket instance.
//
// bucketName the bucket name.
//
// Bucket the bucket object, when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) Bucket(bucketName string) (*Bucket, error) {
return &Bucket{
@ -83,21 +81,36 @@ func (client Client) Bucket(bucketName string) (*Bucket, error) {
}, nil
}
// CreateBucket creates a bucket.
//
// bucketName the bucket name. It's globally unique and immutable. The bucket name can only consist of lowercase letters, numbers and dashes ('-').
// It must start with a lowercase letter or a number, and the length must be between 3 and 255 bytes.
// options options for creating the bucket, with an optional ACL. The ACL can be ACLPrivate, ACLPublicRead or ACLPublicReadWrite. By default it's ACLPrivate.
// The StorageClass option can also be specified; it supports StorageStandard, StorageIA (infrequent access) and StorageArchive.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) CreateBucket(bucketName string, options ...Option) error {
headers := make(map[string]string)
handleOptions(headers, options)
resp, err := client.do("PUT", bucketName, "", "", headers, nil)
buffer := new(bytes.Buffer)
isOptSet, val, _ := isOptionSet(options, storageClass)
if isOptSet {
cbConfig := createBucketConfiguration{StorageClass: val.(StorageClassType)}
bs, err := xml.Marshal(cbConfig)
if err != nil {
return err
}
buffer.Write(bs)
contentType := http.DetectContentType(buffer.Bytes())
headers[HTTPHeaderContentType] = contentType
}
params := map[string]interface{}{}
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
@ -106,25 +119,25 @@ func (client Client) CreateBucket(bucketName string, options ...Option) error {
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
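// A hedged usage sketch (bucket name hypothetical), assuming the SDK's ACL and
// StorageClass option constructors:
//
//	err := client.CreateBucket("my-bucket", oss.ACL(oss.ACLPrivate), oss.StorageClass(oss.StorageIA))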
// ListBuckets lists buckets of the current account under the given endpoint, with optional filters.
//
// options specifies the filters, such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter.
//         Marker makes sure the returned buckets' names are greater than it in lexicographic order.
//         MaxKeys limits the number of returned buckets; by default it's 100, and it can be up to 1000.
//         For the common usage scenario, check out sample/list_bucket.go.
//
// ListBucketsResponse the response object; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
var out ListBucketsResult
	params, err := getRawParams(options)
if err != nil {
return out, err
}
resp, err := client.do("GET", "", params, "", nil, nil)
resp, err := client.do("GET", "", params, nil, nil)
if err != nil {
return out, err
}
@ -134,13 +147,12 @@ func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
return out, err
}
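// A short usage sketch (prefix hypothetical), filtering with the Prefix and MaxKeys
// options described above:
//
//	lsRes, err := client.ListBuckets(oss.Prefix("my-"), oss.MaxKeys(10))
//	if err == nil {
//		for _, b := range lsRes.Buckets {
//			fmt.Println(b.Name)
//		}
//	}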
// IsBucketExist checks if the bucket exists.
//
// bucketName the bucket name.
//
// bool  true if the bucket exists; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) IsBucketExist(bucketName string) (bool, error) {
listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
@ -154,15 +166,15 @@ func (client Client) IsBucketExist(bucketName string) (bool, error) {
return false, nil
}
// DeleteBucket deletes the bucket. Only an empty bucket can be deleted (one with no objects and no uncompleted multipart uploads).
//
// bucketName the bucket name.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucket(bucketName string) error {
resp, err := client.do("DELETE", bucketName, "", "", nil, nil)
params := map[string]interface{}{}
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
@ -171,19 +183,20 @@ func (client Client) DeleteBucket(bucketName string) error {
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLocation gets the bucket location.
//
// For more information about endpoints and datacenters, check out the following link:
// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
//
// bucketName the bucket name.
//
// string the bucket's datacenter location.
// error  it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketLocation(bucketName string) (string, error) {
resp, err := client.do("GET", bucketName, "location", "location", nil, nil)
params := map[string]interface{}{}
params["location"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return "", err
}
@ -194,18 +207,17 @@ func (client Client) GetBucketLocation(bucketName string) (string, error) {
return LocationConstraint, err
}
// SetBucketACL sets the bucket's ACL.
//
// bucketName the bucket name.
// bucketACL  the bucket ACL: ACLPrivate, ACLPublicRead or ACLPublicReadWrite.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
resp, err := client.do("PUT", bucketName, "", "", headers, nil)
params := map[string]interface{}{}
resp, err := client.do("PUT", bucketName, params, headers, nil)
if err != nil {
return err
}
@ -213,17 +225,18 @@ func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetBucketACL gets the bucket's ACL.
//
// bucketName the bucket name.
//
// GetBucketAclResponse the result object; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) {
var out GetBucketACLResult
resp, err := client.do("GET", bucketName, "acl", "acl", nil, nil)
params := map[string]interface{}{}
params["acl"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
@ -233,19 +246,16 @@ func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error)
return out, err
}
// SetBucketLifecycle sets the bucket's lifecycle.
//
// OSS provides lifecycle management for the objects in a bucket. The bucket owner can define lifecycle rules
// for the bucket with SetBucketLifecycle. Once the lifecycle is enabled, OSS periodically and automatically
// deletes the objects matching the rules. For more information, check out the following link:
// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
//
// bucketName the bucket name.
// rules      the lifecycle rules. There are two kinds of rules: absolute-time expiration (by year/month/day) and relative-time expiration (by days).
//            Check out sample/bucket_lifecycle.go for more details.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
lxml := lifecycleXML{Rules: convLifecycleRule(rules)}
@ -260,7 +270,9 @@ func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
resp, err := client.do("PUT", bucketName, "lifecycle", "lifecycle", headers, buffer)
params := map[string]interface{}{}
params["lifecycle"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
@ -268,16 +280,17 @@ func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
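// A hedged sketch (rule ID and prefix hypothetical), assuming the
// BuildLifecycleRuleByDays helper shown in sample/bucket_lifecycle.go: expire
// objects under "logs/" 30 days after their last modification.
//
//	rule := oss.BuildLifecycleRuleByDays("rule1", "logs/", true, 30)
//	err := client.SetBucketLifecycle("my-bucket", []oss.LifecycleRule{rule})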
// DeleteBucketLifecycle deletes the bucket's lifecycle settings.
//
// bucketName the bucket name.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucketLifecycle(bucketName string) error {
resp, err := client.do("DELETE", bucketName, "lifecycle", "lifecycle", nil, nil)
params := map[string]interface{}{}
params["lifecycle"] = nil
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
@ -285,17 +298,18 @@ func (client Client) DeleteBucketLifecycle(bucketName string) error {
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLifecycle gets the bucket's lifecycle settings.
//
// bucketName the bucket name.
//
// GetBucketLifecycleResponse the result object upon successful request; it's only valid when error is nil. Its Rules field is the bucket's lifecycle rule list.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) {
var out GetBucketLifecycleResult
resp, err := client.do("GET", bucketName, "lifecycle", "lifecycle", nil, nil)
params := map[string]interface{}{}
params["lifecycle"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
@ -305,21 +319,20 @@ func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleRe
return out, err
}
// SetBucketReferer sets the bucket's referer whitelist and the flag for allowing requests with an empty referer.
//
// To prevent the bucket's data from being hotlinked, OSS supports referer-based access control using the HTTP Referer header.
// The referer whitelist, and whether requests with an empty referer are allowed, can be configured through the web console or through this API.
// For example, if the bucket oss-example has its referer whitelist set to http://www.aliyun.com, only requests whose
// referer is http://www.aliyun.com can access the bucket's objects. Note that this applies to requests from web browsers only.
// For more information, check out this link:
// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
//
// bucketName        the bucket name.
// referers          the referer whitelist. A bucket can have multiple referers, and each referer supports the wildcards '*' and '?'.
//                   A sample can be found in sample/bucket_referer.go.
// allowEmptyReferer the flag for allowing requests with an empty referer. By default it's true.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error {
rxml := RefererXML{}
@ -343,7 +356,9 @@ func (client Client) SetBucketReferer(bucketName string, referers []string, allo
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
resp, err := client.do("PUT", bucketName, "referer", "referer", headers, buffer)
params := map[string]interface{}{}
params["referer"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
@ -351,17 +366,18 @@ func (client Client) SetBucketReferer(bucketName string, referers []string, allo
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
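// A hedged usage sketch (names hypothetical): allow only requests whose referer is
// http://www.aliyun.com and reject requests with an empty referer.
//
//	err := client.SetBucketReferer("my-bucket", []string{"http://www.aliyun.com"}, false)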
// GetBucketReferer gets the bucket's referer whitelist.
//
// bucketName the bucket name.
//
// GetBucketRefererResponse the result object upon successful request; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) {
var out GetBucketRefererResult
resp, err := client.do("GET", bucketName, "referer", "referer", nil, nil)
params := map[string]interface{}{}
params["referer"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
@ -371,18 +387,17 @@ func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult
return out, err
}
// SetBucketLogging sets the bucket logging settings.
//
// OSS can automatically store access logs. Only the bucket owner can enable logging.
// Once enabled, OSS saves all the access logs of this bucket as hourly log files into the specified target bucket.
// For more information, check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
//
// bucketName   the name of the bucket on which to enable logging.
// targetBucket the target bucket name for storing the log files.
// targetPrefix the prefix of the stored log files. If empty, the access logs of all objects are recorded.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
isEnable bool) error {
@ -409,7 +424,9 @@ func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix str
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
resp, err := client.do("PUT", bucketName, "logging", "logging", headers, buffer)
params := map[string]interface{}{}
params["logging"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
@ -417,15 +434,16 @@ func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix str
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
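// A hedged usage sketch (names hypothetical): store my-bucket's access logs in
// log-bucket under the "access/" prefix.
//
//	err := client.SetBucketLogging("my-bucket", "log-bucket", "access/", true)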
// DeleteBucketLogging deletes the logging configuration to disable the logging on the bucket.
//
// bucketName the name of the bucket on which to disable logging.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucketLogging(bucketName string) error {
resp, err := client.do("DELETE", bucketName, "logging", "logging", nil, nil)
params := map[string]interface{}{}
params["logging"] = nil
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
@ -433,17 +451,18 @@ func (client Client) DeleteBucketLogging(bucketName string) error {
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLogging gets the bucket's logging settings.
//
// bucketName the bucket name.
//
// GetBucketLoggingResponse the result object upon successful request; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) {
var out GetBucketLoggingResult
resp, err := client.do("GET", bucketName, "logging", "logging", nil, nil)
params := map[string]interface{}{}
params["logging"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
@ -453,17 +472,16 @@ func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult
return out, err
}
// SetBucketWebsite sets the bucket's static website's index and error page.
//
// OSS supports static website hosting for bucket data. When a bucket is enabled for it, the files in the bucket can be accessed like a static website.
// For more information, check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
//
// bucketName    the name of the bucket on which to enable static website hosting.
// indexDocument the index page.
// errorDocument the error page.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error {
wxml := WebsiteXML{}
@ -481,7 +499,9 @@ func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument s
headers := make(map[string]string)
headers[HTTPHeaderContentType] = contentType
resp, err := client.do("PUT", bucketName, "website", "website", headers, buffer)
params := map[string]interface{}{}
params["website"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
@ -489,15 +509,16 @@ func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument s
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
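// A hedged usage sketch (page names hypothetical):
//
//	err := client.SetBucketWebsite("my-bucket", "index.html", "error.html")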
// DeleteBucketWebsite deletes the bucket's static web site settings.
//
// bucketName the name of the bucket on which to remove the static website settings.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucketWebsite(bucketName string) error {
resp, err := client.do("DELETE", bucketName, "website", "website", nil, nil)
params := map[string]interface{}{}
params["website"] = nil
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
@ -505,17 +526,18 @@ func (client Client) DeleteBucketWebsite(bucketName string) error {
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketWebsite gets the bucket's default page (index page) and the error page.
//
// bucketName the bucket name.
//
// GetBucketWebsiteResponse the result object upon successful request; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) {
var out GetBucketWebsiteResult
resp, err := client.do("GET", bucketName, "website", "website", nil, nil)
params := map[string]interface{}{}
params["website"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
@ -525,15 +547,14 @@ func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult
return out, err
}
// SetBucketCORS sets the bucket's CORS rules.
//
// For more information, check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
//
// bucketName the bucket name.
// corsRules  the CORS rules to set. A sample can be found in sample/bucket_cors.go.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error {
corsxml := CORSXML{}
@ -558,7 +579,9 @@ func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) erro
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
resp, err := client.do("PUT", bucketName, "cors", "cors", headers, buffer)
params := map[string]interface{}{}
params["cors"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
@ -566,15 +589,16 @@ func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) erro
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
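// A hedged sketch (origins and limits hypothetical; the field names assume the SDK's
// CORSRule type):
//
//	rule := oss.CORSRule{
//		AllowedOrigin: []string{"*"},
//		AllowedMethod: []string{"GET", "HEAD"},
//		MaxAgeSeconds: 100,
//	}
//	err := client.SetBucketCORS("my-bucket", []oss.CORSRule{rule})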
// DeleteBucketCORS deletes the bucket's CORS settings.
//
// bucketName the bucket name.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucketCORS(bucketName string) error {
resp, err := client.do("DELETE", bucketName, "cors", "cors", nil, nil)
params := map[string]interface{}{}
params["cors"] = nil
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
@ -582,18 +606,18 @@ func (client Client) DeleteBucketCORS(bucketName string) error {
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketCORS gets the bucket's CORS settings.
//
// bucketName the bucket name.
//
// GetBucketCORSResult the result object upon successful request; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) {
var out GetBucketCORSResult
resp, err := client.do("GET", bucketName, "cors", "cors", nil, nil)
params := map[string]interface{}{}
params["cors"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
@ -603,17 +627,18 @@ func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, erro
return out, err
}
// GetBucketInfo gets the bucket information.
//
// bucketName the bucket name.
//
// GetBucketInfoResult the result object upon successful request; it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) {
var out GetBucketInfoResult
resp, err := client.do("GET", bucketName, "bucketInfo", "bucketInfo", nil, nil)
params := map[string]interface{}{}
params["bucketInfo"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
@ -623,10 +648,9 @@ func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, erro
return out, err
}
// UseCname sets the flag of using CName. By default it's false.
//
// isUseCname true: the endpoint is in CName format; false: it is not. Default is false.
//
func UseCname(isUseCname bool) ClientOption {
return func(client *Client) {
@ -635,11 +659,10 @@ func UseCname(isUseCname bool) ClientOption {
}
}
// Timeout sets the HTTP timeout in seconds.
//
// connectTimeoutSec HTTP connect timeout in seconds. Default is 10 seconds. 0 means infinite (not recommended).
// readWriteTimeout  HTTP read/write timeout in seconds. Default is 20 seconds. 0 means infinite.
//
func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
return func(client *Client) {
@ -649,15 +672,16 @@ func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
time.Second * time.Duration(readWriteTimeout)
client.Config.HTTPTimeout.HeaderTimeout =
time.Second * time.Duration(readWriteTimeout)
client.Config.HTTPTimeout.IdleConnTimeout =
time.Second * time.Duration(readWriteTimeout)
client.Config.HTTPTimeout.LongTimeout =
time.Second * time.Duration(readWriteTimeout*10)
}
}
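// A usage sketch (values hypothetical): options are applied at client construction,
// so a 30s connect timeout and a 120s read/write timeout would be set like this:
//
//	client, err := oss.New(endpoint, accessKeyID, accessKeySecret, oss.Timeout(30, 120))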
// SecurityToken sets the temporary user's SecurityToken.
//
// token the STS token.
//
func SecurityToken(token string) ClientOption {
return func(client *Client) {
@ -665,10 +689,9 @@ func SecurityToken(token string) ClientOption {
}
}
// EnableMD5 enables MD5 validation.
//
// isEnableMD5 true: enable MD5 validation; false: disable it.
//
func EnableMD5(isEnableMD5 bool) ClientOption {
return func(client *Client) {
@ -676,10 +699,9 @@ func EnableMD5(isEnableMD5 bool) ClientOption {
}
}
// MD5ThresholdCalcInMemory sets the memory usage threshold for computing the MD5, default is 16MB.
//
// threshold the memory threshold in bytes. When the uploaded content is smaller than the threshold, the MD5 is computed in memory; otherwise a temp file is used.
//
func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
return func(client *Client) {
@ -687,10 +709,9 @@ func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
}
}
// EnableCRC enables the CRC checksum. Default is true.
//
// isEnableCRC true: enable CRC checksum; false: disable it.
//
func EnableCRC(isEnableCRC bool) ClientOption {
return func(client *Client) {
@ -698,10 +719,9 @@ func EnableCRC(isEnableCRC bool) ClientOption {
}
}
// UserAgent specifies UserAgent. The default is aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2).
//
// userAgent the user agent string.
//
func UserAgent(userAgent string) ClientOption {
return func(client *Client) {
@ -709,10 +729,9 @@ func UserAgent(userAgent string) ClientOption {
}
}
// Proxy sets the proxy (optional). The default is not using proxy.
//
// proxyHost the proxy host in the format "host" or "host:port". For example, proxy.com:80 .
//
func Proxy(proxyHost string) ClientOption {
return func(client *Client) {
@ -722,12 +741,11 @@ func Proxy(proxyHost string) ClientOption {
}
}
// AuthProxy sets the proxy information with user name and password.
//
// proxyHost     the proxy host in the format "host" or "host:port". For example, proxy.com:80 .
// proxyUser     the proxy user name.
// proxyPassword the proxy password.
//
func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
return func(client *Client) {
@ -740,9 +758,18 @@ func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
}
}
//
// HTTPClient sets the http.Client in use to the one passed in
//
func HTTPClient(HTTPClient *http.Client) ClientOption {
return func(client *Client) {
client.HTTPClient = HTTPClient
}
}
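// A usage sketch (timeout value hypothetical): supply a pre-configured http.Client
// instead of letting the SDK build its own.
//
//	hc := &http.Client{Timeout: 60 * time.Second}
//	client, err := oss.New(endpoint, accessKeyID, accessKeySecret, oss.HTTPClient(hc))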
// Private
func (client Client) do(method, bucketName string, params map[string]interface{},
headers map[string]string, data io.Reader) (*Response, error) {
return client.Conn.Do(method, bucketName, "", params,
headers, data, 0, nil)
}

View File

@ -4,37 +4,44 @@ import (
"time"
)
// HTTPTimeout defines HTTP timeout.
type HTTPTimeout struct {
ConnectTimeout time.Duration
ReadWriteTimeout time.Duration
HeaderTimeout time.Duration
LongTimeout time.Duration
IdleConnTimeout time.Duration
}
// HTTPMaxConns defines the max idle connections and max idle connections per host
type HTTPMaxConns struct {
MaxIdleConns int
MaxIdleConnsPerHost int
}
// Config defines OSS configuration
type Config struct {
Endpoint string // OSS endpoint
AccessKeyID string // AccessId
AccessKeySecret string // AccessKey
	RetryTimes      uint         // Retry count. By default it's 5.
UserAgent string // SDK name/version/system information
IsDebug bool // Enable debug mode. Default is false.
Timeout uint // Timeout in seconds. By default it's 60.
SecurityToken string // STS Token
IsCname bool // If cname is in the endpoint.
HTTPTimeout HTTPTimeout // HTTP timeout
	HTTPMaxConns    HTTPMaxConns // HTTP max connections
IsUseProxy bool // Flag of using proxy.
ProxyHost string // Flag of using proxy host.
IsAuthProxy bool // Flag of needing authentication.
ProxyUser string // Proxy user
ProxyPassword string // Proxy password
IsEnableMD5 bool // Flag of enabling MD5 for upload.
	MD5Threshold    int64        // Memory footprint threshold for each MD5 computation (16MB by default), in bytes. When the data is larger than that, a temp file is used.
IsEnableCRC bool // Flag of enabling CRC for upload.
}
// getDefaultOssConfig gets the default configuration.
func getDefaultOssConfig() *Config {
config := Config{}
@ -43,8 +50,8 @@ func getDefaultOssConfig() *Config {
config.AccessKeySecret = ""
config.RetryTimes = 5
config.IsDebug = false
config.UserAgent = userAgent()
config.Timeout = 60 // Seconds
config.SecurityToken = ""
config.IsCname = false
@ -52,6 +59,9 @@ func getDefaultOssConfig() *Config {
config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s
config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s
config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s
config.HTTPMaxConns.MaxIdleConns = 100
config.HTTPMaxConns.MaxIdleConnsPerHost = 100
config.IsUseProxy = false
config.ProxyHost = ""

View File

@ -4,6 +4,7 @@ import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"hash"
@ -13,64 +14,178 @@ import (
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
"time"
)
// Conn defines OSS Conn
type Conn struct {
config *Config
url *urlMaker
client *http.Client
}
var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore", "callback", "callback-var"}

// init initializes Conn
func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error {
	if client == nil {
		// New transport
		transport := newTransport(conn, config)

		// Proxy
		if conn.config.IsUseProxy {
			proxyURL, err := url.Parse(config.ProxyHost)
			if err != nil {
				return err
			}
			transport.Proxy = http.ProxyURL(proxyURL)
		}
		client = &http.Client{Transport: transport}
	}

	conn.config = config
	conn.url = urlMaker
	conn.client = client

	return nil
}
// Do sends the request and returns the response
func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
urlParams := conn.getURLParams(params)
subResource := conn.getSubResource(params)
uri := conn.url.getURL(bucketName, objectName, urlParams)
resource := conn.url.getResource(bucketName, objectName, subResource)
return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
}
// DoURL sends the request with signed URL and returns the response result.
func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
// Get URI from signedURL
uri, err := url.ParseRequestURI(signedURL)
if err != nil {
return nil, err
}
m := strings.ToUpper(string(method))
req := &http.Request{
Method: m,
URL: uri,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: uri.Host,
}
tracker := &readerTracker{completedBytes: 0}
fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
if fd != nil {
defer func() {
fd.Close()
os.Remove(fd.Name())
}()
}
if conn.config.IsAuthProxy {
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
req.Header.Set("Proxy-Authorization", basic)
}
req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if headers != nil {
for k, v := range headers {
req.Header.Set(k, v)
}
}
// Transfer started
event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
publishProgress(listener, event)
resp, err := conn.client.Do(req)
if err != nil {
// Transfer failed
event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
publishProgress(listener, event)
return nil, err
}
// Transfer completed
event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
publishProgress(listener, event)
return conn.handleResponse(resp, crc)
}
func (conn Conn) getURLParams(params map[string]interface{}) string {
// Sort
keys := make([]string, 0, len(params))
for k := range params {
keys = append(keys, k)
}
sort.Strings(keys)
// Serialize
var buf bytes.Buffer
for _, k := range keys {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(url.QueryEscape(k))
if params[k] != nil {
buf.WriteString("=" + url.QueryEscape(params[k].(string)))
}
}
return buf.String()
}
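// For illustration (values hypothetical): given params = map[string]interface{}{"acl": nil,
// "uploadId": "0004B999"}, getURLParams returns "acl&uploadId=0004B999"; keys are sorted,
// and nil-valued keys are serialized bare, without '='.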
func (conn Conn) getSubResource(params map[string]interface{}) string {
// Sort
keys := make([]string, 0, len(params))
for k := range params {
if conn.isParamSign(k) {
keys = append(keys, k)
}
}
sort.Strings(keys)
// Serialize
var buf bytes.Buffer
for _, k := range keys {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(k)
if params[k] != nil {
buf.WriteString("=" + params[k].(string))
}
}
return buf.String()
}
func (conn Conn) isParamSign(paramKey string) bool {
for _, k := range signKeyList {
if paramKey == k {
return true
}
}
return false
}
func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
method = strings.ToUpper(method)
if !conn.config.IsUseProxy {
uri.Opaque = uri.Path
}
req := &http.Request{
Method: method,
URL: uri,
@ -112,33 +227,72 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
conn.signHeader(req, canonicalizedResource)
	// Transfer started
event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
publishProgress(listener, event)
resp, err := conn.client.Do(req)
if err != nil {
		// Transfer failed
event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
publishProgress(listener, event)
return nil, err
}
	// Transfer completed
event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
publishProgress(listener, event)
return conn.handleResponse(resp, crc)
}
// signURL signs the request URL with the given expiration and returns the signed URL
func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
if conn.config.SecurityToken != "" {
params[HTTPParamSecurityToken] = conn.config.SecurityToken
}
subResource := conn.getSubResource(params)
canonicalizedResource := conn.url.getResource(bucketName, objectName, subResource)
m := strings.ToUpper(string(method))
req := &http.Request{
Method: m,
Header: make(http.Header),
}
if conn.config.IsAuthProxy {
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
req.Header.Set("Proxy-Authorization", basic)
}
req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10))
req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if headers != nil {
for k, v := range headers {
req.Header.Set(k, v)
}
}
signedStr := conn.getSignedStr(req, canonicalizedResource)
params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
params[HTTPParamAccessKeyID] = conn.config.AccessKeyID
params[HTTPParamSignature] = signedStr
urlParams := conn.getURLParams(params)
return conn.url.getSignURL(bucketName, objectName, urlParams)
}
// handleBody handles request body
func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
var file *os.File
var crc hash.Hash64
reader := body
	// Length
switch v := body.(type) {
case *bytes.Buffer:
req.ContentLength = int64(v.Len())
@ -153,20 +307,20 @@ func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
}
req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
	// MD5
if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
md5 := ""
reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
req.Header.Set(HTTPHeaderContentMD5, md5)
}
	// CRC
if reader != nil && conn.config.IsEnableCRC {
crc = NewCRC(crcTable(), initCRC)
reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
}
	// HTTP body
rc, ok := reader.(io.ReadCloser)
if !ok && reader != nil {
rc = ioutil.NopCloser(reader)
@ -181,7 +335,7 @@ func tryGetFileSize(f *os.File) int64 {
return fInfo.Size()
}
// handleResponse handles the response
func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
var cliCRC uint64
var srvCRC uint64
@ -196,24 +350,28 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
}
	if len(respBody) == 0 {
		// No error in response body
		err = ServiceError{
			StatusCode: statusCode,
			RequestID:  resp.Header.Get(HTTPHeaderOssRequestID),
		}
	} else {
		// Response contains storage service error object, unmarshal
		srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
			resp.Header.Get(HTTPHeaderOssRequestID))
		if errIn != nil { // error unmarshaling the error response
			err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
		} else {
			err = srvErr
		}
	}
return &Response{
StatusCode: resp.StatusCode,
Headers: resp.Header,
Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
}, err
} else if statusCode >= 300 && statusCode <= 307 {
		// OSS uses 3xx, but the response has no body
err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
return &Response{
StatusCode: resp.StatusCode,
@ -239,7 +397,7 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
if contentLen == 0 || contentLen > md5Threshold {
		// Huge body, use temporary file
tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
if tempFile != nil {
io.Copy(tempFile, body)
@ -252,7 +410,7 @@ func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader,
reader = tempFile
}
} else {
		// Small body, use memory
buf, _ := ioutil.ReadAll(body)
sum := md5.Sum(buf)
b64 = base64.StdEncoding.EncodeToString(sum[:])
@ -272,9 +430,11 @@ func readResponseBody(resp *http.Response) ([]byte, error) {
func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
var storageErr ServiceError
if err := xml.Unmarshal(body, &storageErr); err != nil {
return storageErr, err
}
storageErr.StatusCode = statusCode
storageErr.RequestID = requestID
storageErr.RawMessage = string(body)
@ -289,7 +449,15 @@ func xmlUnmarshal(body io.Reader, v interface{}) error {
return xml.Unmarshal(data, v)
}
// jsonUnmarshal unmarshals the JSON body
func jsonUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
return json.Unmarshal(data, v)
}
// timeoutConn handles HTTP timeout
type timeoutConn struct {
conn net.Conn
timeout time.Duration
@ -343,7 +511,7 @@ func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
// urlMaker builds URL and resource
const (
urlTypeCname = 1
urlTypeIP = 2
@ -351,13 +519,13 @@ const (
)
type urlMaker struct {
	Scheme  string // HTTP or HTTPS
	NetLoc  string // Host or IP
	Type    int    // 1 CNAME, 2 IP, 3 ALIYUN
	IsProxy bool   // Proxy
}
// Init parses endpoint
func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
if strings.HasPrefix(endpoint, "http://") {
um.Scheme = "http"
@ -373,7 +541,11 @@ func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
host, _, err := net.SplitHostPort(um.NetLoc)
if err != nil {
host = um.NetLoc
if host[0] == '[' && host[len(host)-1] == ']' {
host = host[1 : len(host)-1]
}
}
ip := net.ParseIP(host)
if ip != nil {
um.Type = urlTypeIP
@ -385,14 +557,32 @@ func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
um.IsProxy = isProxy
}
// getURL gets URL
func (um urlMaker) getURL(bucket, object, params string) *url.URL {
host, path := um.buildURL(bucket, object)
addr := ""
if params == "" {
addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path)
} else {
addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
}
uri, _ := url.ParseRequestURI(addr)
return uri
}
// getSignURL gets sign URL
func (um urlMaker) getSignURL(bucket, object, params string) string {
host, path := um.buildURL(bucket, object)
return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
}
// buildURL builds URL
func (um urlMaker) buildURL(bucket, object string) (string, string) {
var host = ""
var path = ""
	object = url.QueryEscape(object)
	object = strings.Replace(object, "+", "%20", -1)
if um.Type == urlTypeCname {
host = um.NetLoc
@ -415,17 +605,10 @@ func (um urlMaker) getURL(bucket, object, params string) *url.URL {
}
}
return host, path
}
// getResource gets canonicalized resource
func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
if subResource != "" {
subResource = "?" + subResource

View File

@ -2,35 +2,77 @@ package oss
import "os"
// ACLType bucket/object ACL
type ACLType string
const (
	// ACLPrivate definition : private read and write
	ACLPrivate ACLType = "private"

	// ACLPublicRead definition : public read and private write
	ACLPublicRead ACLType = "public-read"

	// ACLPublicReadWrite definition : public read and public write
	ACLPublicReadWrite ACLType = "public-read-write"

	// ACLDefault the object's default ACL; it's only applicable to objects, not buckets
	ACLDefault ACLType = "default"
)
// MetadataDirectiveType specifies whether to use the metadata of the source object when copying an object
type MetadataDirectiveType string
const (
	// MetaCopy the target object's metadata is copied from the source one
	MetaCopy MetadataDirectiveType = "COPY"

	// MetaReplace the target object's metadata is created as part of the copy request (not the same as the source one)
	MetaReplace MetadataDirectiveType = "REPLACE"
)
// StorageClassType bucket storage type
type StorageClassType string
const (
// StorageStandard standard
StorageStandard StorageClassType = "Standard"
// StorageIA infrequent access
StorageIA StorageClassType = "IA"
// StorageArchive archive
StorageArchive StorageClassType = "Archive"
)
// PayerType the type of request payer
type PayerType string
const (
	// Requester the requester who sends the request
Requester PayerType = "requester"
)
// HTTPMethod HTTP request method
type HTTPMethod string
const (
// HTTPGet HTTP GET
HTTPGet HTTPMethod = "GET"
// HTTPPut HTTP PUT
HTTPPut HTTPMethod = "PUT"
// HTTPHead HTTP HEAD
HTTPHead HTTPMethod = "HEAD"
// HTTPPost HTTP POST
HTTPPost HTTPMethod = "POST"
// HTTPDelete HTTP DELETE
HTTPDelete HTTPMethod = "DELETE"
)
// HTTP headers
const (
HTTPHeaderAcceptEncoding string = "Accept-Encoding"
HTTPHeaderAuthorization = "Authorization"
@ -61,6 +103,7 @@ const (
HTTPHeaderOssObjectACL = "X-Oss-Object-Acl"
HTTPHeaderOssSecurityToken = "X-Oss-Security-Token"
HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption"
HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id"
HTTPHeaderOssCopySource = "X-Oss-Copy-Source"
HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range"
HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match"
@ -71,19 +114,32 @@ const (
HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position"
HTTPHeaderOssRequestID = "X-Oss-Request-Id"
HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma"
HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target"
HTTPHeaderOssStorageClass = "X-Oss-Storage-Class"
HTTPHeaderOssCallback = "X-Oss-Callback"
HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var"
HTTPHeaderOSSRequester = "X-Oss-Request-Payer"
)
// HTTP params
const (
HTTPParamExpires = "Expires"
HTTPParamAccessKeyID = "OSSAccessKeyId"
HTTPParamSignature = "Signature"
HTTPParamSecurityToken = "security-token"
)
// Other constants
const (
MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
MinPartSize = 100 * 1024 // Min part size, 100KB
FilePermMode = os.FileMode(0664) // Default file permission
TempFilePrefix = "oss-go-temp-" // Temp file prefix
TempFileSuffix = ".temp" // Temp file suffix
CheckpointFileSuffix = ".cp" // Checkpoint file suffix
Version = "1.9.2" // Go SDK version
)

View File

@ -11,11 +11,11 @@ type digest struct {
tab *crc64.Table
}
// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
// using the polynomial represented by the Table.
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
// Size returns the number of bytes Sum will return.
func (d *digest) Size() int { return crc64.Size }
// BlockSize returns the hash's underlying block size.
@ -24,7 +24,7 @@ func (d *digest) Size() int { return crc64.Size }
// are a multiple of the block size.
func (d *digest) BlockSize() int { return 1 }
// Reset resets the hash to its initial state.
func (d *digest) Reset() { d.crc = 0 }
// Write (via the embedded io.Writer interface) adds more data to the running hash.
@ -34,7 +34,7 @@ func (d *digest) Write(p []byte) (n int, err error) {
return len(p), nil
}
// Sum64 returns the CRC64 value.
func (d *digest) Sum64() uint64 { return d.crc }
// Sum returns hash value.
@ -42,3 +42,82 @@ func (d *digest) Sum(in []byte) []byte {
s := d.Sum64()
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// gf2Dim dimension of GF(2) vectors (length of CRC)
const gf2Dim int = 64
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
var sum uint64
for i := 0; vec != 0; i++ {
if vec&1 != 0 {
sum ^= mat[i]
}
vec >>= 1
}
return sum
}
func gf2MatrixSquare(square []uint64, mat []uint64) {
for n := 0; n < gf2Dim; n++ {
square[n] = gf2MatrixTimes(mat, mat[n])
}
}
// CRC64Combine combines CRC64
func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
var even [gf2Dim]uint64 // Even-power-of-two zeros operator
var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator
// Degenerate case
if len2 == 0 {
return crc1
}
// Put operator for one zero bit in odd
odd[0] = crc64.ECMA // CRC64 polynomial
var row uint64 = 1
for n := 1; n < gf2Dim; n++ {
odd[n] = row
row <<= 1
}
// Put operator for two zero bits in even
gf2MatrixSquare(even[:], odd[:])
// Put operator for four zero bits in odd
gf2MatrixSquare(odd[:], even[:])
// Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even
for {
// Apply zeros operator for this bit of len2
gf2MatrixSquare(even[:], odd[:])
if len2&1 != 0 {
crc1 = gf2MatrixTimes(even[:], crc1)
}
len2 >>= 1
// If no more bits set, then done
if len2 == 0 {
break
}
// Another iteration of the loop with odd and even swapped
gf2MatrixSquare(odd[:], even[:])
if len2&1 != 0 {
crc1 = gf2MatrixTimes(odd[:], crc1)
}
len2 >>= 1
// If no more bits set, then done
if len2 == 0 {
break
}
}
// Return combined CRC
crc1 ^= crc2
return crc1
}
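// A small verification sketch (input strings hypothetical): the combined CRC of two
// adjacent chunks equals the CRC of their concatenation, which is the property the
// multipart download CRC check relies on.
//
//	tab := crc64.MakeTable(crc64.ECMA)
//	c1 := crc64.Checksum([]byte("hello "), tab)
//	c2 := crc64.Checksum([]byte("world"), tab)
//	combined := CRC64Combine(c1, c2, uint64(len("world")))
//	// combined == crc64.Checksum([]byte("hello world"), tab)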

View File

@ -5,53 +5,81 @@ import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"hash"
"hash/crc64"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strconv"
)
// DownloadFile downloads files with multipart download.
//
// objectKey the object key.
// filePath  the local file path to download the object to.
// partSize  the part size in bytes.
// options   the object's constraints; check out GetObject for the reference.
//
// error it's nil when the call succeeds, otherwise it's an error object.
//
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
	if partSize < 1 {
		return errors.New("oss: part size smaller than 1")
	}

	uRange, err := getRangeConfig(options)
	if err != nil {
		return err
	}

	cpConf := getCpConfig(options)
	routines := getRoutines(options)

	if cpConf != nil && cpConf.IsEnable {
		cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, filePath)
		if cpFilePath != "" {
			return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
		}
	}

	return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
}
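// A hedged usage sketch (object key and local path hypothetical), downloading in
// 100 KB parts with three concurrent workers, assuming the SDK's Routines option:
//
//	err := bucket.DownloadFile("my-object", "/tmp/my-object.file", 100*1024, oss.Routines(3))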
// getDownloadCpFilePath builds the checkpoint file path for a download
func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destFile string) string {
if cpConf.FilePath == "" && cpConf.DirPath != "" {
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
absPath, _ := filepath.Abs(destFile)
cpFileName := getCpFileName(src, absPath)
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
}
return cpConf.FilePath
}
// getRangeConfig gets the download range from the options.
func getRangeConfig(options []Option) (*unpackedRange, error) {
rangeOpt, err := findOption(options, HTTPHeaderRange, nil)
if err != nil || rangeOpt == nil {
return nil, err
}
return parseRange(rangeOpt.(string))
}
// ----- concurrent download without checkpoint -----
// downloadWorkerArg is download worker's parameters
type downloadWorkerArg struct {
	bucket    *Bucket
	key       string
	filePath  string
	options   []Option
	hook      downloadPartHook
	enableCRC bool
}
// downloadPartHook is a hook for testing
type downloadPartHook func(part downloadPart) error
var downloadPartHooker downloadPartHook = defaultDownloadPartHook
@ -60,15 +88,15 @@ func defaultDownloadPartHook(part downloadPart) error {
return nil
}
// defaultDownloadProgressListener defines the default ProgressListener; it shields the ProgressListener in the options of GetObject
type defaultDownloadProgressListener struct {
}
// ProgressChanged is a no-op
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
}
// downloadWorker is the worker goroutine for downloading parts
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
for part := range jobs {
if err := arg.hook(part); err != nil {
@ -76,11 +104,11 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
break
}
		// Resolve options
r := Range(part.Start, part.End)
p := Progress(&defaultDownloadProgressListener{})
opts := make([]Option, len(arg.options)+2)
		// Append in order; it cannot be reversed!
opts = append(opts, arg.options...)
opts = append(opts, r, p)
@ -91,6 +119,14 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
}
defer rd.Close()
var crcCalc hash.Hash64
if arg.enableCRC {
crcCalc = crc64.New(crcTable())
contentLen := part.End - part.Start + 1
rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
}
defer rd.Close()
select {
case <-die:
return
@ -102,25 +138,31 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
failed <- err
break
}
		_, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET)
if err != nil {
fd.Close()
failed <- err
break
}
_, err = io.Copy(fd, rd)
if err != nil {
fd.Close()
failed <- err
break
}
if arg.enableCRC {
part.CRC64 = crcCalc.Sum64()
}
fd.Close()
results <- part
}
}
// downloadScheduler schedules the download parts
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
for _, part := range parts {
jobs <- part
@ -128,39 +170,34 @@ func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
close(jobs)
}
// downloadPart defines a download part
type downloadPart struct {
	Index  int    // Part number, starting from 0
	Start  int64  // Start index
	End    int64  // End index
	Offset int64  // Offset
	CRC64  uint64 // CRC check value of the part
}
// getDownloadParts gets download parts
func getDownloadParts(objectSize, partSize int64, uRange *unpackedRange) []downloadPart {
parts := []downloadPart{}
part := downloadPart{}
i := 0
start, end := adjustRange(uRange, objectSize)
for offset := start; offset < end; offset += partSize {
part.Index = i
part.Start = offset
part.End = GetPartEnd(offset, end, partSize)
part.Offset = start
part.CRC64 = 0
parts = append(parts, part)
i++
}
return parts
}
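A worked example of the split, assuming a 10 MB object, 4 MB parts, and no range (so adjustRange presumably yields start=0, end=objectSize):
// parts := getDownloadParts(10*1024*1024, 4*1024*1024, nil)
// Expected layout:
//   part 0: Start=0        End=4194303    (4 MB)
//   part 1: Start=4194304  End=8388607    (4 MB)
//   part 2: Start=8388608  End=10485759   (final 2 MB)
// Every part carries Offset=0 here; with a range request, Offset equals the
// range start, which downloadWorker subtracts when seeking in the local file.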
// 文件大小
// getObjectBytes gets object bytes length
func getObjectBytes(parts []downloadPart) int64 {
var ob int64
for _, part := range parts {
@ -169,24 +206,59 @@ func getObjectBytes(parts []downloadPart) int64 {
return ob
}
// combineCRCInParts calculates the total CRC of continuous parts
func combineCRCInParts(dps []downloadPart) uint64 {
if dps == nil || len(dps) == 0 {
return 0
}
crc := dps[0].CRC64
for i := 1; i < len(dps); i++ {
crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
}
return crc
}
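Why stitching per-part CRCs works: CRC64 satisfies crc(A||B) = combine(crc(A), crc(B), len(B)). A small sketch using the standard hash/crc64 package and the package's CRC64Combine as it is called above; the ECMA table is an assumption about what crcTable() returns:
func exampleCRCCombine() bool {
tab := crc64.MakeTable(crc64.ECMA)
a, b := []byte("hello "), []byte("world")
crcA := crc64.Checksum(a, tab)
crcB := crc64.Checksum(b, tab)
whole := crc64.Checksum([]byte("hello world"), tab)
// Expected to hold if crcTable() is ECMA-based, as assumed:
return CRC64Combine(crcA, crcB, uint64(len(b))) == whole
}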
// downloadFile downloads file concurrently without checkpoint.
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *unpackedRange) error {
tempFilePath := filePath + TempFileSuffix
listener := getProgressListener(options)
payerOptions := []Option{}
payer := getPayer(options)
if payer != "" {
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
}
// If the file does not exist, create one; if it exists, it is not truncated and the downloaded parts rewrite its content.
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
if err != nil {
return err
}
fd.Close()
meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
if err != nil {
return err
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return err
}
enableCRC := false
expectedCRC := (uint64)(0)
if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
enableCRC = true
expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
}
}
// Get the parts of the file
parts := getDownloadParts(objectSize, partSize, uRange)
jobs := make(chan downloadPart, len(parts))
results := make(chan downloadPart, len(parts))
failed := make(chan error)
@ -197,24 +269,23 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
publishProgress(listener, event)
// Start the download workers
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
for w := 1; w <= routines; w++ {
go downloadWorker(w, arg, jobs, results, failed, die)
}
// Download parts concurrently
go downloadScheduler(jobs, parts)
// Wait for the parts download to finish
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
completedBytes += (part.End - part.Start + 1)
parts[part.Index].CRC64 = part.CRC64
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
publishProgress(listener, event)
case err := <-failed:
@ -232,32 +303,44 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
if enableCRC {
actualCRC := combineCRCInParts(parts)
err = checkDownloadCRC(actualCRC, expectedCRC)
if err != nil {
return err
}
}
return os.Rename(tempFilePath, filePath)
}
// ----- Concurrent download with checkpoint -----
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
type downloadCheckpoint struct {
Magic string // Magic
MD5 string // Checkpoint content MD5
FilePath string // Local file
Object string // Key
ObjStat objectStat // Object status
Parts []downloadPart // All download parts
PartStat []bool // Parts' download status
Start int64 // Start point of the file
End int64 // End point of the file
enableCRC bool // Whether has CRC check
CRC uint64 // CRC check value
}
type objectStat struct {
Size int64 // Object size
LastModified string // Last modified time
Etag string // Etag
}
// isValid checks whether the checkpoint data is valid: the checkpoint itself must be intact and the object must not have been updated.
func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (bool, error) {
// Compare the CP's Magic and the MD5
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
@ -268,28 +351,30 @@ func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, er
return false, nil
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return false, err
}
// Compare the object size, last modified time and etag
if cp.ObjStat.Size != objectSize ||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
return false, nil
}
// Check the download range
if uRange != nil {
start, end := adjustRange(uRange, objectSize)
if start != cp.Start || end != cp.End {
return false, nil
}
}
return true, nil
}
// load checkpoint from local file
func (cp *downloadCheckpoint) load(filePath string) error {
contents, err := ioutil.ReadFile(filePath)
if err != nil {
@ -300,11 +385,11 @@ func (cp *downloadCheckpoint) load(filePath string) error {
return err
}
// dump dumps the checkpoint data to the specified file
func (cp *downloadCheckpoint) dump(filePath string) error {
bcp := *cp
// Calculate MD5
bcp.MD5 = ""
js, err := json.Marshal(bcp)
if err != nil {
@ -314,17 +399,17 @@ func (cp *downloadCheckpoint) dump(filePath string) error {
b64 := base64.StdEncoding.EncodeToString(sum[:])
bcp.MD5 = b64
// Serialize
js, err = json.Marshal(bcp)
if err != nil {
return err
}
// Dump
return ioutil.WriteFile(filePath, js, FilePermMode)
}
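dump protects the checkpoint with an MD5-over-JSON scheme: marshal with the MD5 field blanked, hash, store the base64 digest, then marshal again. Verification simply mirrors that, as isValid does above; a condensed sketch (crypto/md5, encoding/base64 and encoding/json assumed imported):
func exampleVerifyCheckpoint(cp downloadCheckpoint) bool {
cpb := cp
cpb.MD5 = "" // hash the JSON exactly as dump produced it
js, _ := json.Marshal(cpb)
sum := md5.Sum(js)
return base64.StdEncoding.EncodeToString(sum[:]) == cp.MD5
}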
// todoParts gets unfinished parts
func (cp downloadCheckpoint) todoParts() []downloadPart {
dps := []downloadPart{}
for i, ps := range cp.PartStat {
@ -335,7 +420,7 @@ func (cp downloadCheckpoint) todoParts() []downloadPart {
return dps
}
// getCompletedBytes gets completed size
func (cp downloadCheckpoint) getCompletedBytes() int64 {
var completedBytes int64
for i, part := range cp.Parts {
@ -346,19 +431,13 @@ func (cp downloadCheckpoint) getCompletedBytes() int64 {
return completedBytes
}
// prepare initializes the download tasks
func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
// CP
cp.Magic = downloadCpMagic
cp.FilePath = filePath
cp.Object = objectKey
// object
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return err
@ -368,11 +447,15 @@ func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
cp.enableCRC = true
cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
}
}
// Parts
cp.Parts = getDownloadParts(objectSize, partSize, uRange)
cp.PartStat = make([]bool, len(cp.Parts))
for i := range cp.PartStat {
cp.PartStat[i] = false
@ -386,35 +469,47 @@ func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
return os.Rename(downFilepath, cp.FilePath)
}
// downloadFileWithCp downloads files with checkpoint.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *unpackedRange) error {
tempFilePath := filePath + TempFileSuffix
listener := getProgressListener(options)
payerOptions := []Option{}
payer := getPayer(options)
if payer != "" {
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
}
// Load checkpoint data.
dcp := downloadCheckpoint{}
err := dcp.load(cpFilePath)
if err != nil {
os.Remove(cpFilePath)
}
// Get the object detailed meta.
meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
if err != nil {
return err
}
// Load error or data invalid. Re-initialize the download.
valid, err := dcp.isValid(meta, uRange)
if err != nil || !valid {
if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
return err
}
os.Remove(cpFilePath)
}
// Create the file if it does not exist; if it exists, it is not truncated and the downloaded parts rewrite its content.
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
if err != nil {
return err
}
fd.Close()
// Unfinished parts
parts := dcp.todoParts()
jobs := make(chan downloadPart, len(parts))
results := make(chan downloadPart, len(parts))
@ -425,22 +520,23 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size)
publishProgress(listener, event)
// Start the download worker routines
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
for w := 1; w <= routines; w++ {
go downloadWorker(w, arg, jobs, results, failed, die)
}
// Concurrently downloads parts
go downloadScheduler(jobs, parts)
// Wait for the parts download to finish
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
dcp.PartStat[part.Index] = true
dcp.Parts[part.Index].CRC64 = part.CRC64
dcp.dump(cpFilePath)
completedBytes += (part.End - part.Start + 1)
event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size)
@ -460,5 +556,13 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size)
publishProgress(listener, event)
if dcp.enableCRC {
actualCRC := combineCRCInParts(dcp.Parts)
err = checkDownloadCRC(actualCRC, dcp.CRC)
if err != nil {
return err
}
}
return dcp.complete(cpFilePath, tempFilePath)
}
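For orientation, a hedged caller-side sketch of how these internals are typically reached; DownloadFile lives elsewhere in the package and is assumed to dispatch to downloadFile or downloadFileWithCp depending on the Checkpoint option:
// err := bucket.DownloadFile("my/object", "/tmp/object", 1024*1024,
//     oss.Routines(3),                        // three concurrent download workers
//     oss.Checkpoint(true, "/tmp/object.cp"), // resume from this checkpoint file
//     oss.NormalizedRange("1024-204800"))     // optional partial download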

View File

@ -10,28 +10,33 @@ import (
// ServiceError contains fields of the error response from Oss Service REST API.
type ServiceError struct {
XMLName xml.Name `xml:"Error"`
Code string `xml:"Code"` // The error code returned from OSS to the caller
Message string `xml:"Message"` // The detail error message from OSS
RequestID string `xml:"RequestId"` // The UUID used to uniquely identify the request
HostID string `xml:"HostId"` // The OSS server cluster's Id
Endpoint string `xml:"Endpoint"`
RawMessage string // The raw messages from OSS
StatusCode int // HTTP status code
}
// Error implements the error interface
func (e ServiceError) Error() string {
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s",
e.StatusCode, e.Code, e.Message, e.RequestID)
if e.Endpoint == "" {
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
e.StatusCode, e.Code, e.Message, e.RequestID)
}
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
allowed []int // The expected HTTP status code returned from OSS
got int // The actual HTTP status code from OSS
}
// Error implements the error interface
func (e UnexpectedStatusCodeError) Error() string {
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
@ -62,18 +67,25 @@ func checkRespCode(respCode int, allowed []int) error {
// CRCCheckError is returned when crc check is inconsistent between client and server
type CRCCheckError struct {
clientCRC uint64 // Calculated CRC64 in client
serverCRC uint64 // Calculated CRC64 in server
operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
requestID string // The request id of this operation
}
// Error implements the error interface
func (e CRCCheckError) Error() string {
return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
e.operation, e.clientCRC, e.serverCRC, e.requestID)
}
func checkDownloadCRC(clientCRC, serverCRC uint64) error {
if clientCRC == serverCRC {
return nil
}
return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
}
func checkCRC(resp *Response, operation string) error {
if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
return nil

View File

@ -235,7 +235,7 @@ var extToMimeType = map[string]string{
}
// TypeByExtension returns the MIME type associated with the file extension ext.
// gets the file's MIME type for HTTP header Content-Type
func TypeByExtension(filePath string) string {
typ := mime.TypeByExtension(path.Ext(filePath))
if typ == "" {

View File

@ -6,7 +6,7 @@ import (
"net/http"
)
// Response defines HTTP response from OSS
type Response struct {
StatusCode int
Headers http.Header
@ -15,38 +15,46 @@ type Response struct {
ServerCRC uint64
}
func (r *Response) Read(p []byte) (n int, err error) {
return r.Body.Read(p)
}
func (r *Response) Close() error {
return r.Body.Close()
}
// PutObjectRequest is the request of DoPutObject
type PutObjectRequest struct {
ObjectKey string
Reader io.Reader
}
// GetObjectRequest is the request of DoGetObject
type GetObjectRequest struct {
ObjectKey string
}
// GetObjectResult is the result of DoGetObject
type GetObjectResult struct {
Response *Response
ClientCRC hash.Hash64
ServerCRC uint64
}
// AppendObjectRequest is the request of DoAppendObject
type AppendObjectRequest struct {
ObjectKey string
Reader io.Reader
Position int64
}
// AppendObjectResult is the result of DoAppendObject
type AppendObjectResult struct {
NextPosition int64
CRC uint64
}
// UploadPartRequest is the request of DoUploadPart
type UploadPartRequest struct {
InitResult *InitiateMultipartUploadResult
Reader io.Reader
@ -54,7 +62,7 @@ type UploadPartRequest struct {
PartNumber int
}
// UploadPartResult is the result of DoUploadPart
type UploadPartResult struct {
Part UploadPart
}

View File

@ -5,22 +5,22 @@ import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strconv"
)
// CopyFile is multipart copy object
//
// srcBucketName source bucket name
// srcObjectKey source object name
// destObjectKey target object name in the form of bucketname.objectkey
// partSize the part size in bytes
// options object's constraints. Check out function InitiateMultipartUpload.
//
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
destBucketName := bucket.BucketName
@ -28,25 +28,33 @@ func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string,
return errors.New("oss: part size invalid range (1024KB, 5GB]")
}
cpConf := getCpConfig(options)
routines := getRoutines(options)
if cpConf != nil && cpConf.IsEnable {
cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey)
if cpFilePath != "" {
return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
}
}
return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
partSize, options, routines)
}
func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject string) string {
if cpConf.FilePath == "" && cpConf.DirPath != "" {
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
cpFileName := getCpFileName(src, dest)
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
}
return cpConf.FilePath
}
// ----- Concurrently copy without checkpoint ---------
// copyWorkerArg defines the copy worker arguments
type copyWorkerArg struct {
bucket *Bucket
imur InitiateMultipartUploadResult
@ -56,7 +64,7 @@ type copyWorkerArg struct {
hook copyPartHook
}
// copyPartHook is the hook for testing purpose
type copyPartHook func(part copyPart) error
var copyPartHooker copyPartHook = defaultCopyPartHook
@ -65,7 +73,7 @@ func defaultCopyPartHook(part copyPart) error {
return nil
}
// copyWorker is the worker routine that copies parts
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
for chunk := range jobs {
if err := arg.hook(chunk); err != nil {
@ -88,7 +96,7 @@ func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<-
}
}
// copyScheduler feeds the copy parts to the jobs channel
func copyScheduler(jobs chan copyPart, parts []copyPart) {
for _, part := range parts {
jobs <- part
@ -96,26 +104,16 @@ func copyScheduler(jobs chan copyPart, parts []copyPart) {
close(jobs)
}
// copyPart structure
type copyPart struct {
Number int // Part number (from 1 to 10,000)
Start int64 // The start index in the source file.
End int64 // The end index in the source file
}
// getCopyParts calculates copy parts
func getCopyParts(objectSize, partSize int64) []copyPart {
parts := []copyPart{}
part := copyPart{}
i := 0
for offset := int64(0); offset < objectSize; offset += partSize {
@ -125,10 +123,10 @@ func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart,
parts = append(parts, part)
i++
}
return parts
}
// getSrcObjectBytes gets the source file size
func getSrcObjectBytes(parts []copyPart) int64 {
var ob int64
for _, part := range parts {
@ -137,20 +135,32 @@ func getSrcObjectBytes(parts []copyPart) int64 {
return ob
}
// copyFile copies files concurrently without checkpoint
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
partSize int64, options []Option, routines int) error {
descBucket, err := bucket.Client.Bucket(destBucketName)
srcBucket, err := bucket.Client.Bucket(srcBucketName)
listener := getProgressListener(options)
payerOptions := []Option{}
payer := getPayer(options)
if payer != "" {
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
}
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
if err != nil {
return err
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return err
}
// Get copy parts
parts := getCopyParts(objectSize, partSize)
// Initialize the multipart upload
imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
if err != nil {
return err
@ -166,16 +176,16 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
publishProgress(listener, event)
// Start the copy workers
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
for w := 1; w <= routines; w++ {
go copyWorker(w, arg, jobs, results, failed, die)
}
// Start the scheduler
go copyScheduler(jobs, parts)
// Wait for the parts to finish
completed := 0
ups := make([]UploadPart, len(parts))
for completed < len(parts) {
@ -188,7 +198,7 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
publishProgress(listener, event)
case err := <-failed:
close(die)
descBucket.AbortMultipartUpload(imur)
descBucket.AbortMultipartUpload(imur, payerOptions...)
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
return err
@ -202,36 +212,36 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
// Complete the multipart upload
_, err = descBucket.CompleteMultipartUpload(imur, ups, payerOptions...)
if err != nil {
bucket.AbortMultipartUpload(imur, payerOptions...)
return err
}
return nil
}
// ----- Concurrently copy with checkpoint -----
const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
type copyCheckpoint struct {
Magic string // Magic
MD5 string // CP content MD5
SrcBucketName string // Source bucket
SrcObjectKey string // Source object
DestBucketName string // Target bucket
DestObjectKey string // Target object
CopyID string // Copy ID
ObjStat objectStat // Object stat
Parts []copyPart // Copy parts
CopyParts []UploadPart // The uploaded parts
PartStat []bool // The part status
}
// isValid checks whether the checkpoint data is valid: the checkpoint itself must be intact and the object must not have been updated.
func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
// Compare CP's magic number and the MD5.
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
@ -242,18 +252,12 @@ func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error)
return false, nil
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return false, err
}
// Compare the object size and last modified time and etag.
if cp.ObjStat.Size != objectSize ||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
@ -263,7 +267,7 @@ func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error)
return true, nil
}
// load loads from the checkpoint file
func (cp *copyCheckpoint) load(filePath string) error {
contents, err := ioutil.ReadFile(filePath)
if err != nil {
@ -274,17 +278,17 @@ func (cp *copyCheckpoint) load(filePath string) error {
return err
}
// update updates the parts status
func (cp *copyCheckpoint) update(part UploadPart) {
cp.CopyParts[part.PartNumber-1] = part
cp.PartStat[part.PartNumber-1] = true
}
// dump dumps the CP to the file
func (cp *copyCheckpoint) dump(filePath string) error {
bcp := *cp
// Calculate MD5
bcp.MD5 = ""
js, err := json.Marshal(bcp)
if err != nil {
@ -294,17 +298,17 @@ func (cp *copyCheckpoint) dump(filePath string) error {
b64 := base64.StdEncoding.EncodeToString(sum[:])
bcp.MD5 = b64
// Serialization
js, err = json.Marshal(bcp)
if err != nil {
return err
}
// Dump
return ioutil.WriteFile(filePath, js, FilePermMode)
}
// todoParts returns unfinished parts
func (cp copyCheckpoint) todoParts() []copyPart {
dps := []copyPart{}
for i, ps := range cp.PartStat {
@ -315,7 +319,7 @@ func (cp copyCheckpoint) todoParts() []copyPart {
return dps
}
// getCompletedBytes returns finished bytes count
func (cp copyCheckpoint) getCompletedBytes() int64 {
var completedBytes int64
for i, part := range cp.Parts {
@ -326,22 +330,16 @@ func (cp copyCheckpoint) getCompletedBytes() int64 {
return completedBytes
}
// prepare initializes the multipart upload
func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
partSize int64, options []Option) error {
// CP
cp.Magic = copyCpMagic
cp.SrcBucketName = srcBucket.BucketName
cp.SrcObjectKey = srcObjectKey
cp.DestBucketName = destBucket.BucketName
cp.DestObjectKey = destObjectKey
// object
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return err
@ -351,18 +349,15 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
// Parts
cp.Parts = getCopyParts(objectSize, partSize)
cp.PartStat = make([]bool, len(cp.Parts))
for i := range cp.PartStat {
cp.PartStat[i] = false
}
cp.CopyParts = make([]UploadPart, len(cp.Parts))
// Init copy
imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
if err != nil {
return err
@ -372,10 +367,10 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
return nil
}
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
Key: cp.DestObjectKey, UploadID: cp.CopyID}
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
if err != nil {
return err
}
@ -383,30 +378,42 @@ func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePat
return err
}
// copyFileWithCp copies files concurrently with checkpoint
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
partSize int64, options []Option, cpFilePath string, routines int) error {
descBucket, err := bucket.Client.Bucket(destBucketName)
srcBucket, err := bucket.Client.Bucket(srcBucketName)
listener := getProgressListener(options)
payerOptions := []Option{}
payer := getPayer(options)
if payer != "" {
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
}
// Load CP data
ccp := copyCheckpoint{}
err = ccp.load(cpFilePath)
if err != nil {
os.Remove(cpFilePath)
}
// Make sure the object is not updated.
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
if err != nil {
return err
}
// Load error or the CP data is invalid; reinitialize
valid, err := ccp.isValid(meta)
if err != nil || !valid {
if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
return err
}
os.Remove(cpFilePath)
}
// Unfinished parts
parts := ccp.todoParts()
imur := InitiateMultipartUploadResult{
Bucket: destBucketName,
@ -422,16 +429,16 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size)
publishProgress(listener, event)
// Start the worker goroutines
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
for w := 1; w <= routines; w++ {
go copyWorker(w, arg, jobs, results, failed, die)
}
// Start the scheduler
go copyScheduler(jobs, parts)
// Wait for the parts to complete
completed := 0
for completed < len(parts) {
select {
@ -457,5 +464,5 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
publishProgress(listener, event)
return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, payerOptions)
}
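And the corresponding caller-side sketch for the copy path (values illustrative; partSize must be in (1024KB, 5GB] per the check in CopyFile):
// err := bucket.CopyFile("src-bucket", "src/key", "dest/key", 5*1024*1024,
//     oss.Routines(4),                    // concurrent copy workers
//     oss.CheckpointDir(true, "/tmp/cp")) // checkpoint file auto-named in /tmp/cp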

View File

@ -5,26 +5,28 @@ import (
"encoding/xml"
"io"
"net/http"
"net/url"
"os"
"sort"
"strconv"
)
// InitiateMultipartUpload initializes multipart upload
//
// objectKey object name
// options the object constraints for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
// ServerSideEncryption, Meta, check out the following link:
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
//
// InitiateMultipartUploadResult the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile,UploadPartCopy.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
var imur InitiateMultipartUploadResult
opts := addContentType(options, objectKey)
resp, err := bucket.do("POST", objectKey, "uploads", "uploads", opts, nil, nil)
params := map[string]interface{}{}
params["uploads"] = nil
resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
if err != nil {
return imur, err
}
@ -34,23 +36,20 @@ func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option
return imur, err
}
// UploadPart uploads parts
//
// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts.
// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file.
// And thus with the same part number and upload Id, another part upload will overwrite the data.
// Except for the last one, the minimum part size is 100KB. There's no limit on the last part's size.
//
// imur the returned value of InitiateMultipartUpload.
// reader io.Reader the reader for the part's data.
// size the part size.
// partNumber the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error.
//
// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
partSize int64, partNumber int, options ...Option) (UploadPart, error) {
@ -66,18 +65,16 @@ func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Re
return result.Part, err
}
// UploadPartFromFile uploads part from the file.
//
// imur the return value of a successful InitiateMultipartUpload.
// filePath the local file path to upload.
// startPosition the start position in the local file.
// partSize the part size.
// partNumber the part number (from 1 to 10,000)
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
@ -101,19 +98,20 @@ func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, file
return result.Part, err
}
// DoUploadPart does the actual part upload.
//
// request part upload request
//
// UploadPartResult the result of uploading part.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
listener := getProgressListener(options)
params := "partNumber=" + strconv.Itoa(request.PartNumber) + "&uploadId=" + request.InitResult.UploadID
opts := []Option{ContentLength(request.PartSize)}
resp, err := bucket.do("PUT", request.InitResult.Key, params, params, opts,
options = append(options, ContentLength(request.PartSize))
params := map[string]interface{}{}
params["partNumber"] = strconv.Itoa(request.PartNumber)
params["uploadId"] = request.InitResult.UploadID
resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
if err != nil {
return &UploadPartResult{}, err
@ -135,32 +133,32 @@ func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option)
return &UploadPartResult{part}, nil
}
// UploadPartCopy uploads part copy
//
// imur the return value of InitiateMultipartUpload
// copySrc source Object name
// startPosition the part's start index in the source file
// partSize the part size
// partNumber the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error.
// options the constraints of the source object for the copy. The copy happens only when these constraints are met. Otherwise it returns an error.
// CopySourceIfMatch, CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince; check out the following link for the details
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
var out UploadPartCopyResult
var part UploadPart
opts := []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
CopySourceRange(startPosition, partSize)}
opts = append(opts, options...)
params := "partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + imur.UploadID
resp, err := bucket.do("PUT", imur.Key, params, params, opts, nil, nil)
params := map[string]interface{}{}
params["partNumber"] = strconv.Itoa(partNumber)
params["uploadId"] = imur.UploadID
resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
if err != nil {
return part, err
}
@ -176,17 +174,16 @@ func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucke
return part, nil
}
// CompleteMultipartUpload completes the multipart upload.
//
// imur the return value of InitiateMultipartUpload.
// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy.
//
// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
var out CompleteMultipartUploadResult
sort.Sort(uploadParts(parts))
@ -199,8 +196,9 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
buffer := new(bytes.Buffer)
buffer.Write(bs)
params := "uploadId=" + imur.UploadID
resp, err := bucket.do("POST", imur.Key, params, params, nil, buffer, nil)
params := map[string]interface{}{}
params["uploadId"] = imur.UploadID
resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
if err != nil {
return out, err
}
@ -210,16 +208,16 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
return out, err
}
// AbortMultipartUpload aborts the multipart upload.
//
// imur the return value of InitiateMultipartUpload.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
params := map[string]interface{}{}
params["uploadId"] = imur.UploadID
resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
if err != nil {
return err
}
@ -227,46 +225,57 @@ func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) er
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
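Putting the low-level multipart calls together, a minimal happy-path sketch (error handling elided, names and sizes illustrative):
// imur, _ := bucket.InitiateMultipartUpload("big/object")
// p1, _ := bucket.UploadPartFromFile(imur, "/data/big.bin", 0, 5*1024*1024, 1)
// p2, _ := bucket.UploadPartFromFile(imur, "/data/big.bin", 5*1024*1024, 3*1024*1024, 2)
// _, err := bucket.CompleteMultipartUpload(imur, []oss.UploadPart{p1, p2})
// if err != nil {
//     _ = bucket.AbortMultipartUpload(imur) // release the uploaded parts on failure
// }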
// ListUploadedParts lists the uploaded parts.
//
// imur the return value of InitiateMultipartUpload.
//
// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
//
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
var out ListUploadedPartsResult
params := "uploadId=" + imur.UploadID
resp, err := bucket.do("GET", imur.Key, params, params, nil, nil, nil)
options = append(options, EncodingType("url"))
params := map[string]interface{}{}
params, err := getRawParams(options)
if err != nil {
return out, err
}
params["uploadId"] = imur.UploadID
resp, err := bucket.do("GET", imur.Key, params, nil, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return out, err
}
err = decodeListUploadedPartsResult(&out)
return out, err
}
// ListMultipartUploads lists all ongoing multipart upload tasks
//
// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
//
// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
var out ListMultipartUploadResult
options = append(options, EncodingType("url"))
params, err := getRawParams(options)
if err != nil {
return out, err
}
params["uploads"] = nil
resp, err := bucket.do("GET", "", "uploads&"+params, "uploads", nil, nil, nil)
resp, err := bucket.do("GET", "", params, options, nil, nil)
if err != nil {
return out, err
}

View File

@ -1,21 +1,19 @@
package oss
import (
"bytes"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
)
type optionType string
const (
optionParam optionType = "HTTPParameter" // URL参数
optionHTTP optionType = "HTTPHeader" // HTTP
optionArg optionType = "FuncArgument" // 函数参数
optionParam optionType = "HTTPParameter" // URL parameter
optionHTTP optionType = "HTTPHeader" // HTTP header
optionArg optionType = "FuncArgument" // Function argument
)
const (
@ -24,6 +22,7 @@ const (
checkpointConfig = "x-cp-config"
initCRC64 = "init-crc64"
progressListener = "x-progress-listener"
storageClass = "storage-class"
)
type (
@ -32,7 +31,7 @@ type (
Type optionType
}
// Option HTTP option
Option func(map[string]optionValue) error
)
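Mechanically, an Option is just a closure that records a value into the shared map; Type decides whether it later becomes a query parameter, an HTTP header, or a function argument. A sketch of what applying Delimiter("/") does:
// values := map[string]optionValue{}
// _ = Delimiter("/")(values)
// // values["delimiter"] now holds {Value: "/", Type: optionParam};
// // getRawParams later copies optionParam entries into the query map.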
@ -66,6 +65,11 @@ func ContentEncoding(value string) Option {
return setHeader(HTTPHeaderContentEncoding, value)
}
// ContentLanguage is an option to set Content-Language header
func ContentLanguage(value string) Option {
return setHeader(HTTPHeaderContentLanguage, value)
}
// ContentMD5 is an option to set Content-MD5 header
func ContentMD5(value string) Option {
return setHeader(HTTPHeaderContentMD5, value)
@ -86,6 +90,11 @@ func Range(start, end int64) Option {
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
}
// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048
func NormalizedRange(nr string) Option {
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
}
// AcceptEncoding is an option to set Accept-Encoding header
func AcceptEncoding(value string) Option {
return setHeader(HTTPHeaderAcceptEncoding, value)
@ -153,16 +162,46 @@ func ServerSideEncryption(value string) Option {
return setHeader(HTTPHeaderOssServerSideEncryption, value)
}
// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header
func ServerSideEncryptionKeyID(value string) Option {
return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
}
// ObjectACL is an option to set X-Oss-Object-Acl header
func ObjectACL(acl ACLType) Option {
return setHeader(HTTPHeaderOssObjectACL, string(acl))
}
// symlinkTarget is an option to set X-Oss-Symlink-Target
func symlinkTarget(targetObjectKey string) Option {
return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
}
// Origin is an option to set Origin header
func Origin(value string) Option {
return setHeader(HTTPHeaderOrigin, value)
}
// ObjectStorageClass is an option to set the storage class of object
func ObjectStorageClass(storageClass StorageClassType) Option {
return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
}
// Callback is an option to set callback values
func Callback(callback string) Option {
return setHeader(HTTPHeaderOssCallback, callback)
}
// CallbackVar is an option to set callback user defined values
func CallbackVar(callbackVar string) Option {
return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
}
// RequestPayer is an option to set the payer who pays for the request
func RequestPayer(payerType PayerType) Option {
return setHeader(HTTPHeaderOSSRequester, string(payerType))
}
// Delimiter is an option to set the delimiter parameter
func Delimiter(value string) Option {
return addParam("delimiter", value)
@ -203,28 +242,49 @@ func UploadIDMarker(value string) Option {
return addParam("upload-id-marker", value)
}
// MaxParts is an option to set max-parts parameter
func MaxParts(value int) Option {
return addParam("max-parts", strconv.Itoa(value))
}
// PartNumberMarker is an option to set part-number-marker parameter
func PartNumberMarker(value int) Option {
return addParam("part-number-marker", strconv.Itoa(value))
}
// DeleteObjectsQuiet false: DeleteObjects in verbose mode; true: DeleteObjects in quiet mode. Default is false.
func DeleteObjectsQuiet(isQuiet bool) Option {
return addArg(deleteObjectsQuiet, isQuiet)
}
// StorageClass bucket storage class
func StorageClass(value StorageClassType) Option {
return addArg(storageClass, value)
}
// Checkpoint configuration
type cpConfig struct {
IsEnable bool
FilePath string
DirPath string
}
// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
func Checkpoint(isEnable bool, filePath string) Option {
return addArg(checkpointConfig, &cpConfig{isEnable, filePath})
return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
}
// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
func CheckpointDir(isEnable bool, dirPath string) Option {
return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath})
}
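The two checkpoint options differ only in which cpConfig field they fill: Checkpoint pins an exact file, while CheckpointDir names a directory from which a per-transfer file name is derived (as getCopyCpFilePath does in copy.go). Illustrative:
// oss.Checkpoint(true, "/tmp/obj.cp") // FilePath set explicitly
// oss.CheckpointDir(true, "/tmp/cps") // DirPath set; file name derived per transfer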
// Routines DownloadFile/UploadFile routine count
func Routines(n int) Option {
return addArg(routineNum, n)
}
// InitCRC sets the initial value for the AppendObject CRC check
func InitCRC(initCRC uint64) Option {
return addArg(initCRC64, initCRC)
}
@ -234,6 +294,41 @@ func Progress(listener ProgressListener) Option {
return addArg(progressListener, listener)
}
// ResponseContentType is an option to set response-content-type param
func ResponseContentType(value string) Option {
return addParam("response-content-type", value)
}
// ResponseContentLanguage is an option to set response-content-language param
func ResponseContentLanguage(value string) Option {
return addParam("response-content-language", value)
}
// ResponseExpires is an option to set response-expires param
func ResponseExpires(value string) Option {
return addParam("response-expires", value)
}
// ResponseCacheControl is an option to set response-cache-control param
func ResponseCacheControl(value string) Option {
return addParam("response-cache-control", value)
}
// ResponseContentDisposition is an option to set response-content-disposition param
func ResponseContentDisposition(value string) Option {
return addParam("response-content-disposition", value)
}
// ResponseContentEncoding is an option to set response-content-encoding param
func ResponseContentEncoding(value string) Option {
return addParam("response-content-encoding", value)
}
// Process is an option to set x-oss-process param
func Process(value string) Option {
return addParam("x-oss-process", value)
}
func setHeader(key string, value interface{}) Option {
return func(params map[string]optionValue) error {
if value == nil {
@ -282,40 +377,27 @@ func handleOptions(headers map[string]string, options []Option) error {
return nil
}
func getRawParams(options []Option) (map[string]interface{}, error) {
// Option
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
if err := option(params); err != nil {
return "", err
return nil, err
}
}
}
paramsm := map[string]interface{}{}
// Serialize
for k, v := range params {
if v.Type == optionParam {
vs := params[k]
paramsm[k] = vs.Value.(string)
}
}
return paramsm, nil
}
func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {

View File

@ -2,7 +2,7 @@ package oss
import "io"
// ProgressEventType defines transfer progress event type
type ProgressEventType int
const (
@ -16,19 +16,19 @@ const (
TransferFailedEvent
)
// ProgressEvent defines progress event
type ProgressEvent struct {
ConsumedBytes int64
TotalBytes int64
EventType ProgressEventType
}
// ProgressListener listens progress change
type ProgressListener interface {
ProgressChanged(event *ProgressEvent)
}
// -------------------- Private --------------------
func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
return &ProgressEvent{
@ -62,7 +62,7 @@ type teeReader struct {
// corresponding writes to w. There is no internal buffering -
// the write must complete before the read completes.
// Any error encountered while writing is reported as a read error.
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
return &teeReader{
reader: reader,
writer: writer,
@ -76,7 +76,7 @@ func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener Pr
func (t *teeReader) Read(p []byte) (n int, err error) {
n, err = t.reader.Read(p)
// Read encountered error
if err != nil && err != io.EOF {
event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
publishProgress(t.listener, event)
@ -84,18 +84,18 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
if n > 0 {
t.consumedBytes += int64(n)
// CRC
if t.writer != nil {
if n, err := t.writer.Write(p[:n]); err != nil {
return n, err
}
}
// Progress
if t.listener != nil {
event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
publishProgress(t.listener, event)
}
// Track
if t.tracker != nil {
t.tracker.completedBytes = t.consumedBytes
}
@ -103,3 +103,10 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
return
}
func (t *teeReader) Close() error {
if rc, ok := t.reader.(io.ReadCloser); ok {
return rc.Close()
}
return nil
}
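This is the same wrapper downloadWorker uses to accumulate a part's CRC while streaming; a standalone sketch with only the CRC writer attached (io, io/ioutil and hash/crc64 assumed imported, and crcTable() assumed ECMA-based):
func exampleTeeCRC(body io.ReadCloser, n int64) (uint64, error) {
crcCalc := crc64.New(crc64.MakeTable(crc64.ECMA))
rd := TeeReader(body, crcCalc, n, nil, nil) // no listener, no tracker
defer rd.Close()
if _, err := io.Copy(ioutil.Discard, rd); err != nil {
return 0, err
}
return crcCalc.Sum64(), nil
}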

View File

@ -0,0 +1,26 @@
// +build !go1.7
package oss
import (
"net"
"net/http"
)
func newTransport(conn *Conn, config *Config) *http.Transport {
httpTimeOut := conn.config.HTTPTimeout
httpMaxConns := conn.config.HTTPMaxConns
// New Transport
transport := &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
if err != nil {
return nil, err
}
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
},
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
}
return transport
}

View File

@ -0,0 +1,28 @@
// +build go1.7
package oss
import (
"net"
"net/http"
)
func newTransport(conn *Conn, config *Config) *http.Transport {
httpTimeOut := conn.config.HTTPTimeout
httpMaxConns := conn.config.HTTPMaxConns
// New Transport
transport := &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
if err != nil {
return nil, err
}
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
},
MaxIdleConns: httpMaxConns.MaxIdleConns,
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
IdleConnTimeout: httpTimeOut.IdleConnTimeout,
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
}
return transport
}

View File

@ -6,53 +6,54 @@ import (
"time"
)
// ListBucketsResult defines the result object from ListBuckets request
type ListBucketsResult struct {
XMLName xml.Name `xml:"ListAllMyBucketsResult"`
Prefix string `xml:"Prefix"` // The prefix in this query
Marker string `xml:"Marker"` // The marker filter
MaxKeys int `xml:"MaxKeys"` // The max entry count to return. This information is returned when IsTruncated is true.
IsTruncated bool `xml:"IsTruncated"` // Flag true means there's remaining buckets to return.
NextMarker string `xml:"NextMarker"` // The marker filter for the next list call
Owner Owner `xml:"Owner"` // The owner information
Buckets []BucketProperties `xml:"Buckets>Bucket"` // The bucket list
}
// BucketProperties is the bucket information
// BucketProperties defines bucket properties
type BucketProperties struct {
XMLName xml.Name `xml:"Bucket"`
Name string `xml:"Name"` // Bucket name
Location string `xml:"Location"` // The data center the bucket resides in
CreationDate time.Time `xml:"CreationDate"` // Bucket creation time
Name string `xml:"Name"` // Bucket name
Location string `xml:"Location"` // Bucket datacenter
CreationDate time.Time `xml:"CreationDate"` // Bucket create time
StorageClass string `xml:"StorageClass"` // Bucket storage class
}
// GetBucketACLResult is the result returned by a GetBucketACL request
// GetBucketACLResult defines GetBucketACL request's result
type GetBucketACLResult struct {
XMLName xml.Name `xml:"AccessControlPolicy"`
ACL string `xml:"AccessControlList>Grant"` // Bucket permissions
Owner Owner `xml:"Owner"` // Bucket owner information
ACL string `xml:"AccessControlList>Grant"` // Bucket ACL
Owner Owner `xml:"Owner"` // Bucket owner
}
// LifecycleConfiguration is the bucket's lifecycle configuration
// LifecycleConfiguration is the Bucket Lifecycle configuration
type LifecycleConfiguration struct {
XMLName xml.Name `xml:"LifecycleConfiguration"`
Rules []LifecycleRule `xml:"Rule"`
}
// LifecycleRule is a lifecycle rule
// LifecycleRule defines Lifecycle rules
type LifecycleRule struct {
XMLName xml.Name `xml:"Rule"`
ID string `xml:"ID"` // The rule's unique ID
Prefix string `xml:"Prefix"` // The prefix of the objects the rule applies to
Status string `xml:"Status"` // Whether the rule is in effect
Expiration LifecycleExpiration `xml:"Expiration"` // The rule's expiration property
ID string `xml:"ID"` // The rule ID
Prefix string `xml:"Prefix"` // The object key prefix
Status string `xml:"Status"` // The rule status (enabled or not)
Expiration LifecycleExpiration `xml:"Expiration"` // The expiration property
}
// LifecycleExpiration is the rule's expiration property
// LifecycleExpiration defines the rule's expiration property
type LifecycleExpiration struct {
XMLName xml.Name `xml:"Expiration"`
Days int `xml:"Days,omitempty"` // Number of days after the last modified time before the rule takes effect
Date time.Time `xml:"Date,omitempty"` // The date on which the rule takes effect
Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time
Date time.Time `xml:"Date,omitempty"` // Absolute expiration time: The expiration time in date.
}
type lifecycleXML struct {
@ -93,7 +94,7 @@ func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
return rs
}
// BuildLifecycleRuleByDays builds a lifecycle rule from an expiration day count
// BuildLifecycleRuleByDays builds a lifecycle rule with specified expiration days
func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
var statusStr = "Enabled"
if !status {
@ -103,7 +104,7 @@ func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) Lifecycl
Expiration: LifecycleExpiration{Days: days}}
}
// BuildLifecycleRuleByDate builds a lifecycle rule from an expiration date
// BuildLifecycleRuleByDate builds a lifecycle rule with specified expiration time.
func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
var statusStr = "Enabled"
if !status {
@ -114,171 +115,172 @@ func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day i
Expiration: LifecycleExpiration{Date: date}}
}
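A short usage sketch of the two builders above (rule IDs, prefixes, and the SetBucketLifecycle call are illustrative of how the SDK is typically wired up):

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Expire objects under logs/ 30 days after their last modification.
	byDays := oss.BuildLifecycleRuleByDays("expire-logs", "logs/", true, 30)
	// Expire objects under tmp/ at an absolute date.
	byDate := oss.BuildLifecycleRuleByDate("expire-tmp", "tmp/", true, 2020, 1, 1)
	// In real use these would be passed to client.SetBucketLifecycle(bucketName, rules).
	fmt.Println(byDays.ID, byDate.ID)
}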
// GetBucketLifecycleResult is the result returned by a GetBucketLifecycle request
// GetBucketLifecycleResult defines GetBucketLifecycle's result object
type GetBucketLifecycleResult LifecycleConfiguration
// RefererXML is the referer configuration
// RefererXML defines Referer configuration
type RefererXML struct {
XMLName xml.Name `xml:"RefererConfiguration"`
AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Whether requests with an empty referer field are allowed
RefererList []string `xml:"RefererList>Referer"` // Referer access whitelist
AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Allow empty referrer
RefererList []string `xml:"RefererList>Referer"` // Referer whitelist
}
// GetBucketRefererResult is the result returned by a GetBucketReferer request
// GetBucketRefererResult defines result object for GetBucketReferer request
type GetBucketRefererResult RefererXML
// LoggingXML is the logging configuration
// LoggingXML defines logging configuration
type LoggingXML struct {
XMLName xml.Name `xml:"BucketLoggingStatus"`
LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // Access log information container
LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // The logging configuration information
}
type loggingXMLEmpty struct {
XMLName xml.Name `xml:"BucketLoggingStatus"`
}
// LoggingEnabled is the access log information container
// LoggingEnabled defines the logging configuration information
type LoggingEnabled struct {
XMLName xml.Name `xml:"LoggingEnabled"`
TargetBucket string `xml:"TargetBucket"` // The bucket that stores the access logs
TargetPrefix string `xml:"TargetPrefix"` // The prefix of the saved access log files
TargetBucket string `xml:"TargetBucket"` // The bucket name for storing the log files
TargetPrefix string `xml:"TargetPrefix"` // The log file prefix
}
// GetBucketLoggingResult is the result returned by a GetBucketLogging request
// GetBucketLoggingResult defines the result from GetBucketLogging request
type GetBucketLoggingResult LoggingXML
// WebsiteXML is the website configuration
// WebsiteXML defines Website configuration
type WebsiteXML struct {
XMLName xml.Name `xml:"WebsiteConfiguration"`
IndexDocument IndexDocument `xml:"IndexDocument"` // The index file served for directory URLs
ErrorDocument ErrorDocument `xml:"ErrorDocument"` // The file served on 404 errors
IndexDocument IndexDocument `xml:"IndexDocument"` // The index page
ErrorDocument ErrorDocument `xml:"ErrorDocument"` // The error page
}
// IndexDocument is the index file served for directory URLs
// IndexDocument defines the index page info
type IndexDocument struct {
XMLName xml.Name `xml:"IndexDocument"`
Suffix string `xml:"Suffix"` // The index file name served for directory URLs
Suffix string `xml:"Suffix"` // The file name for the index page
}
// ErrorDocument is the file served on 404 errors
// ErrorDocument defines the 404 error page info
type ErrorDocument struct {
XMLName xml.Name `xml:"ErrorDocument"`
Key string `xml:"Key"` // The file name served on 404 errors
Key string `xml:"Key"` // 404 error file name
}
// GetBucketWebsiteResult is the result returned by a GetBucketWebsite request
// GetBucketWebsiteResult defines the result from GetBucketWebsite request.
type GetBucketWebsiteResult WebsiteXML
// CORSXML is the CORS configuration
// CORSXML defines CORS configuration
type CORSXML struct {
XMLName xml.Name `xml:"CORSConfiguration"`
CORSRules []CORSRule `xml:"CORSRule"` // The list of CORS rules
CORSRules []CORSRule `xml:"CORSRule"` // CORS rules
}
// CORSRule is a CORS rule
// CORSRule defines CORS rules
type CORSRule struct {
XMLName xml.Name `xml:"CORSRule"`
AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins; defaults to the wildcard "*"
AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods
AllowedHeader []string `xml:"AllowedHeader"` // Allowed request headers
ExposeHeader []string `xml:"ExposeHeader"` // Allowed response headers
MaxAgeSeconds int `xml:"MaxAgeSeconds"` // Maximum cache duration
AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins. By default it's wildcard '*'
AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods
AllowedHeader []string `xml:"AllowedHeader"` // Allowed headers
ExposeHeader []string `xml:"ExposeHeader"` // Allowed response headers
MaxAgeSeconds int `xml:"MaxAgeSeconds"` // Max cache age in seconds
}
// GetBucketCORSResult is the result returned by a GetBucketCORS request
// GetBucketCORSResult defines the result from GetBucketCORS request.
type GetBucketCORSResult CORSXML
// GetBucketInfoResult is the result returned by a GetBucketInfo request
// GetBucketInfoResult defines the result from GetBucketInfo request.
type GetBucketInfoResult struct {
XMLName xml.Name `xml:"BucketInfo"`
BucketInfo BucketInfo `xml:"Bucket"`
XMLName xml.Name `xml:"BucketInfo"`
BucketInfo BucketInfo `xml:"Bucket"`
}
// BucketInfo is the bucket information
// BucketInfo defines Bucket information
type BucketInfo struct {
XMLName xml.Name `xml:"Bucket"`
Name string `xml:"Name"` // Bucket name
Location string `xml:"Location"` // The data center the bucket resides in
CreationDate time.Time `xml:"CreationDate"` // Bucket creation time
ExtranetEndpoint string `xml:"ExtranetEndpoint"` // The public endpoint for accessing the bucket
IntranetEndpoint string `xml:"IntranetEndpoint"` // The internal endpoint for accessing the bucket
ACL string `xml:"AccessControlList>Grant"` // Bucket permissions
Owner Owner `xml:"Owner"` // Bucket owner information
Name string `xml:"Name"` // Bucket name
Location string `xml:"Location"` // Bucket datacenter
CreationDate time.Time `xml:"CreationDate"` // Bucket creation time
ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket external endpoint
IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket internal endpoint
ACL string `xml:"AccessControlList>Grant"` // Bucket ACL
Owner Owner `xml:"Owner"` // Bucket owner
StorageClass string `xml:"StorageClass"` // Bucket storage class
}
// ListObjectsResult is the result returned by a ListObjects request
// ListObjectsResult defines the result from ListObjects request
type ListObjectsResult struct {
XMLName xml.Name `xml:"ListBucketResult"`
Prefix string `xml:"Prefix"` // The starting prefix of this query's results
Marker string `xml:"Marker"` // The starting point of this query
MaxKeys int `xml:"MaxKeys"` // The max number of results to return
Delimiter string `xml:"Delimiter"` // The character used to group object names
IsTruncated bool `xml:"IsTruncated"` // Whether all results have been returned
NextMarker string `xml:"NextMarker"` // The starting point of the next query
Objects []ObjectProperties `xml:"Contents"` // The object list
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // Set of objects that share a common prefix and end with the delimiter
Prefix string `xml:"Prefix"` // The object prefix
Marker string `xml:"Marker"` // The marker filter.
MaxKeys int `xml:"MaxKeys"` // Max keys to return
Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name
IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false)
NextMarker string `xml:"NextMarker"` // The start point of the next query
Objects []ObjectProperties `xml:"Contents"` // Object list
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter
}
// ObjectProperties describes object properties
// ObjectProperties defines Object properties
type ObjectProperties struct {
XMLName xml.Name `xml:"Contents"`
Key string `xml:"Key"` // The object's key
Type string `xml:"Type"` // Object type
Size int64 `xml:"Size"` // The object's size in bytes
ETag string `xml:"ETag"` // Identifies the object's content
Owner Owner `xml:"Owner"` // Container holding the object owner's information
LastModified time.Time `xml:"LastModified"` // The object's last modified time
StorageClass string `xml:"StorageClass"` // The object's storage class; currently only Standard
Key string `xml:"Key"` // Object key
Type string `xml:"Type"` // Object type
Size int64 `xml:"Size"` // Object size
ETag string `xml:"ETag"` // Object ETag
Owner Owner `xml:"Owner"` // Object owner information
LastModified time.Time `xml:"LastModified"` // Object last modified time
StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive)
}
// Owner is the bucket/object owner
// Owner defines Bucket/Object's owner
type Owner struct {
XMLName xml.Name `xml:"Owner"`
ID string `xml:"ID"` // User ID
DisplayName string `xml:"DisplayName"` // Owner name
ID string `xml:"ID"` // Owner ID
DisplayName string `xml:"DisplayName"` // Owner's display name
}
// CopyObjectResult is the result returned by a CopyObject request
// CopyObjectResult defines result object of CopyObject
type CopyObjectResult struct {
XMLName xml.Name `xml:"CopyObjectResult"`
LastModified time.Time `xml:"LastModified"` // The new object's last modified time
ETag string `xml:"ETag"` // The new object's ETag value
LastModified time.Time `xml:"LastModified"` // New object's last modified time.
ETag string `xml:"ETag"` // New object's ETag
}
// GetObjectACLResult is the result returned by a GetObjectACL request
// GetObjectACLResult defines result of GetObjectACL request
type GetObjectACLResult GetBucketACLResult
type deleteXML struct {
XMLName xml.Name `xml:"Delete"`
Objects []DeleteObject `xml:"Object"` // All objects to delete
Quiet bool `xml:"Quiet"` // Quiet response mode
Objects []DeleteObject `xml:"Object"` // Objects to delete
Quiet bool `xml:"Quiet"` // Flag of quiet mode.
}
// DeleteObject is an object to delete
// DeleteObject defines the struct for deleting object
type DeleteObject struct {
XMLName xml.Name `xml:"Object"`
Key string `xml:"Key"` // Object name
Key string `xml:"Key"` // Object name
}
// DeleteObjectsResult is the result returned by a DeleteObjects request
// DeleteObjectsResult defines result of DeleteObjects request
type DeleteObjectsResult struct {
XMLName xml.Name `xml:"DeleteResult"`
DeletedObjects []string `xml:"Deleted>Key"` // The list of deleted objects
DeletedObjects []string `xml:"Deleted>Key"` // Deleted object list
}
// InitiateMultipartUploadResult is the result returned by an InitiateMultipartUpload request
// InitiateMultipartUploadResult defines result of InitiateMultipartUpload request
type InitiateMultipartUploadResult struct {
XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
Bucket string `xml:"Bucket"` // Bucket name
Key string `xml:"Key"` // Name of the object being uploaded
UploadID string `xml:"UploadId"` // The generated UploadId
Bucket string `xml:"Bucket"` // Bucket name
Key string `xml:"Key"` // Object name to upload
UploadID string `xml:"UploadId"` // Generated UploadId
}
// UploadPart is an uploaded/copied part
// UploadPart defines the upload/copy part
type UploadPart struct {
XMLName xml.Name `xml:"Part"`
PartNumber int `xml:"PartNumber"` // Part number
ETag string `xml:"ETag"` // ETag checksum
PartNumber int `xml:"PartNumber"` // Part number
ETag string `xml:"ETag"` // ETag value of the part's data
}
type uploadParts []UploadPart
@ -295,10 +297,10 @@ func (slice uploadParts) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
// UploadPartCopyResult is the result returned by a part copy request
// UploadPartCopyResult defines result object of multipart copy request.
type UploadPartCopyResult struct {
XMLName xml.Name `xml:"CopyPartResult"`
LastModified time.Time `xml:"LastModified"` // Last modified time
LastModified time.Time `xml:"LastModified"` // Last modified time
ETag string `xml:"ETag"` // ETag
}
@ -307,61 +309,69 @@ type completeMultipartUploadXML struct {
Part []UploadPart `xml:"Part"`
}
// CompleteMultipartUploadResult is the result returned when completing a multipart upload task
// CompleteMultipartUploadResult defines result object of CompleteMultipartUploadRequest
type CompleteMultipartUploadResult struct {
XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
Location string `xml:"Location"` // Object URL
Bucket string `xml:"Bucket"` // Bucket name
ETag string `xml:"ETag"` // Object ETag
Key string `xml:"Key"` // Object name
Location string `xml:"Location"` // Object URL
Bucket string `xml:"Bucket"` // Bucket name
ETag string `xml:"ETag"` // Object ETag
Key string `xml:"Key"` // Object name
}
// ListUploadedPartsResult is the result returned by a ListUploadedParts request
// ListUploadedPartsResult defines result object of ListUploadedParts
type ListUploadedPartsResult struct {
XMLName xml.Name `xml:"ListPartsResult"`
Bucket string `xml:"Bucket"` // Bucket name
Key string `xml:"Key"` // Object name
UploadID string `xml:"UploadId"` // Upload ID
NextPartNumberMarker string `xml:"NextPartNumberMarker"` // Position of the next part
MaxParts int `xml:"MaxParts"` // Max number of parts
IsTruncated bool `xml:"IsTruncated"` // Whether the listing is complete
UploadedParts []UploadedPart `xml:"Part"` // Completed parts
Bucket string `xml:"Bucket"` // Bucket name
Key string `xml:"Key"` // Object name
UploadID string `xml:"UploadId"` // Upload ID
NextPartNumberMarker string `xml:"NextPartNumberMarker"` // Next part number
MaxParts int `xml:"MaxParts"` // Max parts count
IsTruncated bool `xml:"IsTruncated"` // Flag indicating whether all entries are returned; false means all entries have been returned.
UploadedParts []UploadedPart `xml:"Part"` // Uploaded parts
}
// UploadedPart is a part already uploaded for this task
// UploadedPart defines uploaded part
type UploadedPart struct {
XMLName xml.Name `xml:"Part"`
PartNumber int `xml:"PartNumber"` // Part number
LastModified time.Time `xml:"LastModified"` // Last modified time
ETag string `xml:"ETag"` // ETag checksum
Size int `xml:"Size"` // Part size
PartNumber int `xml:"PartNumber"` // Part number
LastModified time.Time `xml:"LastModified"` // Last modified time
ETag string `xml:"ETag"` // ETag value of the part's data
Size int `xml:"Size"` // Part size
}
// ListMultipartUploadResult is the result returned by a ListMultipartUpload request
// ListMultipartUploadResult defines result object of ListMultipartUpload
type ListMultipartUploadResult struct {
XMLName xml.Name `xml:"ListMultipartUploadsResult"`
Bucket string `xml:"Bucket"` // Bucket name
Delimiter string `xml:"Delimiter"` // Grouping delimiter
Prefix string `xml:"Prefix"` // Filter prefix
KeyMarker string `xml:"KeyMarker"` // The starting object position
UploadIDMarker string `xml:"UploadIdMarker"` // The starting UploadId position
NextKeyMarker string `xml:"NextKeyMarker"` // The next KeyMarker position when not everything was returned
NextUploadIDMarker string `xml:"NextUploadIdMarker"` // The next UploadId position when not everything was returned
MaxUploads int `xml:"MaxUploads"` // Max number of uploads to return
IsTruncated bool `xml:"IsTruncated"` // Whether everything was returned
Uploads []UncompletedUpload `xml:"Upload"` // Uncompleted multipart uploads
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // Groups of objects whose names share the given prefix up to the first occurrence of the delimiter
Bucket string `xml:"Bucket"` // Bucket name
Delimiter string `xml:"Delimiter"` // Delimiter for grouping object.
Prefix string `xml:"Prefix"` // Object prefix
KeyMarker string `xml:"KeyMarker"` // Object key marker
UploadIDMarker string `xml:"UploadIdMarker"` // UploadId marker
NextKeyMarker string `xml:"NextKeyMarker"` // Next key marker, if not all entries returned.
NextUploadIDMarker string `xml:"NextUploadIdMarker"` // Next uploadId marker, if not all entries returned.
MaxUploads int `xml:"MaxUploads"` // Max uploads to return
IsTruncated bool `xml:"IsTruncated"` // Flag indicates all entries are returned.
Uploads []UncompletedUpload `xml:"Upload"` // Ongoing uploads (not completed, not aborted)
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // Common prefixes list.
}
// UncompletedUpload is an uncompleted upload task
// UncompletedUpload structure wraps an uncompleted upload task
type UncompletedUpload struct {
XMLName xml.Name `xml:"Upload"`
Key string `xml:"Key"` // Object name
UploadID string `xml:"UploadId"` // The corresponding UploadId
Initiated time.Time `xml:"Initiated"` // Initiation time, in a format like 2012-02-23T04:18:23.000Z
Key string `xml:"Key"` // Object name
UploadID string `xml:"UploadId"` // The UploadId
Initiated time.Time `xml:"Initiated"` // Initialization time in the format such as 2012-02-23T04:18:23.000Z
}
// Parses URL-encoded values
// ProcessObjectResult defines result object of ProcessObject
type ProcessObjectResult struct {
Bucket string `json:"bucket"`
FileSize int `json:"fileSize"`
Object string `json:"object"`
Status string `json:"status"`
}
// decodeDeleteObjectsResult decodes deleting objects result in URL encoding
func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
var err error
for i := 0; i < len(result.DeletedObjects); i++ {
@ -373,7 +383,7 @@ func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
return nil
}
// Parses URL-encoded values
// decodeListObjectsResult decodes list objects result in URL encoding
func decodeListObjectsResult(result *ListObjectsResult) error {
var err error
result.Prefix, err = url.QueryUnescape(result.Prefix)
@ -407,7 +417,17 @@ func decodeListObjectsResult(result *ListObjectsResult) error {
return nil
}
// Parses URL-encoded values
// decodeListUploadedPartsResult decodes the uploaded parts result in URL encoding
func decodeListUploadedPartsResult(result *ListUploadedPartsResult) error {
var err error
result.Key, err = url.QueryUnescape(result.Key)
if err != nil {
return err
}
return nil
}
// decodeListMultipartUploadResult decodes list multipart upload result in URL encoding
func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
var err error
result.Prefix, err = url.QueryUnescape(result.Prefix)
@ -440,3 +460,9 @@ func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
}
return nil
}
// createBucketConfiguration defines the configuration for creating a bucket.
type createBucketConfiguration struct {
XMLName xml.Name `xml:"CreateBucketConfiguration"`
StorageClass StorageClassType `xml:"StorageClass,omitempty"`
}
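For reference, encoding/xml turns that struct into the request body OSS expects. A standalone sketch (StorageClassType is replaced by a plain string here to keep it self-contained):

package main

import (
	"encoding/xml"
	"fmt"
)

type createBucketConfiguration struct {
	XMLName      xml.Name `xml:"CreateBucketConfiguration"`
	StorageClass string   `xml:"StorageClass,omitempty"`
}

func main() {
	body, err := xml.Marshal(createBucketConfiguration{StorageClass: "IA"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
	// <CreateBucketConfiguration><StorageClass>IA</StorageClass></CreateBucketConfiguration>
}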


@ -3,61 +3,79 @@ package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
)
// UploadFile uploads a file in multiple parts.
//
// UploadFile uploads the file in parts.
// objectKey the object name.
// filePath the local file path to upload.
// partSize the part size in byte.
// options the options for uploading object.
//
// objectKey the object name.
// filePath the local file; the file to upload.
// partSize the size in bytes of each upload part, e.g. 100 * 1024 for 100KB parts.
// options attributes that can be set on the object while uploading; see InitiateMultipartUpload for details.
//
// error nil on success, otherwise the error information.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
if partSize < MinPartSize || partSize > MaxPartSize {
return errors.New("oss: part size invalid range (1024KB, 5GB]")
}
cpConf, err := getCpConfig(options, filePath)
if err != nil {
return err
return errors.New("oss: part size invalid range (100KB, 5GB]")
}
cpConf := getCpConfig(options)
routines := getRoutines(options)
if cpConf.IsEnable {
return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
if cpConf != nil && cpConf.IsEnable {
cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
if cpFilePath != "" {
return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
}
}
return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
}
// ----- concurrent upload without checkpoint -----
// Gets the checkpoint configuration
func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
cpc := &cpConfig{}
cpcOpt, err := findOption(options, checkpointConfig, nil)
if err != nil || cpcOpt == nil {
return cpc, err
func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
if cpConf.FilePath == "" && cpConf.DirPath != "" {
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
absPath, _ := filepath.Abs(srcFile)
cpFileName := getCpFileName(absPath, dest)
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
}
cpc = cpcOpt.(*cpConfig)
if cpc.IsEnable && cpc.FilePath == "" {
cpc.FilePath = filePath + CheckpointFileSuffix
}
return cpc, nil
return cpConf.FilePath
}
// Gets the concurrency count; it defaults to 1
// ----- concurrent upload without checkpoint -----
// getCpConfig gets checkpoint configuration
func getCpConfig(options []Option) *cpConfig {
cpcOpt, err := findOption(options, checkpointConfig, nil)
if err != nil || cpcOpt == nil {
return nil
}
return cpcOpt.(*cpConfig)
}
// getCpFileName returns the name of the checkpoint file
func getCpFileName(src, dest string) string {
md5Ctx := md5.New()
md5Ctx.Write([]byte(src))
srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
md5Ctx.Reset()
md5Ctx.Write([]byte(dest))
destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
}
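The checkpoint file name is therefore deterministic for a given (source file, upload destination) pair, so re-running the same upload finds the same .cp file. A standalone sketch of the same computation:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func cpFileName(src, dest string) string {
	s := md5.Sum([]byte(src))
	d := md5.Sum([]byte(dest))
	return fmt.Sprintf("%v-%v.cp", hex.EncodeToString(s[:]), hex.EncodeToString(d[:]))
}

func main() {
	// Prints a name of the form <md5(src)>-<md5(dest)>.cp
	fmt.Println(cpFileName("/data/big.bin", "oss://my-bucket/backups/big.bin"))
}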
// getRoutines gets the routine count; by default it's 1.
func getRoutines(options []Option) int {
rtnOpt, err := findOption(options, routineNum, nil)
if err != nil || rtnOpt == nil {
@ -74,7 +92,17 @@ func getRoutines(options []Option) int {
return rs
}
// Gets the progress callback
// getPayer returns the payer of the request
func getPayer(options []Option) string {
payerOpt, err := findOption(options, HTTPHeaderOSSRequester, nil)
if err != nil || payerOpt == nil {
return ""
}
return payerOpt.(string)
}
// getProgressListener gets the progress callback
func getProgressListener(options []Option) ProgressListener {
isSet, listener, _ := isOptionSet(options, progressListener)
if !isSet {
@ -83,7 +111,7 @@ func getProgressListener(options []Option) ProgressListener {
return listener.(ProgressListener)
}
// For testing
// uploadPartHook is for testing usage
type uploadPartHook func(id int, chunk FileChunk) error
var uploadPartHooker uploadPartHook = defaultUploadPart
@ -92,22 +120,23 @@ func defaultUploadPart(id int, chunk FileChunk) error {
return nil
}
// Worker goroutine arguments
// workerArg defines worker argument structure
type workerArg struct {
bucket *Bucket
filePath string
imur InitiateMultipartUploadResult
options []Option
hook uploadPartHook
}
// Worker goroutine
// worker is the worker coroutine function
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
for chunk := range jobs {
if err := arg.hook(id, chunk); err != nil {
failed <- err
break
}
part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number)
part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, arg.options...)
if err != nil {
failed <- err
break
@ -121,7 +150,7 @@ func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadP
}
}
// Scheduler goroutine
// scheduler function
func scheduler(jobs chan FileChunk, chunks []FileChunk) {
for _, chunk := range chunks {
jobs <- chunk
@ -137,7 +166,7 @@ func getTotalBytes(chunks []FileChunk) int64 {
return tb
}
// Concurrent upload, without resumable checkpoint support
// uploadFile is a concurrent upload, without checkpoint
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
listener := getProgressListener(options)
@ -146,7 +175,13 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
return err
}
// Initialize the upload task
payerOptions := []Option{}
payer := getPayer(options)
if payer != "" {
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
}
// Initialize the multipart upload
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
if err != nil {
return err
@ -162,16 +197,16 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
publishProgress(listener, event)
// Start the worker goroutines
arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
// Start the worker coroutine
arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
for w := 1; w <= routines; w++ {
go worker(w, arg, jobs, results, failed, die)
}
// Upload parts concurrently
// Schedule the jobs
go scheduler(jobs, chunks)
// Wait for the scheduled part uploads to finish
// Wait for the uploads to finish
completed := 0
parts := make([]UploadPart, len(chunks))
for completed < len(chunks) {
@ -186,7 +221,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
bucket.AbortMultipartUpload(imur)
bucket.AbortMultipartUpload(imur, payerOptions...)
return err
}
@ -198,43 +233,43 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
event = newProgressEvent(TransferStartedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
// Complete the upload task
_, err = bucket.CompleteMultipartUpload(imur, parts)
// Complete the multipart upload
_, err = bucket.CompleteMultipartUpload(imur, parts, payerOptions...)
if err != nil {
bucket.AbortMultipartUpload(imur)
bucket.AbortMultipartUpload(imur, payerOptions...)
return err
}
return nil
}
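The concurrency shape here is a fixed pool of workers fed by a scheduler goroutine, with a failed channel for errors and a die channel that cancels the siblings. Stripped of the upload details, the same pattern looks like this sketch (the squaring stands in for UploadPartFromFile):

package main

import "fmt"

func worker(id int, jobs <-chan int, results chan<- int, failed chan<- error, die <-chan bool) {
	for j := range jobs {
		// Real code would upload a part here and send any error to failed.
		select {
		case results <- j * j:
		case <-die: // a sibling failed; stop quietly
			return
		}
	}
}

func main() {
	jobs, results := make(chan int), make(chan int)
	failed, die := make(chan error), make(chan bool)
	for w := 1; w <= 3; w++ {
		go worker(w, jobs, results, failed, die)
	}
	// Scheduler: feed every chunk, then close the jobs channel.
	go func() {
		for j := 1; j <= 5; j++ {
			jobs <- j
		}
		close(jobs)
	}()
	for done := 0; done < 5; done++ {
		select {
		case r := <-results:
			fmt.Println("completed:", r)
		case err := <-failed:
			close(die) // tell the other workers to stop
			fmt.Println("abort:", err)
			return
		}
	}
}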
// ----- concurrent upload with checkpoint -----
// ----- concurrent upload with checkpoint -----
const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
type uploadCheckpoint struct {
Magic string // magic
MD5 string // MD5 of the checkpoint content
FilePath string // Local file
FileStat cpStat // File state
ObjectKey string // key
UploadID string // upload id
Parts []cpPart // All parts of the local file
Magic string // Magic
MD5 string // Checkpoint file content's MD5
FilePath string // Local file path
FileStat cpStat // File state
ObjectKey string // Key
UploadID string // Upload ID
Parts []cpPart // All parts of the local file
}
type cpStat struct {
Size int64 // File size
LastModified time.Time // The local file's last modified time
MD5 string // The local file's MD5
Size int64 // File size
LastModified time.Time // File's last modified time
MD5 string // Local file's MD5
}
type cpPart struct {
Chunk FileChunk // The part chunk
Part UploadPart // The uploaded part
IsCompleted bool // Whether the upload is complete
Chunk FileChunk // File chunk
Part UploadPart // Uploaded part
IsCompleted bool // Upload complete flag
}
// Whether the CP data is valid: it is valid when the CP itself is valid and the file has not been updated
// isValid checks if the uploaded data is valid: it's valid when the file is not updated and the checkpoint data is valid.
func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
// Compare the CP's magic number and MD5
// Compare the CP's magic number and MD5.
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
@ -245,7 +280,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
return false, nil
}
// Check whether the local file has been updated
// Make sure if the local file is updated.
fd, err := os.Open(filePath)
if err != nil {
return false, err
@ -262,7 +297,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
return false, err
}
// Compare the file size, last modified time and MD5
// Compare the file size, file's last modified time and file's MD5
if cp.FileStat.Size != st.Size() ||
cp.FileStat.LastModified != st.ModTime() ||
cp.FileStat.MD5 != md {
@ -272,7 +307,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
return true, nil
}
// Load from the file
// load loads from the file
func (cp *uploadCheckpoint) load(filePath string) error {
contents, err := ioutil.ReadFile(filePath)
if err != nil {
@ -283,11 +318,11 @@ func (cp *uploadCheckpoint) load(filePath string) error {
return err
}
// Dump to the file
// dump dumps to the local file
func (cp *uploadCheckpoint) dump(filePath string) error {
bcp := *cp
// Calculate the MD5
// Calculate MD5
bcp.MD5 = ""
js, err := json.Marshal(bcp)
if err != nil {
@ -297,23 +332,23 @@ func (cp *uploadCheckpoint) dump(filePath string) error {
b64 := base64.StdEncoding.EncodeToString(sum[:])
bcp.MD5 = b64
// Serialize
// Serialization
js, err = json.Marshal(bcp)
if err != nil {
return err
}
// dump
// Dump
return ioutil.WriteFile(filePath, js, FilePermMode)
}
// Update the part's status
// updatePart updates the part status
func (cp *uploadCheckpoint) updatePart(part UploadPart) {
cp.Parts[part.PartNumber-1].Part = part
cp.Parts[part.PartNumber-1].IsCompleted = true
}
// Unfinished parts
// todoParts returns unfinished parts
func (cp *uploadCheckpoint) todoParts() []FileChunk {
fcs := []FileChunk{}
for _, part := range cp.Parts {
@ -324,7 +359,7 @@ func (cp *uploadCheckpoint) todoParts() []FileChunk {
return fcs
}
// All parts
// allParts returns all parts
func (cp *uploadCheckpoint) allParts() []UploadPart {
ps := []UploadPart{}
for _, part := range cp.Parts {
@ -333,7 +368,7 @@ func (cp *uploadCheckpoint) allParts() []UploadPart {
return ps
}
// Number of completed bytes
// getCompletedBytes returns completed bytes count
func (cp *uploadCheckpoint) getCompletedBytes() int64 {
var completedBytes int64
for _, part := range cp.Parts {
@ -344,19 +379,19 @@ func (cp *uploadCheckpoint) getCompletedBytes() int64 {
return completedBytes
}
// Calculate the file's MD5
// calcFileMD5 calculates the MD5 for the specified local file
func calcFileMD5(filePath string) (string, error) {
return "", nil
}
// Initialize the multipart upload
// prepare initializes the multipart upload
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
// cp
// CP
cp.Magic = uploadCpMagic
cp.FilePath = filePath
cp.ObjectKey = objectKey
// localfile
// Local file
fd, err := os.Open(filePath)
if err != nil {
return err
@ -375,7 +410,7 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
}
cp.FileStat.MD5 = md
// chunks
// Chunks
parts, err := SplitFileByPartSize(filePath, partSize)
if err != nil {
return err
@ -387,7 +422,7 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
cp.Parts[i].IsCompleted = false
}
// init load
// Init load
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
if err != nil {
return err
@ -397,11 +432,11 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
return nil
}
// Complete the multipart upload and delete the CP file
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
// complete completes the multipart upload and deletes the local CP files
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
Key: cp.ObjectKey, UploadID: cp.UploadID}
_, err := bucket.CompleteMultipartUpload(imur, parts)
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
if err != nil {
return err
}
@ -409,18 +444,24 @@ func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePa
return err
}
// Concurrent upload with checkpoint
// uploadFileWithCp handles concurrent upload with checkpoint
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
listener := getProgressListener(options)
// Load the CP data
payerOptions := []Option{}
payer := getPayer(options)
if payer != "" {
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
}
// Load CP data
ucp := uploadCheckpoint{}
err := ucp.load(cpFilePath)
if err != nil {
os.Remove(cpFilePath)
}
// If loading failed or the data is invalid, re-initialize the upload
// Load error or the CP data is invalid.
valid, err := ucp.isValid(filePath)
if err != nil || !valid {
if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
@ -444,16 +485,16 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size)
publishProgress(listener, event)
// Start the worker goroutines
arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
// Start the workers
arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
for w := 1; w <= routines; w++ {
go worker(w, arg, jobs, results, failed, die)
}
// Upload parts concurrently
// Schedule jobs
go scheduler(jobs, chunks)
// Wait for the scheduled part uploads to finish
// Wait for the jobs to finish
completed := 0
for completed < len(chunks) {
select {
@ -479,7 +520,7 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size)
publishProgress(listener, event)
// Complete the multipart upload
err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
// Complete the multipart upload
err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, payerOptions)
return err
}
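End to end, the checkpoint path is switched on through options. This sketch assumes the SDK's usual New/Bucket constructors and the Routines and Checkpoint option helpers keep their familiar signatures; endpoint, credentials, and names are placeholders:

package main

import (
	"log"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret")
	if err != nil {
		log.Fatal(err)
	}
	bucket, err := client.Bucket("my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	// 3 concurrent part uploads; resumable via a checkpoint file next to the source.
	err = bucket.UploadFile("backups/big.bin", "/data/big.bin", 500*1024,
		oss.Routines(3), oss.Checkpoint(true, "/data/big.bin.cp"))
	if err != nil {
		log.Fatal(err)
	}
}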


@ -9,25 +9,27 @@ import (
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"time"
)
// Get User Agent
// Go SDK information, including the SDK version, OS type and Go version
var userAgent = func() string {
// userAgent gets user agent
// It has the SDK version information, OS information and GO version
func userAgent() string {
sys := getSysInfo()
return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
sys.release, sys.machine, runtime.Version())
}()
type sysInfo struct {
name string // OS name, e.g. windows/Linux
release string // OS version, e.g. 2.6.32-220.23.2.ali1089.el5.x86_64
machine string // Machine type, e.g. amd64/x86_64
}
// Get system info
// Gets the OS information and machine type
type sysInfo struct {
name string // OS name such as windows/Linux
release string // OS version 2.6.32-220.23.2.ali1089.el5.x86_64 etc
machine string // CPU type amd64/x86_64
}
// getSysInfo gets system info
// gets the OS information and CPU type
func getSysInfo() sysInfo {
name := runtime.GOOS
release := "-"
@ -44,8 +46,106 @@ func getSysInfo() sysInfo {
return sysInfo{name: name, release: release, machine: machine}
}
// unpackedRange
type unpackedRange struct {
hasStart bool // Flag indicates if the start point is specified
hasEnd bool // Flag indicates if the end point is specified
start int64 // Start point
end int64 // End point
}
// invalidRangeError returns invalid range error
func invalidRangeError(r string) error {
return fmt.Errorf("InvalidRange %s", r)
}
// parseRange parses the various range styles such as bytes=M-N
func parseRange(normalizedRange string) (*unpackedRange, error) {
var err error
hasStart := false
hasEnd := false
var start int64
var end int64
// bytes=M-N or ranges=M-N
nrSlice := strings.Split(normalizedRange, "=")
if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
return nil, invalidRangeError(normalizedRange)
}
// Bytes=M-N,X-Y
rSlice := strings.Split(nrSlice[1], ",")
rStr := rSlice[0]
if strings.HasSuffix(rStr, "-") { // M-
startStr := rStr[:len(rStr)-1]
start, err = strconv.ParseInt(startStr, 10, 64)
if err != nil {
return nil, invalidRangeError(normalizedRange)
}
hasStart = true
} else if strings.HasPrefix(rStr, "-") { // -N
endStr := rStr[1:]
end, err = strconv.ParseInt(endStr, 10, 64)
if err != nil {
return nil, invalidRangeError(normalizedRange)
}
if end == 0 { // -0
return nil, invalidRangeError(normalizedRange)
}
hasEnd = true
} else { // M-N
valSlice := strings.Split(rStr, "-")
if len(valSlice) != 2 {
return nil, invalidRangeError(normalizedRange)
}
start, err = strconv.ParseInt(valSlice[0], 10, 64)
if err != nil {
return nil, invalidRangeError(normalizedRange)
}
hasStart = true
end, err = strconv.ParseInt(valSlice[1], 10, 64)
if err != nil {
return nil, invalidRangeError(normalizedRange)
}
hasEnd = true
}
return &unpackedRange{hasStart, hasEnd, start, end}, nil
}
// adjustRange returns the adjusted range; the range is adjusted according to the length of the file
func adjustRange(ur *unpackedRange, size int64) (start, end int64) {
if ur == nil {
return 0, size
}
if ur.hasStart && ur.hasEnd {
start = ur.start
end = ur.end + 1
if ur.start < 0 || ur.start >= size || ur.end > size || ur.start > ur.end {
start = 0
end = size
}
} else if ur.hasStart {
start = ur.start
end = size
if ur.start < 0 || ur.start >= size {
start = 0
}
} else if ur.hasEnd {
start = size - ur.end
end = size
if ur.end < 0 || ur.end > size {
start = 0
end = size
}
}
return
}
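Concretely, for a 100-byte object the accepted forms resolve as follows (adjustRange yields a half-open [start, end) window):

// "bytes=0-9"  -> hasStart+hasEnd, start=0, end=9  -> adjusted to [0, 10)
// "bytes=90-"  -> hasStart only,   start=90        -> adjusted to [90, 100)
// "bytes=-10"  -> hasEnd only,     end=10          -> adjusted to [90, 100)
// "bytes=x-9"  -> invalidRangeError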
// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC.
// 获取当前时间从UTC开始的秒数。
// gets the current time in Unix time, in seconds.
func GetNowSec() int64 {
return time.Now().Unix()
}
@ -54,25 +154,25 @@ func GetNowSec() int64 {
// since January 1, 1970 UTC. The result is undefined if the Unix time
// in nanoseconds cannot be represented by an int64. Note that this
// means the result of calling UnixNano on the zero Time is undefined.
// 获取当前时间从UTC开始的纳秒。
// gets the current time in Unix time, in nanoseconds.
func GetNowNanoSec() int64 {
return time.Now().UnixNano()
}
// GetNowGMT gets the current time in a format like "Mon, 02 Jan 2006 15:04:05 GMT", the time format used in HTTP.
// GetNowGMT gets the current time in GMT format.
func GetNowGMT() string {
return time.Now().UTC().Format(http.TimeFormat)
}
// FileChunk is the file chunk definition
// FileChunk is the file chunk definition
type FileChunk struct {
Number int // Chunk number
Offset int64 // Chunk offset within the file
Size int64 // Chunk size
Number int // Chunk number
Offset int64 // Chunk offset
Size int64 // Chunk size.
}
// SplitFileByPartNum splits a big file into parts by the number of parts
// Splits the file into the specified number of chunks. The returned FileChunk slice is valid when error is nil.
// SplitFileByPartNum splits big file into parts by the num of parts.
// Split the file with specified parts count, returns the split result when error is nil.
func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
if chunkNum <= 0 || chunkNum > 10000 {
return nil, errors.New("chunkNum invalid")
@ -110,8 +210,8 @@ func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
return chunks, nil
}
// SplitFileByPartSize splits a big file into parts by part size
// Splits the file by chunk size. The returned FileChunk slice is valid when error is nil.
// SplitFileByPartSize splits big file into parts by the size of parts.
// Splits the file by the part size. Returns the FileChunk when error is nil.
func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
if chunkSize <= 0 {
return nil, errors.New("chunkSize invalid")
@ -129,7 +229,7 @@ func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error)
}
var chunkN = stat.Size() / chunkSize
if chunkN >= 10000 {
return nil, errors.New("Too many parts, please increase part size.")
return nil, errors.New("Too many parts, please increase part size")
}
var chunks []FileChunk
@ -151,7 +251,7 @@ func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error)
return chunks, nil
}
// GetPartEnd calculates the end position
// GetPartEnd calculates the end position
func GetPartEnd(begin int64, total int64, per int64) int64 {
if begin+per > total {
return total - 1
@ -159,7 +259,7 @@ func GetPartEnd(begin int64, total int64, per int64) int64 {
return begin + per - 1
}
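A quick caller's sketch for the splitters above (the 10,000-part ceiling mirrors OSS's multipart upload limit; the file path is a placeholder):

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Split a local file into 100KB chunks, then walk them.
	chunks, err := oss.SplitFileByPartSize("/data/big.bin", 100*1024)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, c := range chunks {
		fmt.Printf("part %d: offset=%d size=%d\n", c.Number, c.Offset, c.Size)
	}
}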
// crcTable returns the Table constructed from the specified polynomial
// crcTable returns the table constructed from the specified polynomial
var crcTable = func() *crc64.Table {
return crc64.MakeTable(crc64.ECMA)
}


@ -2,8 +2,8 @@ language: go
go:
- 1.6
- 1.7
- 1.8
- 1.9
- '1.10'
install:
- go get github.com/mattn/goveralls


@ -7,9 +7,17 @@ XPath
XPath is a Go package that provides node selection from XML, HTML, or other documents using XPath expressions.
[XQuery](https://github.com/antchfx/xquery) : lets you extract data from HTML/XML documents using the XPath package.
Implementation
===
### Features
- [htmlquery](https://github.com/antchfx/htmlquery) - an XPath query package for HTML documents
- [xmlquery](https://github.com/antchfx/xmlquery) - an XPath query package for XML documents
- [jsonquery](https://github.com/antchfx/jsonquery) - an XPath query package for JSON documents
Supported Features
===
#### The basic XPath patterns.
@ -45,7 +53,10 @@ XPath is Go package provides selecting nodes from XML, HTML or other documents u
- `//b` : Returns elements in the entire document matching b.
- `a|b` : All nodes matching a or b.
- `a|b` : All nodes matching a or b, union operation(not boolean or).
- `(a, b, c)` : Evaluates each of its operands and concatenates the resulting sequences, in order, into a single result sequence
#### Node Axes
@ -97,23 +108,60 @@ XPath is Go package provides selecting nodes from XML, HTML or other documents u
* a div b Divide
* a mod b Floating point mod, like Java.
- `a or b` : Boolean `or` operation.
- `a and b` : Boolean `and` operation.
- `(expr)` : Parenthesized expressions.
- `fun(arg1, ..., argn)` : Function calls.
- `fun(arg1, ..., argn)` : Function calls:
* position()
* last()
* count( node-set )
* name()
* starts-with( string, string )
* normalize-space( string )
* substring( string , start [, length] )
* not( expression )
* string-length( [string] )
* contains( string, string )
* sum( node-set )
* concat( string1 , string2 [, stringn]* )
| Function | Supported |
| --- | --- |
`boolean()`| ✓ |
`ceiling()`| ✓ |
`choose()`| ✗ |
`concat()`| ✓ |
`contains()`| ✓ |
`count()`| ✓ |
`current()`| ✗ |
`document()`| ✗ |
`element-available()`| ✗ |
`ends-with()`| ✓ |
`false()`| ✓ |
`floor()`| ✓ |
`format-number()`| ✗ |
`function-available()`| ✗ |
`generate-id()`| ✗ |
`id()`| ✗ |
`key()`| ✗ |
`lang()`| ✗ |
`last()`| ✓ |
`local-name()`| ✓ |
`name()`| ✓ |
`namespace-uri()`| ✓ |
`normalize-space()`| ✓ |
`not()`| ✓ |
`number()`| ✓ |
`position()`| ✓ |
`round()`| ✓ |
`starts-with()`| ✓ |
`string()`| ✓ |
`string-length()`| ✓ |
`substring()`| ✓ |
`substring-after()`| ✓ |
`substring-before()`| ✓ |
`sum()`| ✓ |
`system-property()`| ✗ |
`translate()`| ✓ |
`true()`| ✓ |
`unparsed-entity-url()` | ✗ |
- `a or b` : Boolean or.
- `a and b` : Boolean and.

Changelogs
===
2019-01-29
- improvement `normalize-space` function. [#32](https://github.com/antchfx/xpath/issues/32)
2018-12-07
- supports XPath 2.0 Sequence expressions. [#30](https://github.com/antchfx/xpath/pull/30) by [@minherz](https://github.com/minherz).
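A small usage sketch of this package through htmlquery (listed above); it assumes htmlquery's Parse/Find/InnerText helpers, which compile an XPath expression and run it against an HTML node tree:

package main

import (
	"fmt"
	"strings"

	"github.com/antchfx/htmlquery"
)

func main() {
	doc, err := htmlquery.Parse(strings.NewReader("<ul><li>a</li><li>b</li></ul>"))
	if err != nil {
		panic(err)
	}
	// //li uses the descendant-or-self axis described above.
	for _, li := range htmlquery.Find(doc, "//li") {
		fmt.Println(htmlquery.InnerText(li))
	}
}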

View File

@ -23,9 +23,12 @@ type builder struct {
func axisPredicate(root *axisNode) func(NodeNavigator) bool {
// get the current axis node type.
typ := ElementNode
if root.AxeType == "attribute" {
switch root.AxeType {
case "attribute":
typ = AttributeNode
} else {
case "self", "parent":
typ = allNode
default:
switch root.Prop {
case "comment":
typ = CommentNode
@ -34,12 +37,17 @@ func axisPredicate(root *axisNode) func(NodeNavigator) bool {
// case "processing-instruction":
// typ = ProcessingInstructionNode
case "node":
typ = ElementNode
typ = allNode
}
}
nametest := root.LocalName != "" || root.Prefix != ""
predicate := func(n NodeNavigator) bool {
if typ == n.NodeType() || typ == TextNode {
if root.LocalName == "" || (root.LocalName == n.LocalName() && root.Prefix == n.Prefix()) {
if typ == n.NodeType() || typ == allNode || typ == TextNode {
if nametest {
if root.LocalName == n.LocalName() && root.Prefix == n.Prefix() {
return true
}
} else {
return true
}
}
@ -61,18 +69,16 @@ func (b *builder) processAxisNode(root *axisNode) (query, error) {
if root.Input == nil {
qyInput = &contextQuery{}
} else {
if b.flag&filterFlag == 0 {
if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
var qyGrandInput query
if input.Input != nil {
qyGrandInput, _ = b.processNode(input.Input)
} else {
qyGrandInput = &contextQuery{}
}
qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: predicate, Self: true}
return qyOutput, nil
if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
var qyGrandInput query
if input.Input != nil {
qyGrandInput, _ = b.processNode(input.Input)
} else {
qyGrandInput = &contextQuery{}
}
qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: predicate, Self: true}
return qyOutput, nil
}
}
qyInput, err = b.processNode(root.Input)
@ -157,6 +163,16 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
return nil, err
}
qyOutput = &functionQuery{Input: b.firstInput, Func: startwithFunc(arg1, arg2)}
case "ends-with":
arg1, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
arg2, err := b.processNode(root.Args[1])
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: b.firstInput, Func: endwithFunc(arg1, arg2)}
case "contains":
arg1, err := b.processNode(root.Args[0])
if err != nil {
@ -189,6 +205,25 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
}
}
qyOutput = &functionQuery{Input: b.firstInput, Func: substringFunc(arg1, arg2, arg3)}
case "substring-before", "substring-after":
// substring-xxxx( haystack, needle )
if len(root.Args) != 2 {
return nil, fmt.Errorf("xpath: %s function must have two parameters", root.FuncName)
}
var (
arg1, arg2 query
err error
)
if arg1, err = b.processNode(root.Args[0]); err != nil {
return nil, err
}
if arg2, err = b.processNode(root.Args[1]); err != nil {
return nil, err
}
qyOutput = &functionQuery{
Input: b.firstInput,
Func: substringIndFunc(arg1, arg2, root.FuncName == "substring-after"),
}
case "string-length":
// string-length( [string] )
if len(root.Args) < 1 {
@ -208,6 +243,25 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc}
case "translate":
// translate( string, string, string )
if len(root.Args) != 3 {
return nil, errors.New("xpath: translate function must have three parameters")
}
var (
arg1, arg2, arg3 query
err error
)
if arg1, err = b.processNode(root.Args[0]); err != nil {
return nil, err
}
if arg2, err = b.processNode(root.Args[1]); err != nil {
return nil, err
}
if arg3, err = b.processNode(root.Args[2]); err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: b.firstInput, Func: translateFunc(arg1, arg2, arg3)}
case "not":
if len(root.Args) == 0 {
return nil, errors.New("xpath: not function must have at least one parameter")
@ -217,12 +271,62 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: notFunc}
case "name":
qyOutput = &functionQuery{Input: b.firstInput, Func: nameFunc}
case "name", "local-name", "namespace-uri":
inp := b.firstInput
if len(root.Args) > 1 {
return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
}
if len(root.Args) == 1 {
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
inp = argQuery
}
f := &functionQuery{Input: inp}
switch root.FuncName {
case "name":
f.Func = nameFunc
case "local-name":
f.Func = localNameFunc
case "namespace-uri":
f.Func = namespaceFunc
}
qyOutput = f
case "true", "false":
val := root.FuncName == "true"
qyOutput = &functionQuery{
Input: b.firstInput,
Func: func(_ query, _ iterator) interface{} {
return val
},
}
case "last":
qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc}
case "position":
qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc}
case "boolean", "number", "string":
inp := b.firstInput
if len(root.Args) > 1 {
return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
}
if len(root.Args) == 1 {
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
inp = argQuery
}
f := &functionQuery{Input: inp}
switch root.FuncName {
case "boolean":
f.Func = booleanFunc
case "string":
f.Func = stringFunc
case "number":
f.Func = numberFunc
}
qyOutput = f
case "count":
//if b.firstInput == nil {
// return nil, errors.New("xpath: expression must evaluate to node-set")
@ -244,6 +348,24 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: sumFunc}
case "ceiling", "floor", "round":
if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets")
}
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
f := &functionQuery{Input: argQuery}
switch root.FuncName {
case "ceiling":
f.Func = ceilingFunc
case "floor":
f.Func = floorFunc
case "round":
f.Func = roundFunc
}
qyOutput = f
case "concat":
if len(root.Args) < 2 {
return nil, fmt.Errorf("xpath: concat() must have at least two arguments")
@ -304,12 +426,14 @@ func (b *builder) processOperatorNode(root *operatorNode) (query, error) {
exprFunc = neFunc
}
qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc}
case "or", "and", "|":
case "or", "and":
isOr := false
if root.Op == "or" || root.Op == "|" {
if root.Op == "or" {
isOr = true
}
qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr}
case "|":
qyOutput = &unionQuery{Left: left, Right: right}
}
return qyOutput, nil
}

View File

@ -2,6 +2,9 @@ package xpath
import (
"errors"
"fmt"
"math"
"regexp"
"strconv"
"strings"
)
@ -80,16 +83,146 @@ func sumFunc(q query, t iterator) interface{} {
case float64:
sum = typ
case string:
if v, err := strconv.ParseFloat(typ, 64); err != nil {
sum = v
v, err := strconv.ParseFloat(typ, 64)
if err != nil {
panic(errors.New("sum() function argument type must be a node-set or number"))
}
sum = v
}
return sum
}
func asNumber(t iterator, o interface{}) float64 {
switch typ := o.(type) {
case query:
node := typ.Select(t)
if node == nil {
return float64(0)
}
if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
return v
}
case float64:
return typ
case string:
v, err := strconv.ParseFloat(typ, 64)
if err != nil {
panic(errors.New("ceiling() function argument type must be a node-set or number"))
}
return v
}
return 0
}
// ceilingFunc is the XPath node-set function ceiling(node-set).
func ceilingFunc(q query, t iterator) interface{} {
val := asNumber(t, q.Evaluate(t))
return math.Ceil(val)
}
// floorFunc is the XPath node-set function floor(node-set).
func floorFunc(q query, t iterator) interface{} {
val := asNumber(t, q.Evaluate(t))
return math.Floor(val)
}
// roundFunc is the XPath node-set function round(node-set).
func roundFunc(q query, t iterator) interface{} {
val := asNumber(t, q.Evaluate(t))
//return math.Round(val)
return round(val)
}
// nameFunc is the XPath function name([node-set]).
func nameFunc(q query, t iterator) interface{} {
return t.Current().LocalName()
v := q.Select(t)
if v == nil {
return ""
}
ns := v.Prefix()
if ns == "" {
return v.LocalName()
}
return ns + ":" + v.LocalName()
}
// localNameFunc is the XPath function local-name([node-set]).
func localNameFunc(q query, t iterator) interface{} {
v := q.Select(t)
if v == nil {
return ""
}
return v.LocalName()
}
// namespaceFunc is the XPath function namespace-uri([node-set]).
func namespaceFunc(q query, t iterator) interface{} {
v := q.Select(t)
if v == nil {
return ""
}
return v.Prefix()
}
func asBool(t iterator, v interface{}) bool {
switch v := v.(type) {
case nil:
return false
case *NodeIterator:
return v.MoveNext()
case bool:
return bool(v)
case float64:
return v != 0
case string:
return v != ""
case query:
return v.Select(t) != nil
default:
panic(fmt.Errorf("unexpected type: %T", v))
}
}
func asString(t iterator, v interface{}) string {
switch v := v.(type) {
case nil:
return ""
case bool:
if v {
return "true"
}
return "false"
case float64:
return strconv.FormatFloat(v, 'g', -1, 64)
case string:
return v
case query:
node := v.Select(t)
if node == nil {
return ""
}
return node.Value()
default:
panic(fmt.Errorf("unexpected type: %T", v))
}
}
// booleanFunc is the XPath function boolean([node-set]).
func booleanFunc(q query, t iterator) interface{} {
v := q.Evaluate(t)
return asBool(t, v)
}
// numberFunc is the XPath function number([node-set]).
func numberFunc(q query, t iterator) interface{} {
v := q.Evaluate(t)
return asNumber(t, v)
}
// stringFunc is the XPath function string([node-set]).
func stringFunc(q query, t iterator) interface{} {
v := q.Evaluate(t)
return asString(t, v)
}
// startwithFunc is the XPath function starts-with(string, string).
@ -119,6 +252,33 @@ func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
}
}
// endwithFunc is the XPath function ends-with(string, string).
func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var (
m, n string
ok bool
)
switch typ := arg1.Evaluate(t).(type) {
case string:
m = typ
case query:
node := typ.Select(t)
if node == nil {
return false
}
m = node.Value()
default:
panic(errors.New("ends-with() function argument type must be string"))
}
n, ok = arg2.Evaluate(t).(string)
if !ok {
panic(errors.New("ends-with() function argument type must be string"))
}
return strings.HasSuffix(m, n)
}
}
// containsFunc is the XPath function contains(string or @attr, string).
func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
@ -149,6 +309,11 @@ func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
}
}
var (
regnewline = regexp.MustCompile(`[\r\n\t]`)
regseqspace = regexp.MustCompile(`\s{2,}`)
)
// normalizespaceFunc is the XPath function normalize-space(string?).
func normalizespaceFunc(q query, t iterator) interface{} {
var m string
@ -158,11 +323,14 @@ func normalizespaceFunc(q query, t iterator) interface{} {
case query:
node := typ.Select(t)
if node == nil {
return false
return ""
}
m = node.Value()
}
return strings.TrimSpace(m)
m = strings.TrimSpace(m)
m = regnewline.ReplaceAllString(m, " ")
m = regseqspace.ReplaceAllString(m, " ")
return m
}
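With the two added regex passes, interior runs of whitespace collapse as the spec requires, e.g.:

// normalize-space("  foo \n\t bar  ")  =>  "foo bar"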
// substringFunc is the XPath substring function; it returns a part of a given string.
@ -175,7 +343,7 @@ func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
case query:
node := typ.Select(t)
if node == nil {
return false
return ""
}
m = node.Value()
}
@ -185,7 +353,10 @@ func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
if start, ok = arg2.Evaluate(t).(float64); !ok {
panic(errors.New("substring() function first argument type must be int"))
} else if start < 1 {
panic(errors.New("substring() function first argument type must be >= 1"))
}
start--
if arg3 != nil {
if length, ok = arg3.Evaluate(t).(float64); !ok {
panic(errors.New("substring() function second argument type must be int"))
@ -201,6 +372,46 @@ func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
}
}
// substringIndFunc is the XPath substring-before/substring-after function; it returns a part of a given string.
func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var str string
switch v := arg1.Evaluate(t).(type) {
case string:
str = v
case query:
node := v.Select(t)
if node == nil {
return ""
}
str = node.Value()
}
var word string
switch v := arg2.Evaluate(t).(type) {
case string:
word = v
case query:
node := v.Select(t)
if node == nil {
return ""
}
word = node.Value()
}
if word == "" {
return ""
}
i := strings.Index(str, word)
if i < 0 {
return ""
}
if after {
return str[i+len(word):]
}
return str[:i]
}
}
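The classic spec examples come out as expected from this implementation:

// substring-before("1999/04/01", "/") => "1999"
// substring-after("1999/04/01", "/")  => "04/01"
// substring-after("1999/04/01", "x")  => ""    (needle not found)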
// stringLengthFunc is the XPath string-length( [string] ) function that returns a number
// equal to the number of characters in a given string.
func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
@ -219,6 +430,25 @@ func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
}
}
// translateFunc is the XPath translate() function; it returns the replaced string.
func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
str := asString(t, arg1.Evaluate(t))
src := asString(t, arg2.Evaluate(t))
dst := asString(t, arg3.Evaluate(t))
var replace []string
for i, s := range src {
d := ""
if i < len(dst) {
d = string(dst[i])
}
replace = append(replace, string(s), d)
}
return strings.NewReplacer(replace...).Replace(str)
}
}
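// A minimal, standalone sketch of the pairwise mapping translateFunc builds:
// characters in src map positionally to dst, and src characters past the end
// of dst are deleted (standard library only).
package main

import (
	"fmt"
	"strings"
)

func main() {
	str, src, dst := "cab", "abc", "AB"
	var replace []string
	for i, s := range src {
		d := ""
		if i < len(dst) {
			d = string(dst[i]) // positional mapping: a->A, b->B
		}
		replace = append(replace, string(s), d) // c maps to "" and is deleted
	}
	fmt.Println(strings.NewReplacer(replace...).Replace(str)) // "AB"
}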
// notFunc is the XPath not(expression) function.
func notFunc(q query, t iterator) interface{} {
switch v := q.Evaluate(t).(type) {

9
vendor/github.com/antchfx/xpath/func_go110.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
// +build go1.10
package xpath
import "math"
func round(f float64) int {
return int(math.Round(f))
}

15
vendor/github.com/antchfx/xpath/func_pre_go110.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// +build !go1.10
package xpath
import "math"
// math.Round() is supported by Go 1.10+;
// this function provides the same rounding for versions <1.10.
// https://github.com/golang/go/issues/20100
func round(f float64) int {
if math.Abs(f) < 0.5 {
return 0
}
return int(f + math.Copysign(0.5, f))
}
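// A standalone sketch showing that the fallback above matches math.Round's
// half-away-from-zero behavior on a few sample values.
package main

import (
	"fmt"
	"math"
)

func round(f float64) int {
	if math.Abs(f) < 0.5 {
		return 0
	}
	return int(f + math.Copysign(0.5, f))
}

func main() {
	for _, f := range []float64{0.4, 0.5, -0.5, -1.6} {
		// Both columns agree: 0 0, 1 1, -1 -1, -2 -2
		fmt.Println(round(f), int(math.Round(f)))
	}
}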

View File

@ -42,7 +42,7 @@ const (
itemString // Quoted string constant
itemNumber // Number constant
itemAxe // Axe (like child::)
itemEof // END
itemEOF // END
)
// A node is an XPath node in the parse tree.
@ -389,7 +389,7 @@ Loop:
}
// Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep
func (p *parser) parseStep(n node) node {
func (p *parser) parseStep(n node) (opnd node) {
axeTyp := "child" // default axes value.
if p.r.typ == itemDot || p.r.typ == itemDotDot {
if p.r.typ == itemDot {
@ -398,23 +398,45 @@ func (p *parser) parseStep(n node) node {
axeTyp = "parent"
}
p.next()
return newAxisNode(axeTyp, "", "", "", n)
opnd = newAxisNode(axeTyp, "", "", "", n)
if p.r.typ != itemLBracket {
return opnd
}
} else {
switch p.r.typ {
case itemAt:
p.next()
axeTyp = "attribute"
case itemAxe:
axeTyp = p.r.name
p.next()
case itemLParens:
return p.parseSequence(n)
}
opnd = p.parseNodeTest(n, axeTyp)
}
switch p.r.typ {
case itemAt:
p.next()
axeTyp = "attribute"
case itemAxe:
axeTyp = p.r.name
p.next()
}
opnd := p.parseNodeTest(n, axeTyp)
for p.r.typ == itemLBracket {
opnd = newFilterNode(opnd, p.parsePredicate(opnd))
}
return opnd
}
// Expr ::= '(' Step ("," Step)* ')'
func (p *parser) parseSequence(n node) (opnd node) {
p.skipItem(itemLParens)
opnd = p.parseStep(n)
for {
if p.r.typ != itemComma {
break
}
p.next()
opnd2 := p.parseStep(n)
opnd = newOperatorNode("|", opnd, opnd2)
}
p.skipItem(itemRParens)
return opnd
}
// NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')'
func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) {
switch p.r.typ {
@ -628,7 +650,7 @@ func (s *scanner) nextChar() bool {
return false
}
s.curr = rune(s.text[s.pos])
s.pos += 1
s.pos++
return true
}
@ -636,7 +658,7 @@ func (s *scanner) nextItem() bool {
s.skipSpace()
switch s.curr {
case 0:
s.typ = itemEof
s.typ = itemEOF
return false
case ',', '@', '(', ')', '|', '*', '[', ']', '+', '-', '=', '#', '$':
s.typ = asItemType(s.curr)

View File

@ -71,7 +71,7 @@ func (a *ancestorQuery) Select(t iterator) NodeNavigator {
}
for node.MoveToParent() {
if !a.Predicate(node) {
break
continue
}
return node
}
@ -707,16 +707,79 @@ func (b *booleanQuery) Select(t iterator) NodeNavigator {
func (b *booleanQuery) Evaluate(t iterator) interface{} {
m := b.Left.Evaluate(t)
if m.(bool) == b.IsOr {
return m
left := asBool(t, m)
if b.IsOr && left {
return true
} else if !b.IsOr && !left {
return false
}
return b.Right.Evaluate(t)
m = b.Right.Evaluate(t)
return asBool(t, m)
}
func (b *booleanQuery) Clone() query {
return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()}
}
type unionQuery struct {
Left, Right query
iterator func() NodeNavigator
}
func (u *unionQuery) Select(t iterator) NodeNavigator {
if u.iterator == nil {
var list []NodeNavigator
var i int
root := t.Current().Copy()
for {
node := u.Left.Select(t)
if node == nil {
break
}
node = node.Copy()
list = append(list, node)
}
t.Current().MoveTo(root)
for {
node := u.Right.Select(t)
if node == nil {
break
}
node = node.Copy()
var exists bool
for _, x := range list {
if reflect.DeepEqual(x, node) {
exists = true
break
}
}
if !exists {
list = append(list, node)
}
}
u.iterator = func() NodeNavigator {
if i >= len(list) {
return nil
}
node := list[i]
i++
return node
}
}
return u.iterator()
}
func (u *unionQuery) Evaluate(t iterator) interface{} {
u.iterator = nil
u.Left.Evaluate(t)
u.Right.Evaluate(t)
return u
}
func (u *unionQuery) Clone() query {
return &unionQuery{Left: u.Left.Clone(), Right: u.Right.Clone()}
}
func getNodePosition(q query) int {
type Position interface {
position() int

View File

@ -22,6 +22,9 @@ const (
// CommentNode is a comment node, such as <!-- my comment -->
CommentNode
// allNode is any types of node, used by xpath package only to predicate match.
allNode
)
// NodeNavigator provides cursor model for navigating XML data.

View File

@ -66,10 +66,15 @@ func (n *Node) InnerText() string {
func outputXML(buf *bytes.Buffer, n *Node) {
if n.Type == TextNode || n.Type == CommentNode {
buf.WriteString(strings.TrimSpace(n.Data))
xml.EscapeText(buf, []byte(strings.TrimSpace(n.Data)))
return
}
buf.WriteString("<" + n.Data)
if n.Type == DeclarationNode {
buf.WriteString("<?" + n.Data)
} else {
buf.WriteString("<" + n.Data)
}
for _, attr := range n.Attr {
if attr.Name.Space != "" {
buf.WriteString(fmt.Sprintf(` %s:%s="%s"`, attr.Name.Space, attr.Name.Local, attr.Value))
@ -77,11 +82,17 @@ func outputXML(buf *bytes.Buffer, n *Node) {
buf.WriteString(fmt.Sprintf(` %s="%s"`, attr.Name.Local, attr.Value))
}
}
buf.WriteString(">")
if n.Type == DeclarationNode {
buf.WriteString("?>")
} else {
buf.WriteString(">")
}
for child := n.FirstChild; child != nil; child = child.NextSibling {
outputXML(buf, child)
}
buf.WriteString(fmt.Sprintf("</%s>", n.Data))
if n.Type != DeclarationNode {
buf.WriteString(fmt.Sprintf("</%s>", n.Data))
}
}
// OutputXML returns the text that including tags name.
@ -128,6 +139,9 @@ func addChild(parent, n *Node) {
}
func addSibling(sibling, n *Node) {
for t := sibling.NextSibling; t != nil; t = t.NextSibling {
sibling = t
}
n.Parent = sibling.Parent
sibling.NextSibling = n
n.PrevSibling = sibling
@ -245,8 +259,3 @@ quit:
func Parse(r io.Reader) (*Node, error) {
return parse(r)
}
// ParseXML returns the parse tree for the XML from the given Reader.Deprecated.
func ParseXML(r io.Reader) (*Node, error) {
return parse(r)
}

95
vendor/github.com/apparentlymart/go-textseg/LICENSE generated vendored Normal file
View File

@ -0,0 +1,95 @@
Copyright (c) 2017 Martin Atkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---------
Unicode table generation programs are under a separate copyright and license:
Copyright (c) 2014 Couchbase, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions
and limitations under the License.
---------
Grapheme break data is provided as part of the Unicode character database,
copyright 2016 Unicode, Inc, which is provided with the following license:
Unicode Data Files include all data files under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.
Unicode Data Files do not include PDF online code charts under the
directory http://www.unicode.org/Public/.
Software includes any source code published in the Unicode Standard
or under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.
NOTICE TO USER: Carefully read the following legal agreement.
BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
TERMS AND CONDITIONS OF THIS AGREEMENT.
IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
THE DATA FILES OR SOFTWARE.
COPYRIGHT AND PERMISSION NOTICE
Copyright © 1991-2017 Unicode, Inc. All rights reserved.
Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
Permission is hereby granted, free of charge, to any person obtaining
a copy of the Unicode data files and any associated documentation
(the "Data Files") or Unicode software and any associated documentation
(the "Software") to deal in the Data Files or Software
without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, and/or sell copies of
the Data Files or Software, and to permit persons to whom the Data Files
or Software are furnished to do so, provided that either
(a) this copyright and permission notice appear with all copies
of the Data Files or Software, or
(b) this copyright and permission notice appear in associated
Documentation.
THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THE DATA FILES OR SOFTWARE.
Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in these Data Files or Software without prior
written authorization of the copyright holder.

View File

@ -0,0 +1,30 @@
package textseg
import (
"bufio"
"bytes"
)
// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of
// all of the recognized tokens in the given buffer.
func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) {
scanner := bufio.NewScanner(bytes.NewReader(buf))
scanner.Split(splitFunc)
var ret [][]byte
for scanner.Scan() {
ret = append(ret, scanner.Bytes())
}
return ret, scanner.Err()
}
// TokenCount is a utility that uses a bufio.SplitFunc to count the number of
// recognized tokens in the given buffer.
func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) {
scanner := bufio.NewScanner(bytes.NewReader(buf))
scanner.Split(splitFunc)
var ret int
for scanner.Scan() {
ret++
}
return ret, scanner.Err()
}
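// A hedged usage sketch for the helpers above, splitting on grapheme
// clusters via this package's ScanGraphemeClusters (standalone; import path
// as vendored here).
package main

import (
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	buf := []byte("héllo")
	toks, err := textseg.AllTokens(buf, textseg.ScanGraphemeClusters)
	if err != nil {
		panic(err)
	}
	n, _ := textseg.TokenCount(buf, textseg.ScanGraphemeClusters)
	fmt.Println(len(toks), n) // 5 5: "é" is one cluster of two bytes
}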

View File

@ -0,0 +1,7 @@
package textseg
//go:generate go run make_tables.go -output tables.go
//go:generate go run make_test_tables.go -output tables_test.go
//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl
//go:generate ragel -Z grapheme_clusters.rl
//go:generate gofmt -w grapheme_clusters.go

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,132 @@
package textseg
import (
"errors"
"unicode/utf8"
)
// Generated from grapheme_clusters.rl. DO NOT EDIT
%%{
# (except you are actually in grapheme_clusters.rl here, so edit away!)
machine graphclust;
write data;
}%%
var Error = errors.New("invalid UTF8 text")
// ScanGraphemeClusters is a split function for bufio.Scanner that splits
// on grapheme cluster boundaries.
func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
if len(data) == 0 {
return 0, nil, nil
}
// Ragel state
cs := 0 // Current State
p := 0 // "Pointer" into data
pe := len(data) // End-of-data "pointer"
ts := 0
te := 0
act := 0
eof := pe
// Make Go compiler happy
_ = ts
_ = te
_ = act
_ = eof
startPos := 0
endPos := 0
%%{
include GraphemeCluster "grapheme_clusters_table.rl";
action start {
startPos = p
}
action end {
endPos = p
}
action emit {
return endPos+1, data[startPos:endPos+1], nil
}
ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?;
AnyExtender = Extend | ZWJGlue | SpacingMark;
Extension = AnyExtender*;
ReplacementChar = (0xEF 0xBF 0xBD);
CRLFSeq = CR LF;
ControlSeq = Control | ReplacementChar;
HangulSeq = (
L+ (((LV? V+ | LVT) T*)?|LV?) |
LV V* T* |
V+ T* |
LVT T* |
T+
) Extension;
EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension;
ZWJSeq = ZWJGlue Extension;
EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension;
UTF8Cont = 0x80 .. 0xBF;
AnyUTF8 = (
0x00..0x7F |
0xC0..0xDF . UTF8Cont |
0xE0..0xEF . UTF8Cont . UTF8Cont |
0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
);
# OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension
OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension;
# PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break
PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?;
CRLFTok = CRLFSeq >start @end;
ControlTok = ControlSeq >start @end;
HangulTok = HangulSeq >start @end;
EmojiTok = EmojiSeq >start @end;
ZWJTok = ZWJSeq >start @end;
EmojiFlagTok = EmojiFlagSeq >start @end;
OtherTok = OtherSeq >start @end;
PrependTok = PrependSeq >start @end;
main := |*
CRLFTok => emit;
ControlTok => emit;
HangulTok => emit;
EmojiTok => emit;
ZWJTok => emit;
EmojiFlagTok => emit;
PrependTok => emit;
OtherTok => emit;
# any single valid UTF-8 character would also be valid per spec,
# but we'll handle that separately after the loop so we can deal
# with requesting more bytes if we're not at EOF.
*|;
write init;
write exec;
}%%
// If we fall out here then we were unable to complete a sequence.
// If we weren't able to complete a sequence then either we've
// reached the end of a partial buffer (so there's more data to come)
// or we have an isolated symbol that would normally be part of a
// grapheme cluster but has appeared in isolation here.
if !atEOF {
// Request more
return 0, nil, nil
}
// Just take the first UTF-8 sequence and return that.
_, seqLen := utf8.DecodeRune(data)
return seqLen, data[:seqLen], nil
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,335 @@
#!/usr/bin/env ruby
#
# This script has been updated to accept more command-line arguments:
#
# -u, --url URL to process
# -m, --machine Machine name
# -p, --properties Properties to add to the machine
# -o, --output Write output to file
#
# Updated by: Marty Schoch <marty.schoch@gmail.com>
#
# This script uses the unicode spec to generate a Ragel state machine
# that recognizes unicode alphanumeric characters. It generates 5
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
# Currently supported encodings are UTF-8 [default] and UCS-4.
#
# Usage: unicode2ragel.rb [options]
# -e, --encoding [ucs4 | utf8] Data encoding
# -h, --help Show this message
#
# This script was originally written as part of the Ferret search
# engine library.
#
# Author: Rakan El-Khalil <rakan@well.com>
require 'optparse'
require 'open-uri'
ENCODINGS = [ :utf8, :ucs4 ]
ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
DEFAULT_MACHINE_NAME= "WChar"
###
# Display vars & default option
TOTAL_WIDTH = 80
RANGE_WIDTH = 23
@encoding = :utf8
@chart_url = DEFAULT_CHART_URL
machine_name = DEFAULT_MACHINE_NAME
properties = []
@output = $stdout
###
# Option parsing
cli_opts = OptionParser.new do |opts|
opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
@encoding = o.downcase.to_sym
end
opts.on("-h", "--help", "Show this message") do
puts opts
exit
end
opts.on("-u", "--url URL", "URL to process") do |o|
@chart_url = o
end
opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
machine_name = o
end
opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
properties = o
end
opts.on("-o", "--output FILE", "output file") do |o|
@output = File.new(o, "w+")
end
end
cli_opts.parse(ARGV)
unless ENCODINGS.member? @encoding
puts "Invalid encoding: #{@encoding}"
puts cli_opts
exit
end
##
# Downloads the document at url and yields every alpha line's hex
# range and description.
def each_alpha( url, property )
open( url ) do |file|
file.each_line do |line|
next if line =~ /^#/;
next if line !~ /; #{property} #/;
range, description = line.split(/;/)
range.strip!
description.gsub!(/.*#/, '').strip!
if range =~ /\.\./
start, stop = range.split '..'
else start = stop = range
end
yield start.hex .. stop.hex, description
end
end
end
###
# Formats to hex at minimum width
def to_hex( n )
r = "%0X" % n
r = "0#{r}" unless (r.length % 2).zero?
r
end
###
# UCS4 is just a straight hex conversion of the unicode codepoint.
def to_ucs4( range )
rangestr = "0x" + to_hex(range.begin)
rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
[ rangestr ]
end
##
# 0x00 - 0x7f -> 0zzzzzzz[7]
# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
def to_utf8_enc( n )
r = 0
if n <= 0x7f
r = n
elsif n <= 0x7ff
y = 0xc0 | (n >> 6)
z = 0x80 | (n & 0x3f)
r = y << 8 | z
elsif n <= 0xffff
x = 0xe0 | (n >> 12)
y = 0x80 | (n >> 6) & 0x3f
z = 0x80 | n & 0x3f
r = x << 16 | y << 8 | z
elsif n <= 0x10ffff
w = 0xf0 | (n >> 18)
x = 0x80 | (n >> 12) & 0x3f
y = 0x80 | (n >> 6) & 0x3f
z = 0x80 | n & 0x3f
r = w << 24 | x << 16 | y << 8 | z
end
to_hex(r)
end
def from_utf8_enc( n )
n = n.hex
r = 0
if n <= 0x7f
r = n
elsif n <= 0xdfff
y = (n >> 8) & 0x1f
z = n & 0x3f
r = y << 6 | z
elsif n <= 0xefffff
x = (n >> 16) & 0x0f
y = (n >> 8) & 0x3f
z = n & 0x3f
r = x << 10 | y << 6 | z
elsif n <= 0xf7ffffff
w = (n >> 24) & 0x07
x = (n >> 16) & 0x3f
y = (n >> 8) & 0x3f
z = n & 0x3f
r = w << 18 | x << 12 | y << 6 | z
end
r
end
###
# Given a range, splits it up into ranges that can be continuously
# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
# This is not strictly needed since the current [5.1] unicode standard
# doesn't have ranges that straddle utf8 boundaries. This is included
# for completeness as there is no telling if that will ever change.
def utf8_ranges( range )
ranges = []
UTF8_BOUNDARIES.each do |max|
if range.begin <= max
if range.end <= max
ranges << range
return ranges
end
ranges << (range.begin .. max)
range = (max + 1) .. range.end
end
end
ranges
end
def build_range( start, stop )
size = start.size/2
left = size - 1
return [""] if size < 1
a = start[0..1]
b = stop[0..1]
###
# Shared prefix
if a == b
return build_range(start[2..-1], stop[2..-1]).map do |elt|
"0x#{a} " + elt
end
end
###
# Unshared prefix, end of run
return ["0x#{a}..0x#{b} "] if left.zero?
###
# Unshared prefix, not end of run
# Range can be 0x123456..0x56789A
# Which is equivalent to:
# 0x123456 .. 0x12FFFF
# 0x130000 .. 0x55FFFF
# 0x560000 .. 0x56789A
ret = []
ret << build_range(start, a + "FF" * left)
###
# Only generate middle range if need be.
if a.hex+1 != b.hex
max = to_hex(b.hex - 1)
max = "FF" if b == "FF"
ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
end
###
# Don't generate last range if it is covered by first range
ret << build_range(b + "00" * left, stop) unless b == "FF"
ret.flatten!
end
def to_utf8( range )
utf8_ranges( range ).map do |r|
begin_enc = to_utf8_enc(r.begin)
end_enc = to_utf8_enc(r.end)
build_range begin_enc, end_enc
end.flatten!
end
##
# Perform a 3-way comparison of the number of codepoints advertised by
# the unicode spec for the given range, the originally parsed range,
# and the resulting utf8 encoded range.
def count_codepoints( code )
code.split(' ').inject(1) do |acc, elt|
if elt =~ /0x(.+)\.\.0x(.+)/
if @encoding == :utf8
acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
else
acc * ($2.hex - $1.hex + 1)
end
else
acc
end
end
end
def is_valid?( range, desc, codes )
spec_count = 1
spec_count = $1.to_i if desc =~ /\[(\d+)\]/
range_count = range.end - range.begin + 1
sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
sum == spec_count and sum == range_count
end
##
# Generate the state machine to stdout
def generate_machine( name, property )
pipe = " "
@output.puts " #{name} = "
each_alpha( @chart_url, property ) do |range, desc|
codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
#raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
# is_valid? range, desc, codes
range_width = codes.map { |a| a.size }.max
range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
if desc.size > desc_width
desc = desc[0..desc_width - 4] + "..."
end
codes.each_with_index do |r, idx|
desc = "" unless idx.zero?
code = "%-#{range_width}s" % r
@output.puts " #{pipe} #{code} ##{desc}"
pipe = "|"
end
end
@output.puts " ;"
@output.puts ""
end
@output.puts <<EOF
# The following Ragel file was autogenerated with #{$0}
# from: #{@chart_url}
#
# It defines #{properties}.
#
# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
# and that your input is in #{@encoding}.
%%{
machine #{machine_name};
EOF
properties.each { |x| generate_machine( x, x ) }
@output.puts <<EOF
}%%
EOF

View File

@ -0,0 +1,19 @@
package textseg
import "unicode/utf8"
// ScanUTF8Sequences is a split function for bufio.Scanner that splits on
// UTF-8 sequence boundaries.
//
// This is included largely for completeness, since this behavior is already
// built in to Go when ranging over a string.
func ScanUTF8Sequences(data []byte, atEOF bool) (int, []byte, error) {
if len(data) == 0 {
return 0, nil, nil
}
r, seqLen := utf8.DecodeRune(data)
if r == utf8.RuneError && !atEOF {
return 0, nil, nil
}
return seqLen, data[:seqLen], nil
}
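// A hedged usage sketch: wiring ScanUTF8Sequences into a bufio.Scanner, the
// intended consumption pattern for any bufio.SplitFunc.
package main

import (
	"bufio"
	"bytes"
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	sc := bufio.NewScanner(bytes.NewReader([]byte("héllo")))
	sc.Split(textseg.ScanUTF8Sequences)
	for sc.Scan() {
		fmt.Printf("%q ", sc.Text()) // "h" "é" "l" "l" "o"
	}
	fmt.Println()
}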

11
vendor/github.com/dylanmei/winrmtest/go.mod generated vendored Normal file
View File

@ -0,0 +1,11 @@
module github.com/dylanmei/winrmtest
require (
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e // indirect
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0
github.com/kr/pretty v0.1.0 // indirect
github.com/satori/go.uuid v1.2.0
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd // indirect
golang.org/x/text v0.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
)

17
vendor/github.com/dylanmei/winrmtest/go.sum generated vendored Normal file
View File

@ -0,0 +1,17 @@
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e h1:ptBAamGVd6CfRsUtyHD+goy2JGhv1QC32v3gqM8mYAM=
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0 h1:JaCC8jz0zdMLk2m+qCCVLLLM/PL93p84w4pK3aJWj60=
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

89
vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go generated vendored Normal file
View File

@ -0,0 +1,89 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package cmpopts provides common options for the cmp package.
package cmpopts
import (
"math"
"reflect"
"github.com/google/go-cmp/cmp"
)
func equateAlways(_, _ interface{}) bool { return true }
// EquateEmpty returns a Comparer option that determines all maps and slices
// with a length of zero to be equal, regardless of whether they are nil.
//
// EquateEmpty can be used in conjunction with SortSlices and SortMaps.
func EquateEmpty() cmp.Option {
return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways))
}
func isEmpty(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
return (x != nil && y != nil && vx.Type() == vy.Type()) &&
(vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) &&
(vx.Len() == 0 && vy.Len() == 0)
}
// EquateApprox returns a Comparer option that determines float32 or float64
// values to be equal if they are within a relative fraction or absolute margin.
// This option is not used when either x or y is NaN or infinite.
//
// The fraction determines that the difference of two values must be within the
// smaller fraction of the two values, while the margin determines that the two
// values must be within some absolute margin.
// To express only a fraction or only a margin, use 0 for the other parameter.
// The fraction and margin must be non-negative.
//
// The mathematical expression used is equivalent to:
// |x-y| ≤ max(fraction*min(|x|, |y|), margin)
//
// EquateApprox can be used in conjunction with EquateNaNs.
func EquateApprox(fraction, margin float64) cmp.Option {
if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) {
panic("margin or fraction must be a non-negative number")
}
a := approximator{fraction, margin}
return cmp.Options{
cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)),
cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)),
}
}
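// A hedged usage sketch for EquateApprox with an absolute margin only; the
// values are made up.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	x := []float64{1.0, 2.0}
	y := []float64{1.0000001, 2.0}
	fmt.Println(cmp.Equal(x, y))                                // false
	fmt.Println(cmp.Equal(x, y, cmpopts.EquateApprox(0, 1e-5))) // true: |Δ| ≤ margin
}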
type approximator struct{ frac, marg float64 }
func areRealF64s(x, y float64) bool {
return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0)
}
func areRealF32s(x, y float32) bool {
return areRealF64s(float64(x), float64(y))
}
func (a approximator) compareF64(x, y float64) bool {
relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y))
return math.Abs(x-y) <= math.Max(a.marg, relMarg)
}
func (a approximator) compareF32(x, y float32) bool {
return a.compareF64(float64(x), float64(y))
}
// EquateNaNs returns a Comparer option that determines float32 and float64
// NaN values to be equal.
//
// EquateNaNs can be used in conjunction with EquateApprox.
func EquateNaNs() cmp.Option {
return cmp.Options{
cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)),
cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)),
}
}
func areNaNsF64s(x, y float64) bool {
return math.IsNaN(x) && math.IsNaN(y)
}
func areNaNsF32s(x, y float32) bool {
return areNaNsF64s(float64(x), float64(y))
}
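// A hedged usage sketch for EquateNaNs; by default NaN compares unequal even
// to itself.
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	x := []float64{math.NaN(), 1.0}
	fmt.Println(cmp.Equal(x, x))                       // false: NaN != NaN
	fmt.Println(cmp.Equal(x, x, cmpopts.EquateNaNs())) // true
}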

207
vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go generated vendored Normal file
View File

@ -0,0 +1,207 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"fmt"
"reflect"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/internal/function"
)
// IgnoreFields returns an Option that ignores exported fields of the
// given names on a single struct type.
// The struct type is specified by passing in a value of that type.
//
// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a
// specific sub-field that is embedded or nested within the parent struct.
//
// This does not handle unexported fields; use IgnoreUnexported instead.
func IgnoreFields(typ interface{}, names ...string) cmp.Option {
sf := newStructFilter(typ, names...)
return cmp.FilterPath(sf.filter, cmp.Ignore())
}
// IgnoreTypes returns an Option that ignores all values assignable to
// certain types, which are specified by passing in a value of each type.
func IgnoreTypes(typs ...interface{}) cmp.Option {
tf := newTypeFilter(typs...)
return cmp.FilterPath(tf.filter, cmp.Ignore())
}
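// A hedged usage sketch for the two ignore options above; the User type and
// its fields are hypothetical.
package main

import (
	"fmt"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type User struct {
	Name      string
	ID        int
	UpdatedAt time.Time
}

func main() {
	a := User{Name: "amy", ID: 1, UpdatedAt: time.Now()}
	b := User{Name: "amy", ID: 2, UpdatedAt: time.Now().Add(time.Hour)}
	eq := cmp.Equal(a, b,
		cmpopts.IgnoreFields(User{}, "ID"), // skip the ID field by name
		cmpopts.IgnoreTypes(time.Time{}),   // skip every time.Time value
	)
	fmt.Println(eq) // true: only Name is compared
}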
type typeFilter []reflect.Type
func newTypeFilter(typs ...interface{}) (tf typeFilter) {
for _, typ := range typs {
t := reflect.TypeOf(typ)
if t == nil {
// This occurs if someone tries to pass in sync.Locker(nil)
panic("cannot determine type; consider using IgnoreInterfaces")
}
tf = append(tf, t)
}
return tf
}
func (tf typeFilter) filter(p cmp.Path) bool {
if len(p) < 1 {
return false
}
t := p.Last().Type()
for _, ti := range tf {
if t.AssignableTo(ti) {
return true
}
}
return false
}
// IgnoreInterfaces returns an Option that ignores all values or references of
// values assignable to certain interface types. These interfaces are specified
// by passing in an anonymous struct with the interface types embedded in it.
// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}.
func IgnoreInterfaces(ifaces interface{}) cmp.Option {
tf := newIfaceFilter(ifaces)
return cmp.FilterPath(tf.filter, cmp.Ignore())
}
type ifaceFilter []reflect.Type
func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) {
t := reflect.TypeOf(ifaces)
if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct {
panic("input must be an anonymous struct")
}
for i := 0; i < t.NumField(); i++ {
fi := t.Field(i)
switch {
case !fi.Anonymous:
panic("struct cannot have named fields")
case fi.Type.Kind() != reflect.Interface:
panic("embedded field must be an interface type")
case fi.Type.NumMethod() == 0:
// This matches everything; why would you ever want this?
panic("cannot ignore empty interface")
default:
tf = append(tf, fi.Type)
}
}
return tf
}
func (tf ifaceFilter) filter(p cmp.Path) bool {
if len(p) < 1 {
return false
}
t := p.Last().Type()
for _, ti := range tf {
if t.AssignableTo(ti) {
return true
}
if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) {
return true
}
}
return false
}
// IgnoreUnexported returns an Option that only ignores the immediate unexported
// fields of a struct, including anonymous fields of unexported types.
// In particular, unexported fields within the struct's exported fields
// of struct types, including anonymous fields, will not be ignored unless the
// type of the field itself is also passed to IgnoreUnexported.
//
// Avoid ignoring unexported fields of a type which you do not control (i.e. a
// type from another repository), as changes to the implementation of such types
// may change how the comparison behaves. Prefer a custom Comparer instead.
func IgnoreUnexported(typs ...interface{}) cmp.Option {
ux := newUnexportedFilter(typs...)
return cmp.FilterPath(ux.filter, cmp.Ignore())
}
type unexportedFilter struct{ m map[reflect.Type]bool }
func newUnexportedFilter(typs ...interface{}) unexportedFilter {
ux := unexportedFilter{m: make(map[reflect.Type]bool)}
for _, typ := range typs {
t := reflect.TypeOf(typ)
if t == nil || t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
ux.m[t] = true
}
return ux
}
func (xf unexportedFilter) filter(p cmp.Path) bool {
sf, ok := p.Index(-1).(cmp.StructField)
if !ok {
return false
}
return xf.m[p.Index(-2).Type()] && !isExported(sf.Name())
}
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}
// IgnoreSliceElements returns an Option that ignores elements of []V.
// The discard function must be of the form "func(T) bool" which is used to
// ignore slice elements of type V, where V is assignable to T.
// Elements are ignored if the function reports true.
func IgnoreSliceElements(discardFunc interface{}) cmp.Option {
vf := reflect.ValueOf(discardFunc)
if !function.IsType(vf.Type(), function.ValuePredicate) || vf.IsNil() {
panic(fmt.Sprintf("invalid discard function: %T", discardFunc))
}
return cmp.FilterPath(func(p cmp.Path) bool {
si, ok := p.Index(-1).(cmp.SliceIndex)
if !ok {
return false
}
if !si.Type().AssignableTo(vf.Type().In(0)) {
return false
}
vx, vy := si.Values()
if vx.IsValid() && vf.Call([]reflect.Value{vx})[0].Bool() {
return true
}
if vy.IsValid() && vf.Call([]reflect.Value{vy})[0].Bool() {
return true
}
return false
}, cmp.Ignore())
}
// IgnoreMapEntries returns an Option that ignores entries of map[K]V.
// The discard function must be of the form "func(T, R) bool" which is used to
// ignore map entries of type K and V, where K and V are assignable to T and R.
// Entries are ignored if the function reports true.
func IgnoreMapEntries(discardFunc interface{}) cmp.Option {
vf := reflect.ValueOf(discardFunc)
if !function.IsType(vf.Type(), function.KeyValuePredicate) || vf.IsNil() {
panic(fmt.Sprintf("invalid discard function: %T", discardFunc))
}
return cmp.FilterPath(func(p cmp.Path) bool {
mi, ok := p.Index(-1).(cmp.MapIndex)
if !ok {
return false
}
if !mi.Key().Type().AssignableTo(vf.Type().In(0)) || !mi.Type().AssignableTo(vf.Type().In(1)) {
return false
}
k := mi.Key()
vx, vy := mi.Values()
if vx.IsValid() && vf.Call([]reflect.Value{k, vx})[0].Bool() {
return true
}
if vy.IsValid() && vf.Call([]reflect.Value{k, vy})[0].Bool() {
return true
}
return false
}, cmp.Ignore())
}
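// A hedged usage sketch for IgnoreMapEntries, dropping zero-valued entries
// before comparing; the data is made up.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	x := map[string]int{"a": 1, "b": 0}
	y := map[string]int{"a": 1}
	// Entries where the discard function reports true are ignored.
	opt := cmpopts.IgnoreMapEntries(func(k string, v int) bool { return v == 0 })
	fmt.Println(cmp.Equal(x, y, opt)) // true
}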

147
vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go generated vendored Normal file
View File

@ -0,0 +1,147 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"fmt"
"reflect"
"sort"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/internal/function"
)
// SortSlices returns a Transformer option that sorts all []V.
// The less function must be of the form "func(T, T) bool" which is used to
// sort any slice with element type V that is assignable to T.
//
// The less function must be:
// • Deterministic: less(x, y) == less(x, y)
// • Irreflexive: !less(x, x)
// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
//
// The less function does not have to be "total". That is, if !less(x, y) and
// !less(y, x) for two elements x and y, their relative order is maintained.
//
// SortSlices can be used in conjunction with EquateEmpty.
func SortSlices(lessFunc interface{}) cmp.Option {
vf := reflect.ValueOf(lessFunc)
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
panic(fmt.Sprintf("invalid less function: %T", lessFunc))
}
ss := sliceSorter{vf.Type().In(0), vf}
return cmp.FilterValues(ss.filter, cmp.Transformer("cmpopts.SortSlices", ss.sort))
}
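// A hedged usage sketch for SortSlices: order-insensitive slice comparison
// via the transformer above.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	x := []int{3, 1, 2}
	y := []int{1, 2, 3}
	// Both sides are sorted with the less function before comparison.
	opt := cmpopts.SortSlices(func(a, b int) bool { return a < b })
	fmt.Println(cmp.Equal(x, y, opt)) // true
}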
type sliceSorter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
func (ss sliceSorter) filter(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
if !(x != nil && y != nil && vx.Type() == vy.Type()) ||
!(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) ||
(vx.Len() <= 1 && vy.Len() <= 1) {
return false
}
// Check whether the slices are already sorted to avoid an infinite
// recursion cycle applying the same transform to itself.
ok1 := sort.SliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) })
ok2 := sort.SliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) })
return !ok1 || !ok2
}
func (ss sliceSorter) sort(x interface{}) interface{} {
src := reflect.ValueOf(x)
dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len())
for i := 0; i < src.Len(); i++ {
dst.Index(i).Set(src.Index(i))
}
sort.SliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) })
ss.checkSort(dst)
return dst.Interface()
}
func (ss sliceSorter) checkSort(v reflect.Value) {
start := -1 // Start of a sequence of equal elements.
for i := 1; i < v.Len(); i++ {
if ss.less(v, i-1, i) {
// Check that first and last elements in v[start:i] are equal.
if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) {
panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i)))
}
start = -1
} else if start == -1 {
start = i
}
}
}
func (ss sliceSorter) less(v reflect.Value, i, j int) bool {
vx, vy := v.Index(i), v.Index(j)
return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
}
// SortMaps returns a Transformer option that flattens map[K]V types to be a
// sorted []struct{K, V}. The less function must be of the form
// "func(T, T) bool" which is used to sort any map with key K that is
// assignable to T.
//
// Flattening the map into a slice has the property that cmp.Equal is able to
// use Comparers on K or the K.Equal method if it exists.
//
// The less function must be:
// • Deterministic: less(x, y) == less(x, y)
// • Irreflexive: !less(x, x)
// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
// • Total: if x != y, then either less(x, y) or less(y, x)
//
// SortMaps can be used in conjunction with EquateEmpty.
func SortMaps(lessFunc interface{}) cmp.Option {
vf := reflect.ValueOf(lessFunc)
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
panic(fmt.Sprintf("invalid less function: %T", lessFunc))
}
ms := mapSorter{vf.Type().In(0), vf}
return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort))
}
type mapSorter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
func (ms mapSorter) filter(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
return (x != nil && y != nil && vx.Type() == vy.Type()) &&
(vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) &&
(vx.Len() != 0 || vy.Len() != 0)
}
func (ms mapSorter) sort(x interface{}) interface{} {
src := reflect.ValueOf(x)
outType := reflect.StructOf([]reflect.StructField{
{Name: "K", Type: src.Type().Key()},
{Name: "V", Type: src.Type().Elem()},
})
dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len())
for i, k := range src.MapKeys() {
v := reflect.New(outType).Elem()
v.Field(0).Set(k)
v.Field(1).Set(src.MapIndex(k))
dst.Index(i).Set(v)
}
sort.Slice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) })
ms.checkSort(dst)
return dst.Interface()
}
func (ms mapSorter) checkSort(v reflect.Value) {
for i := 1; i < v.Len(); i++ {
if !ms.less(v, i-1, i) {
panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i)))
}
}
}
func (ms mapSorter) less(v reflect.Value, i, j int) bool {
vx, vy := v.Index(i).Field(0), v.Index(j).Field(0)
return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
}

View File

@ -0,0 +1,182 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"fmt"
"reflect"
"strings"
"github.com/google/go-cmp/cmp"
)
// filterField returns a new Option where opt is only evaluated on paths that
// include a specific exported field on a single struct type.
// The struct type is specified by passing in a value of that type.
//
// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a
// specific sub-field that is embedded or nested within the parent struct.
func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option {
// TODO: This is currently unexported over concerns of how helper filters
// can be composed together easily.
// TODO: Add tests for FilterField.
sf := newStructFilter(typ, name)
return cmp.FilterPath(sf.filter, opt)
}
type structFilter struct {
t reflect.Type // The root struct type to match on
ft fieldTree // Tree of fields to match on
}
func newStructFilter(typ interface{}, names ...string) structFilter {
// TODO: Perhaps allow * as a special identifier to allow ignoring any
// number of path steps until the next field match?
// This could be useful when a concrete struct gets transformed into
// an anonymous struct where it is not possible to specify that by type,
// but the transformer happens to provide guarantees about the names of
// the transformed fields.
t := reflect.TypeOf(typ)
if t == nil || t.Kind() != reflect.Struct {
panic(fmt.Sprintf("%T must be a struct", typ))
}
var ft fieldTree
for _, name := range names {
cname, err := canonicalName(t, name)
if err != nil {
panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err))
}
ft.insert(cname)
}
return structFilter{t, ft}
}
func (sf structFilter) filter(p cmp.Path) bool {
for i, ps := range p {
if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) {
return true
}
}
return false
}
// fieldTree represents a set of dot-separated identifiers.
//
// For example, inserting the following selectors:
// Foo
// Foo.Bar.Baz
// Foo.Buzz
// Nuka.Cola.Quantum
//
// Results in a tree of the form:
// {sub: {
// "Foo": {ok: true, sub: {
// "Bar": {sub: {
// "Baz": {ok: true},
// }},
// "Buzz": {ok: true},
// }},
// "Nuka": {sub: {
// "Cola": {sub: {
// "Quantum": {ok: true},
// }},
// }},
// }}
type fieldTree struct {
ok bool // Whether this is a specified node
sub map[string]fieldTree // The sub-tree of fields under this node
}
// insert inserts a sequence of field accesses into the tree.
func (ft *fieldTree) insert(cname []string) {
if ft.sub == nil {
ft.sub = make(map[string]fieldTree)
}
if len(cname) == 0 {
ft.ok = true
return
}
sub := ft.sub[cname[0]]
sub.insert(cname[1:])
ft.sub[cname[0]] = sub
}
// matchPrefix reports whether any selector in the fieldTree matches
// the start of path p.
func (ft fieldTree) matchPrefix(p cmp.Path) bool {
for _, ps := range p {
switch ps := ps.(type) {
case cmp.StructField:
ft = ft.sub[ps.Name()]
if ft.ok {
return true
}
if len(ft.sub) == 0 {
return false
}
case cmp.Indirect:
default:
return false
}
}
return false
}
// canonicalName returns a list of identifiers where any struct field access
// through an embedded field is expanded to include the names of the embedded
// types themselves.
//
// For example, suppose field "Foo" is not directly in the parent struct,
// but actually from an embedded struct of type "Bar". Then, the canonical name
// of "Foo" is actually "Bar.Foo".
//
// Suppose field "Foo" is not directly in the parent struct, but actually
// a field in two different embedded structs of types "Bar" and "Baz".
// Then the selector "Foo" causes a panic since it is ambiguous which one it
// refers to. The user must specify either "Bar.Foo" or "Baz.Foo".
func canonicalName(t reflect.Type, sel string) ([]string, error) {
var name string
sel = strings.TrimPrefix(sel, ".")
if sel == "" {
return nil, fmt.Errorf("name must not be empty")
}
if i := strings.IndexByte(sel, '.'); i < 0 {
name, sel = sel, ""
} else {
name, sel = sel[:i], sel[i:]
}
// Type must be a struct or pointer to struct.
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return nil, fmt.Errorf("%v must be a struct", t)
}
// Find the canonical name for this current field name.
// If the field exists in an embedded struct, then it will be expanded.
if !isExported(name) {
// Disallow unexported fields:
// * To discourage people from actually touching unexported fields
// * FieldByName is buggy (https://golang.org/issue/4876)
return []string{name}, fmt.Errorf("name must be exported")
}
sf, ok := t.FieldByName(name)
if !ok {
return []string{name}, fmt.Errorf("does not exist")
}
var ss []string
for i := range sf.Index {
ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name)
}
if sel == "" {
return ss, nil
}
ssPost, err := canonicalName(sf.Type, sel)
return append(ss, ssPost...), err
}

35
vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go generated vendored Normal file
View File

@ -0,0 +1,35 @@
// Copyright 2018, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"github.com/google/go-cmp/cmp"
)
type xformFilter struct{ xform cmp.Option }
func (xf xformFilter) filter(p cmp.Path) bool {
for _, ps := range p {
if t, ok := ps.(cmp.Transform); ok && t.Option() == xf.xform {
return false
}
}
return true
}
// AcyclicTransformer returns a Transformer with a filter applied that ensures
// that the transformer cannot be recursively applied upon its own output.
//
// An example use case is a transformer that splits a string by lines:
// AcyclicTransformer("SplitLines", func(s string) []string{
// return strings.Split(s, "\n")
// })
//
// Had this been an unfiltered Transformer instead, this would result in an
// infinite cycle converting a string to []string to [][]string and so on.
func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option {
xf := xformFilter{cmp.Transformer(name, xformFunc)}
return cmp.FilterPath(xf.filter, xf.xform)
}
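// A hedged usage sketch of the SplitLines transformer from the doc comment
// above, applied through cmp.Diff so only the differing line is reported.
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	opt := cmpopts.AcyclicTransformer("SplitLines", func(s string) []string {
		return strings.Split(s, "\n")
	})
	fmt.Println(cmp.Diff("a\nb", "a\nc", opt))
}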

1
vendor/github.com/google/uuid/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module github.com/google/uuid

View File

@ -48,6 +48,7 @@ func setNodeInterface(name string) bool {
// does not specify a specific interface generate a random Node ID
// (section 4.1.6)
if name == "" {
ifname = "random"
randomBits(nodeID[:])
return true
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All rights reserved.
// Copyright 2018 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -35,20 +35,43 @@ const (
var rander = rand.Reader // random function
// Parse decodes s into a UUID or returns an error. Both the UUID form of
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
func Parse(s string) (UUID, error) {
var uuid UUID
if len(s) != 36 {
if len(s) != 36+9 {
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
}
switch len(s) {
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36:
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
if strings.ToLower(s[:9]) != "urn:uuid:" {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
case 36 + 2:
s = s[1:]
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
case 32:
var ok bool
for i := range uuid {
uuid[i], ok = xtob(s[i*2], s[i*2+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
@ -70,15 +93,29 @@ func Parse(s string) (UUID, error) {
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
var uuid UUID
if len(b) != 36 {
if len(b) != 36+9 {
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
}
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
b = b[1:]
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
var ok bool
for i := 0; i < 32; i += 2 {
uuid[i/2], ok = xtob(b[i], b[i+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
}
// b is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
@ -97,6 +134,16 @@ func ParseBytes(b []byte) (UUID, error) {
return uuid, nil
}
// MustParse is like Parse but panics if the string cannot be parsed.
// It simplifies safe initialization of global variables holding compiled UUIDs.
func MustParse(s string) UUID {
uuid, err := Parse(s)
if err != nil {
panic(`uuid: Parse(` + s + `): ` + err.Error())
}
return uuid
}
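// A hedged usage sketch: the four encodings accepted by Parse above, plus
// MustParse for initializing a package-level variable; the UUID value is
// arbitrary.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

var sentinel = uuid.MustParse("9b28f1b2-3c4d-4e5f-8a6b-7c8d9e0f1a2b")

func main() {
	for _, s := range []string{
		"9b28f1b2-3c4d-4e5f-8a6b-7c8d9e0f1a2b",           // standard
		"urn:uuid:9b28f1b2-3c4d-4e5f-8a6b-7c8d9e0f1a2b",  // URN
		"{9b28f1b2-3c4d-4e5f-8a6b-7c8d9e0f1a2b}",         // Microsoft
		"9b28f1b23c4d4e5f8a6b7c8d9e0f1a2b",               // raw hex
	} {
		u, err := uuid.Parse(s)
		fmt.Println(u == sentinel, err) // true <nil> for all four forms
	}
}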
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
@ -130,7 +177,7 @@ func (uuid UUID) URN() string {
}
func encodeHex(dst []byte, uuid UUID) {
hex.Encode(dst[:], uuid[:4])
hex.Encode(dst, uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'

View File

@ -545,7 +545,11 @@ func v3auth(cloud *Cloud, opts *ClientOpts) (*gophercloud.AuthOptions, error) {
scope := new(gophercloud.AuthScope)
// Application credentials don't support scope
if !isApplicationCredential(cloud.AuthInfo) {
if isApplicationCredential(cloud.AuthInfo) {
// If Domain* is set, but UserDomain* or ProjectDomain* aren't,
// then use Domain* as the default setting.
cloud = setDomainIfNeeded(cloud)
} else {
if !isProjectScoped(cloud.AuthInfo) {
if cloud.AuthInfo.DomainID != "" {
scope.DomainID = cloud.AuthInfo.DomainID

View File

@ -4,118 +4,118 @@ package clientconfig
// The format of the clouds-public.yml is documented at
// https://docs.openstack.org/python-openstackclient/latest/configuration/
type PublicClouds struct {
Clouds map[string]Cloud `yaml:"public-clouds"`
Clouds map[string]Cloud `yaml:"public-clouds" json:"public-clouds"`
}
// Clouds represents a collection of Cloud entries in a clouds.yaml file.
// The format of clouds.yaml is documented at
// https://docs.openstack.org/os-client-config/latest/user/configuration.html.
type Clouds struct {
Clouds map[string]Cloud `yaml:"clouds"`
Clouds map[string]Cloud `yaml:"clouds" json:"clouds"`
}
// Cloud represents an entry in a clouds.yaml/public-clouds.yaml/secure.yaml file.
type Cloud struct {
Cloud string `yaml:"cloud"`
Profile string `yaml:"profile"`
AuthInfo *AuthInfo `yaml:"auth"`
AuthType AuthType `yaml:"auth_type"`
RegionName string `yaml:"region_name"`
Regions []interface{} `yaml:"regions"`
Cloud string `yaml:"cloud" json:"cloud"`
Profile string `yaml:"profile" json:"profile"`
AuthInfo *AuthInfo `yaml:"auth" json:"auth"`
AuthType AuthType `yaml:"auth_type" json:"auth_type"`
RegionName string `yaml:"region_name" json:"region_name"`
Regions []interface{} `yaml:"regions" json:"regions"`
// API Version overrides.
IdentityAPIVersion string `yaml:"identity_api_version"`
VolumeAPIVersion string `yaml:"volume_api_version"`
IdentityAPIVersion string `yaml:"identity_api_version" json:"identity_api_version"`
VolumeAPIVersion string `yaml:"volume_api_version" json:"volume_api_version"`
// Verify whether or not SSL API requests should be verified.
Verify *bool `yaml:"verify"`
Verify *bool `yaml:"verify" json:"verify"`
// CACertFile a path to a CA Cert bundle that can be used as part of
// verifying SSL API requests.
CACertFile string `yaml:"cacert"`
CACertFile string `yaml:"cacert" json:"cacert"`
// ClientCertFile a path to a client certificate to use as part of the SSL
// transaction.
ClientCertFile string `yaml:"cert"`
ClientCertFile string `yaml:"cert" json:"cert"`
// ClientKeyFile a path to a client key to use as part of the SSL
// transaction.
ClientKeyFile string `yaml:"key"`
ClientKeyFile string `yaml:"key" json:"key"`
}
// AuthInfo represents the auth section of a cloud entry or
// auth options entered explicitly in ClientOpts.
type AuthInfo struct {
// AuthURL is the keystone/identity endpoint URL.
AuthURL string `yaml:"auth_url"`
AuthURL string `yaml:"auth_url" json:"auth_url"`
// Token is a pre-generated authentication token.
Token string `yaml:"token"`
Token string `yaml:"token" json:"token"`
// Username is the username of the user.
Username string `yaml:"username"`
Username string `yaml:"username" json:"username"`
// UserID is the unique ID of a user.
UserID string `yaml:"user_id"`
UserID string `yaml:"user_id" json:"user_id"`
// Password is the password of the user.
Password string `yaml:"password"`
Password string `yaml:"password" json:"password"`
// Application Credential ID to login with.
ApplicationCredentialID string `yaml:"application_credential_id"`
ApplicationCredentialID string `yaml:"application_credential_id" json:"application_credential_id"`
// Application Credential name to login with.
ApplicationCredentialName string `yaml:"application_credential_name"`
ApplicationCredentialName string `yaml:"application_credential_name" json:"application_credential_name"`
// Application Credential secret to login with.
ApplicationCredentialSecret string `yaml:"application_credential_secret"`
ApplicationCredentialSecret string `yaml:"application_credential_secret" json:"application_credential_secret"`
// ProjectName is the common/human-readable name of a project.
// Users can be scoped to a project.
// ProjectName on its own is not enough to ensure a unique scope. It must
// also be combined with either a ProjectDomainName or ProjectDomainID.
// ProjectName cannot be combined with ProjectID in a scope.
ProjectName string `yaml:"project_name"`
ProjectName string `yaml:"project_name" json:"project_name"`
// ProjectID is the unique ID of a project.
// It can be used to scope a user to a specific project.
ProjectID string `yaml:"project_id"`
ProjectID string `yaml:"project_id" json:"project_id"`
// UserDomainName is the name of the domain where a user resides.
// It is used to identify the source domain of a user.
UserDomainName string `yaml:"user_domain_name"`
UserDomainName string `yaml:"user_domain_name" json:"user_domain_name"`
// UserDomainID is the unique ID of the domain where a user resides.
// It is used to identify the source domain of a user.
UserDomainID string `yaml:"user_domain_id"`
UserDomainID string `yaml:"user_domain_id" json:"user_domain_id"`
// ProjectDomainName is the name of the domain where a project resides.
// It is used to identify the source domain of a project.
// ProjectDomainName can be used in addition to a ProjectName when scoping
// a user to a specific project.
ProjectDomainName string `yaml:"project_domain_name"`
ProjectDomainName string `yaml:"project_domain_name" json:"project_domain_name"`
// ProjectDomainID is the unique ID of the domain where a project resides.
// It is used to identify the source domain of a project.
// ProjectDomainID can be used in addition to a ProjectName when scoping
// a user to a specific project.
ProjectDomainID string `yaml:"project_domain_id"`
ProjectDomainID string `yaml:"project_domain_id" json:"project_domain_id"`
// DomainName is the name of a domain which can be used to identify the
// source domain of either a user or a project.
// If UserDomainName and ProjectDomainName are not specified, then DomainName
// is used as a default choice.
// It can also be used to specify a domain-only scope.
DomainName string `yaml:"domain_name"`
DomainName string `yaml:"domain_name" json:"domain_name"`
// DomainID is the unique ID of a domain which can be used to identify the
// source domain of either a user or a project.
// If UserDomainID and ProjectDomainID are not specified, then DomainID is
// used as a default choice.
// It can also be used to specify a domain-only scope.
DomainID string `yaml:"domain_id"`
DomainID string `yaml:"domain_id" json:"domain_id"`
// DefaultDomain is the domain ID to fall back on if no other domain has
// been specified and a domain is required for scope.
DefaultDomain string `yaml:"default_domain"`
DefaultDomain string `yaml:"default_domain" json:"default_domain"`
}
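
The new json tags mirror the existing yaml ones, so the same AuthInfo values decode from either encoding. A minimal sketch, assuming gopkg.in/yaml.v2 is available; the struct is trimmed and the credential values are hypothetical:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// authInfo is a trimmed copy of the struct above, enough to show the tags.
type authInfo struct {
	AuthURL  string `yaml:"auth_url" json:"auth_url"`
	Username string `yaml:"username" json:"username"`
	Password string `yaml:"password" json:"password"`
}

func main() {
	// Hypothetical clouds.yaml-style snippet.
	src := []byte("auth_url: https://keystone.example.com/v3\nusername: demo\npassword: s3cret\n")

	var ai authInfo
	if err := yaml.Unmarshal(src, &ai); err != nil {
		panic(err)
	}
	fmt.Println(ai.AuthURL, ai.Username) // https://keystone.example.com/v3 demo
}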

View File

@ -22,4 +22,4 @@ _testmain.go
*.exe
.idea/
*.iml
*.iml

View File

@ -3,11 +3,11 @@ sudo: false
matrix:
include:
- go: 1.4
- go: 1.5
- go: 1.6
- go: 1.7
- go: 1.8
- go: 1.7.x
- go: 1.8.x
- go: 1.9.x
- go: 1.10.x
- go: 1.11.x
- go: tip
allow_failures:
- go: tip

View File

@ -4,5 +4,6 @@
# Please keep the list sorted.
Gary Burd <gary@beagledreams.com>
Google LLC (https://opensource.google.com/)
Joachim Bauch <mail@joachim-bauch.de>

View File

@ -51,7 +51,7 @@ subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
</table>
Notes:
Notes:
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
2. The application can get the type of a received data message by implementing

View File

@ -5,15 +5,15 @@
package websocket
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"encoding/base64"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptrace"
"net/url"
"strings"
"time"
@ -53,6 +53,10 @@ type Dialer struct {
// NetDial is nil, net.Dial is used.
NetDial func(network, addr string) (net.Conn, error)
// NetDialContext specifies the dial function for creating TCP connections. If
// NetDialContext is nil, net.DialContext is used.
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
// Proxy specifies a function to return a proxy for a given
// Request. If the function returns a non-nil error, the
// request is aborted with the provided error.
@ -71,6 +75,17 @@ type Dialer struct {
// do not limit the size of the messages that can be sent or received.
ReadBufferSize, WriteBufferSize int
// WriteBufferPool is a pool of buffers for write operations. If the value
// is not set, then write buffers are allocated to the connection for the
// lifetime of the connection.
//
// A pool is most useful when the application has a modest volume of writes
// across a large number of connections.
//
// Applications should use a single pool for each unique value of
// WriteBufferSize.
WriteBufferPool BufferPool
// Subprotocols specifies the client's requested subprotocols.
Subprotocols []string
@ -86,52 +101,13 @@ type Dialer struct {
Jar http.CookieJar
}
var errMalformedURL = errors.New("malformed ws or wss URL")
// parseURL parses the URL.
//
// This function is a replacement for the standard library url.Parse function.
// In Go 1.4 and earlier, url.Parse loses information from the path.
func parseURL(s string) (*url.URL, error) {
// From the RFC:
//
// ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
// wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
var u url.URL
switch {
case strings.HasPrefix(s, "ws://"):
u.Scheme = "ws"
s = s[len("ws://"):]
case strings.HasPrefix(s, "wss://"):
u.Scheme = "wss"
s = s[len("wss://"):]
default:
return nil, errMalformedURL
}
if i := strings.Index(s, "?"); i >= 0 {
u.RawQuery = s[i+1:]
s = s[:i]
}
if i := strings.Index(s, "/"); i >= 0 {
u.Opaque = s[i:]
s = s[:i]
} else {
u.Opaque = "/"
}
u.Host = s
if strings.Contains(u.Host, "@") {
// Don't bother parsing user information because user information is
// not allowed in websocket URIs.
return nil, errMalformedURL
}
return &u, nil
// Dial creates a new client connection by calling DialContext with a background context.
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
return d.DialContext(context.Background(), urlStr, requestHeader)
}
var errMalformedURL = errors.New("malformed ws or wss URL")
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
hostPort = u.Host
hostNoPort = u.Host
@ -150,26 +126,29 @@ func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
return hostPort, hostNoPort
}
// DefaultDialer is a dialer with all fields set to the default zero values.
// DefaultDialer is a dialer with all fields set to the default values.
var DefaultDialer = &Dialer{
Proxy: http.ProxyFromEnvironment,
Proxy: http.ProxyFromEnvironment,
HandshakeTimeout: 45 * time.Second,
}
// Dial creates a new client connection. Use requestHeader to specify the
// nilDialer is the dialer to use when the receiver is nil.
var nilDialer = *DefaultDialer
// DialContext creates a new client connection. Use requestHeader to specify the
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
// Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// The context will be used in the request and in the Dialer.
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etcetera. The response body may not contain the entire response and does not
// need to be closed by the application.
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
if d == nil {
d = &Dialer{
Proxy: http.ProxyFromEnvironment,
}
d = &nilDialer
}
challengeKey, err := generateChallengeKey()
@ -177,7 +156,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
return nil, nil, err
}
u, err := parseURL(urlStr)
u, err := url.Parse(urlStr)
if err != nil {
return nil, nil, err
}
@ -205,6 +184,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
Header: make(http.Header),
Host: u.Host,
}
req = req.WithContext(ctx)
// Set the cookies present in the cookie jar of the dialer
if d.Jar != nil {
@ -237,45 +217,83 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
k == "Sec-Websocket-Extensions" ||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
case k == "Sec-Websocket-Protocol":
req.Header["Sec-WebSocket-Protocol"] = vs
default:
req.Header[k] = vs
}
}
if d.EnableCompression {
req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
}
if d.HandshakeTimeout != 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
defer cancel()
}
// Get network dial function.
var netDial func(network, addr string) (net.Conn, error)
if d.NetDialContext != nil {
netDial = func(network, addr string) (net.Conn, error) {
return d.NetDialContext(ctx, network, addr)
}
} else if d.NetDial != nil {
netDial = d.NetDial
} else {
netDialer := &net.Dialer{}
netDial = func(network, addr string) (net.Conn, error) {
return netDialer.DialContext(ctx, network, addr)
}
}
// If needed, wrap the dial function to set the connection deadline.
if deadline, ok := ctx.Deadline(); ok {
forwardDial := netDial
netDial = func(network, addr string) (net.Conn, error) {
c, err := forwardDial(network, addr)
if err != nil {
return nil, err
}
err = c.SetDeadline(deadline)
if err != nil {
c.Close()
return nil, err
}
return c, nil
}
}
// If needed, wrap the dial function to connect through a proxy.
if d.Proxy != nil {
proxyURL, err := d.Proxy(req)
if err != nil {
return nil, nil, err
}
if proxyURL != nil {
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
if err != nil {
return nil, nil, err
}
netDial = dialer.Dial
}
}
hostPort, hostNoPort := hostPortNoPort(u)
var proxyURL *url.URL
// Check whether the proxy method has been configured
if d.Proxy != nil {
proxyURL, err = d.Proxy(req)
}
if err != nil {
return nil, nil, err
trace := httptrace.ContextClientTrace(ctx)
if trace != nil && trace.GetConn != nil {
trace.GetConn(hostPort)
}
var targetHostPort string
if proxyURL != nil {
targetHostPort, _ = hostPortNoPort(proxyURL)
} else {
targetHostPort = hostPort
netConn, err := netDial("tcp", hostPort)
if trace != nil && trace.GotConn != nil {
trace.GotConn(httptrace.GotConnInfo{
Conn: netConn,
})
}
var deadline time.Time
if d.HandshakeTimeout != 0 {
deadline = time.Now().Add(d.HandshakeTimeout)
}
netDial := d.NetDial
if netDial == nil {
netDialer := &net.Dialer{Deadline: deadline}
netDial = netDialer.Dial
}
netConn, err := netDial("tcp", targetHostPort)
if err != nil {
return nil, nil, err
}
@ -286,42 +304,6 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
}
}()
if err := netConn.SetDeadline(deadline); err != nil {
return nil, nil, err
}
if proxyURL != nil {
connectHeader := make(http.Header)
if user := proxyURL.User; user != nil {
proxyUser := user.Username()
if proxyPassword, passwordSet := user.Password(); passwordSet {
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
}
}
connectReq := &http.Request{
Method: "CONNECT",
URL: &url.URL{Opaque: hostPort},
Host: hostPort,
Header: connectHeader,
}
connectReq.Write(netConn)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(netConn)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
return nil, nil, err
}
if resp.StatusCode != 200 {
f := strings.SplitN(resp.Status, " ", 2)
return nil, nil, errors.New(f[1])
}
}
if u.Scheme == "https" {
cfg := cloneTLSConfig(d.TLSClientConfig)
if cfg.ServerName == "" {
@ -329,22 +311,31 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
}
tlsConn := tls.Client(netConn, cfg)
netConn = tlsConn
if err := tlsConn.Handshake(); err != nil {
return nil, nil, err
var err error
if trace != nil {
err = doHandshakeWithTrace(trace, tlsConn, cfg)
} else {
err = doHandshake(tlsConn, cfg)
}
if !cfg.InsecureSkipVerify {
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
return nil, nil, err
}
if err != nil {
return nil, nil, err
}
}
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
if err := req.Write(netConn); err != nil {
return nil, nil, err
}
if trace != nil && trace.GotFirstResponseByte != nil {
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
trace.GotFirstResponseByte()
}
}
resp, err := http.ReadResponse(conn.br, req)
if err != nil {
return nil, nil, err
@ -390,3 +381,15 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
netConn = nil // to avoid close in defer.
return conn, resp, nil
}
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
if err := tlsConn.Handshake(); err != nil {
return err
}
if !cfg.InsecureSkipVerify {
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
return err
}
}
return nil
}
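
The new DialContext entry point threads a context through the TCP dial, the handshake request and any httptrace hooks. A minimal sketch of calling it with a timeout; the endpoint URL is hypothetical:

package main

import (
	"context"
	"log"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Hypothetical endpoint; cancelling ctx aborts the dial and handshake.
	conn, _, err := websocket.DefaultDialer.DialContext(ctx, "wss://echo.example.com/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}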

View File

@ -76,7 +76,7 @@ const (
// is UTF-8 encoded text.
PingMessage = 9
// PongMessage denotes a ping control message. The optional message payload
// PongMessage denotes a pong control message. The optional message payload
// is UTF-8 encoded text.
PongMessage = 10
)
@ -100,9 +100,8 @@ func (e *netError) Error() string { return e.msg }
func (e *netError) Temporary() bool { return e.temporary }
func (e *netError) Timeout() bool { return e.timeout }
// CloseError represents close frame.
// CloseError represents a close message.
type CloseError struct {
// Code is defined in RFC 6455, section 11.7.
Code int
@ -224,6 +223,20 @@ func isValidReceivedCloseCode(code int) bool {
return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
}
// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
// interface. The type of the value stored in a pool is not specified.
type BufferPool interface {
// Get gets a value from the pool or returns nil if the pool is empty.
Get() interface{}
// Put adds a value to the pool.
Put(interface{})
}
// writePoolData is the type added to the write buffer pool. This wrapper is
// used to prevent applications from peeking at and depending on the values
// added to the pool.
type writePoolData struct{ buf []byte }
// The Conn type represents a WebSocket connection.
type Conn struct {
conn net.Conn
@ -233,6 +246,8 @@ type Conn struct {
// Write fields
mu chan bool // used as mutex to protect write to conn
writeBuf []byte // frame is constructed in this buffer.
writePool BufferPool
writeBufSize int
writeDeadline time.Time
writer io.WriteCloser // the current writer returned to the application
isWriting bool // for best-effort concurrent write detection
@ -264,64 +279,29 @@ type Conn struct {
newDecompressionReader func(io.Reader) io.ReadCloser
}
func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil)
}
func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
type writeHook struct {
p []byte
}
func (wh *writeHook) Write(p []byte) (int, error) {
wh.p = p
return len(p), nil
}
func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn {
mu := make(chan bool, 1)
mu <- true
var br *bufio.Reader
if readBufferSize == 0 && brw != nil && brw.Reader != nil {
// Reuse the supplied bufio.Reader if the buffer has a useful size.
// This code assumes that peek on a reader returns
// bufio.Reader.buf[:0].
brw.Reader.Reset(conn)
if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 {
br = brw.Reader
}
}
if br == nil {
if readBufferSize == 0 {
readBufferSize = defaultReadBufferSize
}
if readBufferSize < maxControlFramePayloadSize {
} else if readBufferSize < maxControlFramePayloadSize {
// must be large enough for control frame
readBufferSize = maxControlFramePayloadSize
}
br = bufio.NewReaderSize(conn, readBufferSize)
}
var writeBuf []byte
if writeBufferSize == 0 && brw != nil && brw.Writer != nil {
// Use the bufio.Writer's buffer if the buffer has a useful size. This
// code assumes that bufio.Writer.buf[:1] is passed to the
// bufio.Writer's underlying writer.
var wh writeHook
brw.Writer.Reset(&wh)
brw.Writer.WriteByte(0)
brw.Flush()
if cap(wh.p) >= maxFrameHeaderSize+256 {
writeBuf = wh.p[:cap(wh.p)]
}
}
if writeBuf == nil {
if writeBufferSize == 0 {
writeBufferSize = defaultWriteBufferSize
}
writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize)
if writeBufferSize <= 0 {
writeBufferSize = defaultWriteBufferSize
}
writeBufferSize += maxFrameHeaderSize
if writeBuf == nil && writeBufferPool == nil {
writeBuf = make([]byte, writeBufferSize)
}
mu := make(chan bool, 1)
mu <- true
c := &Conn{
isServer: isServer,
br: br,
@ -329,6 +309,8 @@ func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize in
mu: mu,
readFinal: true,
writeBuf: writeBuf,
writePool: writeBufferPool,
writeBufSize: writeBufferSize,
enableWriteCompression: true,
compressionLevel: defaultCompressionLevel,
}
@ -343,7 +325,8 @@ func (c *Conn) Subprotocol() string {
return c.subprotocol
}
// Close closes the underlying network connection without sending or waiting for a close frame.
// Close closes the underlying network connection without sending or waiting
// for a close message.
func (c *Conn) Close() error {
return c.conn.Close()
}
@ -370,7 +353,16 @@ func (c *Conn) writeFatal(err error) error {
return err
}
func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
func (c *Conn) read(n int) ([]byte, error) {
p, err := c.br.Peek(n)
if err == io.EOF {
err = errUnexpectedEOF
}
c.br.Discard(len(p))
return p, err
}
func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
<-c.mu
defer func() { c.mu <- true }()
@ -382,15 +374,14 @@ func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
}
c.conn.SetWriteDeadline(deadline)
for _, buf := range bufs {
if len(buf) > 0 {
_, err := c.conn.Write(buf)
if err != nil {
return c.writeFatal(err)
}
}
if len(buf1) == 0 {
_, err = c.conn.Write(buf0)
} else {
err = c.writeBufs(buf0, buf1)
}
if err != nil {
return c.writeFatal(err)
}
if frameType == CloseMessage {
c.writeFatal(ErrCloseSent)
}
@ -476,7 +467,19 @@ func (c *Conn) prepWrite(messageType int) error {
c.writeErrMu.Lock()
err := c.writeErr
c.writeErrMu.Unlock()
return err
if err != nil {
return err
}
if c.writeBuf == nil {
wpd, ok := c.writePool.Get().(writePoolData)
if ok {
c.writeBuf = wpd.buf
} else {
c.writeBuf = make([]byte, c.writeBufSize)
}
}
return nil
}
// NextWriter returns a writer for the next message to send. The writer's Close
@ -484,6 +487,9 @@ func (c *Conn) prepWrite(messageType int) error {
//
// There can be at most one open writer on a connection. NextWriter closes the
// previous writer if the application has not already done so.
//
// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
// PongMessage) are supported.
func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
if err := c.prepWrite(messageType); err != nil {
return nil, err
@ -599,6 +605,10 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error {
if final {
c.writer = nil
if c.writePool != nil {
c.writePool.Put(writePoolData{buf: c.writeBuf})
c.writeBuf = nil
}
return nil
}
@ -764,7 +774,6 @@ func (c *Conn) SetWriteDeadline(t time.Time) error {
// Read methods
func (c *Conn) advanceFrame() (int, error) {
// 1. Skip remainder of previous frame.
if c.readRemaining > 0 {
@ -1033,7 +1042,7 @@ func (c *Conn) SetReadDeadline(t time.Time) error {
}
// SetReadLimit sets the maximum size for a message read from the peer. If a
// message exceeds the limit, the connection sends a close frame to the peer
// message exceeds the limit, the connection sends a close message to the peer
// and returns ErrReadLimit to the application.
func (c *Conn) SetReadLimit(limit int64) {
c.readLimit = limit
@ -1046,24 +1055,22 @@ func (c *Conn) CloseHandler() func(code int, text string) error {
// SetCloseHandler sets the handler for close messages received from the peer.
// The code argument to h is the received close code or CloseNoStatusReceived
// if the close message is empty. The default close handler sends a close frame
// back to the peer.
// if the close message is empty. The default close handler sends a close
// message back to the peer.
//
// The application must read the connection to process close messages as
// described in the section on Control Frames above.
// The handler function is called from the NextReader, ReadMessage and message
// reader Read methods. The application must read the connection to process
// close messages as described in the section on Control Messages above.
//
// The connection read methods return a CloseError when a close frame is
// The connection read methods return a CloseError when a close message is
// received. Most applications should handle close messages as part of their
// normal error handling. Applications should only set a close handler when the
// application must perform some action before sending a close frame back to
// application must perform some action before sending a close message back to
// the peer.
func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
if h == nil {
h = func(code int, text string) error {
message := []byte{}
if code != CloseNoStatusReceived {
message = FormatCloseMessage(code, "")
}
message := FormatCloseMessage(code, "")
c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
return nil
}
@ -1077,11 +1084,12 @@ func (c *Conn) PingHandler() func(appData string) error {
}
// SetPingHandler sets the handler for ping messages received from the peer.
// The appData argument to h is the PING frame application data. The default
// The appData argument to h is the PING message application data. The default
// ping handler sends a pong to the peer.
//
// The application must read the connection to process ping messages as
// described in the section on Control Frames above.
// The handler function is called from the NextReader, ReadMessage and message
// reader Read methods. The application must read the connection to process
// ping messages as described in the section on Control Messages above.
func (c *Conn) SetPingHandler(h func(appData string) error) {
if h == nil {
h = func(message string) error {
@ -1103,11 +1111,12 @@ func (c *Conn) PongHandler() func(appData string) error {
}
// SetPongHandler sets the handler for pong messages received from the peer.
// The appData argument to h is the PONG frame application data. The default
// The appData argument to h is the PONG message application data. The default
// pong handler does nothing.
//
// The application must read the connection to process ping messages as
// described in the section on Control Frames above.
// The handler function is called from the NextReader, ReadMessage and message
// reader Read methods. The application must read the connection to process
// pong messages as described in the section on Control Messages above.
func (c *Conn) SetPongHandler(h func(appData string) error) {
if h == nil {
h = func(string) error { return nil }
@ -1141,7 +1150,14 @@ func (c *Conn) SetCompressionLevel(level int) error {
}
// FormatCloseMessage formats closeCode and text as a WebSocket close message.
// An empty message is returned for code CloseNoStatusReceived.
func FormatCloseMessage(closeCode int, text string) []byte {
if closeCode == CloseNoStatusReceived {
// Return empty message because it's illegal to send
// CloseNoStatusReceived. Return non-nil value in case application
// checks for nil.
return []byte{}
}
buf := make([]byte, 2+len(text))
binary.BigEndian.PutUint16(buf, uint16(closeCode))
copy(buf[2:], text)
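
As the BufferPool comment above notes, *sync.Pool already satisfies the interface, so wiring a shared write-buffer pool into a Dialer (or Upgrader) is a one-liner. A minimal sketch:

package example

import (
	"sync"

	"github.com/gorilla/websocket"
)

// One pool per distinct WriteBufferSize, shared across many connections.
var writeBufferPool = new(sync.Pool)

// dialer draws write buffers from the pool instead of holding one buffer
// per connection for the connection's lifetime.
var dialer = websocket.Dialer{
	WriteBufferSize: 4096,
	WriteBufferPool: writeBufferPool, // *sync.Pool satisfies websocket.BufferPool
}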

View File

@ -1,21 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.5
package websocket
import "io"
func (c *Conn) read(n int) ([]byte, error) {
p, err := c.br.Peek(n)
if err == io.EOF {
err = errUnexpectedEOF
}
if len(p) > 0 {
// advance over the bytes just read
io.ReadFull(c.br, p)
}
return p, err
}

View File

@ -2,17 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.5
// +build go1.8
package websocket
import "io"
import "net"
func (c *Conn) read(n int) ([]byte, error) {
p, err := c.br.Peek(n)
if err == io.EOF {
err = errUnexpectedEOF
}
c.br.Discard(len(p))
return p, err
func (c *Conn) writeBufs(bufs ...[]byte) error {
b := net.Buffers(bufs)
_, err := b.WriteTo(c.conn)
return err
}

View File

@ -0,0 +1,18 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.8
package websocket
func (c *Conn) writeBufs(bufs ...[]byte) error {
for _, buf := range bufs {
if len(buf) > 0 {
if _, err := c.conn.Write(buf); err != nil {
return err
}
}
}
return nil
}
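
On Go 1.8+ writeBufs hands the frame header and payload to net.Buffers, which can issue a single vectored (writev-style) write instead of two Write calls; the file above is the pre-1.8 fallback loop. A standalone sketch of the same mechanism:

package main

import (
	"net"
	"os"
)

func main() {
	// Two slices, one write: net.Buffers implements io.WriterTo.
	bufs := net.Buffers{[]byte("frame header | "), []byte("payload\n")}
	if _, err := bufs.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}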

View File

@ -6,9 +6,8 @@
//
// Overview
//
// The Conn type represents a WebSocket connection. A server application uses
// the Upgrade function from an Upgrader object with a HTTP request handler
// to get a pointer to a Conn:
// The Conn type represents a WebSocket connection. A server application calls
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
//
// var upgrader = websocket.Upgrader{
// ReadBufferSize: 1024,
@ -31,10 +30,12 @@
// for {
// messageType, p, err := conn.ReadMessage()
// if err != nil {
// log.Println(err)
// return
// }
// if err = conn.WriteMessage(messageType, p); err != nil {
// return err
// if err := conn.WriteMessage(messageType, p); err != nil {
// log.Println(err)
// return
// }
// }
//
@ -85,20 +86,26 @@
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
// methods to send a control message to the peer.
//
// Connections handle received close messages by sending a close message to the
// peer and returning a *CloseError from the the NextReader, ReadMessage or the
// message Read method.
// Connections handle received close messages by calling the handler function
// set with the SetCloseHandler method and by returning a *CloseError from the
// NextReader, ReadMessage or the message Read method. The default close
// handler sends a close message to the peer.
//
// Connections handle received ping and pong messages by invoking callback
// functions set with SetPingHandler and SetPongHandler methods. The callback
// functions are called from the NextReader, ReadMessage and the message Read
// methods.
// Connections handle received ping messages by calling the handler function
// set with the SetPingHandler method. The default ping handler sends a pong
// message to the peer.
//
// The default ping handler sends a pong to the peer. The application's reading
// goroutine can block for a short time while the handler writes the pong data
// to the connection.
// Connections handle received pong messages by calling the handler function
// set with the SetPongHandler method. The default pong handler does nothing.
// If an application sends ping messages, then the application should set a
// pong handler to receive the corresponding pong.
//
// The application must read the connection to process ping, pong and close
// The control message handler functions are called from the NextReader,
// ReadMessage and message reader Read methods. The default close and ping
// handlers can block these methods for a short time when the handler writes to
// the connection.
//
// The application must read the connection to process close, ping and pong
// messages sent from the peer. If the application is not otherwise interested
// in messages from the peer, then the application should start a goroutine to
// read and discard messages from the peer. A simple example is:
@ -137,19 +144,12 @@
// method fails the WebSocket handshake with HTTP status 403.
//
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
// the handshake if the Origin request header is present and not equal to the
// Host request header.
// the handshake if the Origin request header is present and the Origin host is
// not equal to the Host request header.
//
// An application can allow connections from any origin by specifying a
// function that always returns true:
//
// var upgrader = websocket.Upgrader{
// CheckOrigin: func(r *http.Request) bool { return true },
// }
//
// The deprecated Upgrade function does not enforce an origin policy. It's the
// application's responsibility to check the Origin header before calling
// Upgrade.
// The deprecated package-level Upgrade function does not perform origin
// checking. The application is responsible for checking the Origin header
// before calling the Upgrade function.
//
// Compression EXPERIMENTAL
//
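
The paragraph above stops at "A simple example is:"; a minimal sketch of such a read-and-discard goroutine, assuming an established *websocket.Conn:

package example

import "github.com/gorilla/websocket"

// discardIncoming keeps reading so that the close, ping and pong handlers
// run, throwing away any data messages the application does not care about.
func discardIncoming(c *websocket.Conn) {
	go func() {
		for {
			if _, _, err := c.NextReader(); err != nil {
				c.Close()
				return
			}
		}
	}()
}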

View File

@ -9,12 +9,14 @@ import (
"io"
)
// WriteJSON is deprecated, use c.WriteJSON instead.
// WriteJSON writes the JSON encoding of v as a message.
//
// Deprecated: Use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
return c.WriteJSON(v)
}
// WriteJSON writes the JSON encoding of v to the connection.
// WriteJSON writes the JSON encoding of v as a message.
//
// See the documentation for encoding/json Marshal for details about the
// conversion of Go values to JSON.
@ -31,7 +33,10 @@ func (c *Conn) WriteJSON(v interface{}) error {
return err2
}
// ReadJSON is deprecated, use c.ReadJSON instead.
// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// Deprecated: Use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
return c.ReadJSON(v)
}
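
The package-level WriteJSON/ReadJSON helpers are now explicitly deprecated in favor of the connection methods. A minimal sketch using the method form; the event type is made up for the example:

package example

import "github.com/gorilla/websocket"

type event struct {
	Op  string `json:"op"`
	Seq int    `json:"seq"`
}

// echoJSON reads one JSON message and writes it back, using the
// per-connection methods rather than the deprecated package functions.
func echoJSON(conn *websocket.Conn) error {
	var ev event
	if err := conn.ReadJSON(&ev); err != nil {
		return err
	}
	return conn.WriteJSON(ev)
}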

View File

@ -11,7 +11,6 @@ import "unsafe"
const wordSize = int(unsafe.Sizeof(uintptr(0)))
func maskBytes(key [4]byte, pos int, b []byte) int {
// Mask one byte at a time for small buffers.
if len(b) < 2*wordSize {
for i := range b {

View File

@ -19,7 +19,6 @@ import (
type PreparedMessage struct {
messageType int
data []byte
err error
mu sync.Mutex
frames map[prepareKey]*preparedFrame
}

77
vendor/github.com/gorilla/websocket/proxy.go generated vendored Normal file
View File

@ -0,0 +1,77 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bufio"
"encoding/base64"
"errors"
"net"
"net/http"
"net/url"
"strings"
)
type netDialerFunc func(network, addr string) (net.Conn, error)
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
return fn(network, addr)
}
func init() {
proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
})
}
type httpProxyDialer struct {
proxyURL *url.URL
forwardDial func(network, addr string) (net.Conn, error)
}
func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
hostPort, _ := hostPortNoPort(hpd.proxyURL)
conn, err := hpd.forwardDial(network, hostPort)
if err != nil {
return nil, err
}
connectHeader := make(http.Header)
if user := hpd.proxyURL.User; user != nil {
proxyUser := user.Username()
if proxyPassword, passwordSet := user.Password(); passwordSet {
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
}
}
connectReq := &http.Request{
Method: "CONNECT",
URL: &url.URL{Opaque: addr},
Host: addr,
Header: connectHeader,
}
if err := connectReq.Write(conn); err != nil {
conn.Close()
return nil, err
}
// Read response. It's OK to use and discard the buffered reader here because
// the remote server does not speak until spoken to.
br := bufio.NewReader(conn)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
conn.Close()
return nil, err
}
if resp.StatusCode != 200 {
conn.Close()
f := strings.SplitN(resp.Status, " ", 2)
return nil, errors.New(f[1])
}
return conn, nil
}
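
The init above registers an "http" proxy scheme, so a Dialer reaches HTTP CONNECT proxies through its Proxy field; http.ProxyFromEnvironment works the same way via HTTP_PROXY/HTTPS_PROXY. A minimal sketch with a hypothetical proxy address:

package example

import (
	"net/http"
	"net/url"

	"github.com/gorilla/websocket"
)

// dialerVia returns a Dialer that tunnels the WebSocket connection through
// the given HTTP CONNECT proxy, e.g. "http://proxy.example.com:3128".
func dialerVia(proxyAddr string) (*websocket.Dialer, error) {
	u, err := url.Parse(proxyAddr)
	if err != nil {
		return nil, err
	}
	return &websocket.Dialer{Proxy: http.ProxyURL(u)}, nil
}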

View File

@ -7,7 +7,7 @@ package websocket
import (
"bufio"
"errors"
"net"
"io"
"net/http"
"net/url"
"strings"
@ -33,10 +33,23 @@ type Upgrader struct {
// or received.
ReadBufferSize, WriteBufferSize int
// WriteBufferPool is a pool of buffers for write operations. If the value
// is not set, then write buffers are allocated to the connection for the
// lifetime of the connection.
//
// A pool is most useful when the application has a modest volume of writes
// across a large number of connections.
//
// Applications should use a single pool for each unique value of
// WriteBufferSize.
WriteBufferPool BufferPool
// Subprotocols specifies the server's supported protocols in order of
// preference. If this field is set, then the Upgrade method negotiates a
// preference. If this field is not nil, then the Upgrade method negotiates a
// subprotocol by selecting the first match in this list with a protocol
// requested by the client.
// requested by the client. If there's no match, then no protocol is
// negotiated (the Sec-Websocket-Protocol header is not included in the
// handshake response).
Subprotocols []string
// Error specifies the function for generating HTTP error responses. If Error
@ -44,8 +57,12 @@ type Upgrader struct {
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
// CheckOrigin returns true if the request Origin header is acceptable. If
// CheckOrigin is nil, the host in the Origin header must not be set or
// must match the host of the request.
// CheckOrigin is nil, then a safe default is used: return false if the
// Origin request header is present and the origin host is not equal to
// the request Host header.
//
// A CheckOrigin function should carefully validate the request origin to
// prevent cross-site request forgery.
CheckOrigin func(r *http.Request) bool
// EnableCompression specify if the server should attempt to negotiate per
@ -76,7 +93,7 @@ func checkSameOrigin(r *http.Request) bool {
if err != nil {
return false
}
return u.Host == r.Host
return equalASCIIFold(u.Host, r.Host)
}
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
@ -99,42 +116,44 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header
//
// The responseHeader is included in the response to the client's upgrade
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
// application negotiated subprotocol (Sec-Websocket-Protocol).
// application negotiated subprotocol (Sec-WebSocket-Protocol).
//
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
// response.
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
if r.Method != "GET" {
return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET")
}
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
}
const badHandshake = "websocket: the client is not using the websocket protocol: "
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header")
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
}
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header")
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
}
if r.Method != "GET" {
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
}
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
}
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
}
checkOrigin := u.CheckOrigin
if checkOrigin == nil {
checkOrigin = checkSameOrigin
}
if !checkOrigin(r) {
return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed")
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
}
challengeKey := r.Header.Get("Sec-Websocket-Key")
if challengeKey == "" {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank")
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank")
}
subprotocol := u.selectSubprotocol(r, responseHeader)
@ -151,17 +170,12 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
}
}
var (
netConn net.Conn
err error
)
h, ok := w.(http.Hijacker)
if !ok {
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
}
var brw *bufio.ReadWriter
netConn, brw, err = h.Hijack()
netConn, brw, err := h.Hijack()
if err != nil {
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
}
@ -171,7 +185,21 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
return nil, errors.New("websocket: client sent data before handshake is complete")
}
c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw)
var br *bufio.Reader
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
// Reuse hijacked buffered reader as connection reader.
br = brw.Reader
}
buf := bufioWriterBuffer(netConn, brw.Writer)
var writeBuf []byte
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
// Reuse hijacked write buffer as connection buffer.
writeBuf = buf
}
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
c.subprotocol = subprotocol
if compress {
@ -179,17 +207,23 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
c.newDecompressionReader = decompressNoContextTakeover
}
p := c.writeBuf[:0]
// Use larger of hijacked buffer and connection write buffer for header.
p := buf
if len(c.writeBuf) > len(p) {
p = c.writeBuf
}
p = p[:0]
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
p = append(p, computeAcceptKey(challengeKey)...)
p = append(p, "\r\n"...)
if c.subprotocol != "" {
p = append(p, "Sec-Websocket-Protocol: "...)
p = append(p, "Sec-WebSocket-Protocol: "...)
p = append(p, c.subprotocol...)
p = append(p, "\r\n"...)
}
if compress {
p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
}
for k, vs := range responseHeader {
if k == "Sec-Websocket-Protocol" {
@ -230,13 +264,14 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
//
// This function is deprecated, use websocket.Upgrader instead.
// Deprecated: Use websocket.Upgrader instead.
//
// The application is responsible for checking the request origin before
// calling Upgrade. An example implementation of the same origin policy is:
// Upgrade does not perform origin checking. The application is responsible for
// checking the Origin header before calling Upgrade. An example implementation
// of the same origin policy check is:
//
// if req.Header.Get("Origin") != "http://"+req.Host {
// http.Error(w, "Origin not allowed", 403)
// http.Error(w, "Origin not allowed", http.StatusForbidden)
// return
// }
//
@ -289,3 +324,40 @@ func IsWebSocketUpgrade(r *http.Request) bool {
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
tokenListContainsValue(r.Header, "Upgrade", "websocket")
}
// bufioReaderSize returns the size of a bufio.Reader.
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
// This code assumes that peek on a reset reader returns
// bufio.Reader.buf[:0].
// TODO: Use bufio.Reader.Size() after Go 1.10
br.Reset(originalReader)
if p, err := br.Peek(0); err == nil {
return cap(p)
}
return 0
}
// writeHook is an io.Writer that records the last slice passed to it via
// io.Writer.Write.
type writeHook struct {
p []byte
}
func (wh *writeHook) Write(p []byte) (int, error) {
wh.p = p
return len(p), nil
}
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
// This code assumes that bufio.Writer.buf[:1] is passed to the
// bufio.Writer's underlying writer.
var wh writeHook
bw.Reset(&wh)
bw.WriteByte(0)
bw.Flush()
bw.Reset(originalWriter)
return wh.p[:cap(wh.p)]
}
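
Putting the Upgrader changes together: buffer reuse is automatic when the sizes are left at zero, and CheckOrigin should be set deliberately. A minimal handler sketch; the origin policy shown is an assumption for the example, not a recommendation:

package example

import (
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	// Leaving the buffer sizes at zero lets Upgrade reuse the hijacked
	// bufio buffers, as in the code above.
	CheckOrigin: func(r *http.Request) bool {
		// Example policy only: allow same-host browsers over HTTPS.
		return r.Header.Get("Origin") == "https://"+r.Host
	},
}

func wsHandler(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return // Upgrade has already written an HTTP error response.
	}
	defer conn.Close()
	// ... read/write loop ...
}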

19
vendor/github.com/gorilla/websocket/trace.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// +build go1.8
package websocket
import (
"crypto/tls"
"net/http/httptrace"
)
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
if trace.TLSHandshakeStart != nil {
trace.TLSHandshakeStart()
}
err := doHandshake(tlsConn, cfg)
if trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
}
return err
}

12
vendor/github.com/gorilla/websocket/trace_17.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// +build !go1.8
package websocket
import (
"crypto/tls"
"net/http/httptrace"
)
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
return doHandshake(tlsConn, cfg)
}
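
With the two build-tagged files above, TLS handshakes are reported through net/http/httptrace on Go 1.8+. A minimal sketch attaching a trace to DialContext; the endpoint is hypothetical:

package main

import (
	"context"
	"crypto/tls"
	"log"
	"net/http/httptrace"

	"github.com/gorilla/websocket"
)

func main() {
	trace := &httptrace.ClientTrace{
		GetConn:          func(hostPort string) { log.Println("dialing", hostPort) },
		TLSHandshakeDone: func(_ tls.ConnectionState, err error) { log.Println("tls done:", err) },
	}
	ctx := httptrace.WithClientTrace(context.Background(), trace)

	conn, _, err := websocket.DefaultDialer.DialContext(ctx, "wss://echo.example.com/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}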

View File

@ -11,6 +11,7 @@ import (
"io"
"net/http"
"strings"
"unicode/utf8"
)
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
@ -111,14 +112,14 @@ func nextTokenOrQuoted(s string) (value string, rest string) {
case escape:
escape = false
p[j] = b
j += 1
j++
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
j += 1
j++
}
}
return "", ""
@ -127,8 +128,31 @@ func nextTokenOrQuoted(s string) (value string, rest string) {
return "", ""
}
// equalASCIIFold returns true if s is equal to t with ASCII case folding.
func equalASCIIFold(s, t string) bool {
for s != "" && t != "" {
sr, size := utf8.DecodeRuneInString(s)
s = s[size:]
tr, size := utf8.DecodeRuneInString(t)
t = t[size:]
if sr == tr {
continue
}
if 'A' <= sr && sr <= 'Z' {
sr = sr + 'a' - 'A'
}
if 'A' <= tr && tr <= 'Z' {
tr = tr + 'a' - 'A'
}
if sr != tr {
return false
}
}
return s == t
}
// tokenListContainsValue returns true if the 1#token header with the given
// name contains token.
// name contains a token equal to value with ASCII case folding.
func tokenListContainsValue(header http.Header, name string, value string) bool {
headers:
for _, s := range header[name] {
@ -142,7 +166,7 @@ headers:
if s != "" && s[0] != ',' {
continue headers
}
if strings.EqualFold(t, value) {
if equalASCIIFold(t, value) {
return true
}
if s == "" {
@ -154,9 +178,8 @@ headers:
return false
}
// parseExtensiosn parses WebSocket extensions from a header.
// parseExtensions parses WebSocket extensions from a header.
func parseExtensions(header http.Header) []map[string]string {
// From RFC 6455:
//
// Sec-WebSocket-Extensions = extension-list
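
equalASCIIFold above folds only A-Z, unlike strings.EqualFold, which applies Unicode simple folding and would, for example, match the Kelvin sign to "k". A small standalone sketch of the difference, using a copy of the ASCII-only folding idea since the real function is unexported:

package main

import "fmt"

// asciiLower lowercases only the ASCII letters, mirroring equalASCIIFold.
func asciiLower(s string) string {
	b := []byte(s)
	for i, c := range b {
		if 'A' <= c && c <= 'Z' {
			b[i] = c + 'a' - 'A'
		}
	}
	return string(b)
}

func main() {
	fmt.Println(asciiLower("Upgrade") == asciiLower("upgrade")) // true
	// The Kelvin sign (U+212A) folds to 'k' under Unicode rules, so
	// strings.EqualFold would report true here; ASCII folding does not.
	fmt.Println(asciiLower("k") == asciiLower("\u212A")) // false
}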

473
vendor/github.com/gorilla/websocket/x_net_proxy.go generated vendored Normal file
View File

@ -0,0 +1,473 @@
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
// Package proxy provides support for a variety of protocols to proxy network
// data.
//
package websocket
import (
"errors"
"io"
"net"
"net/url"
"os"
"strconv"
"strings"
"sync"
)
type proxy_direct struct{}
// Direct is a direct proxy: one that makes network connections directly.
var proxy_Direct = proxy_direct{}
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
return net.Dial(network, addr)
}
// A PerHost directs connections to a default Dialer unless the host name
// requested matches one of a number of exceptions.
type proxy_PerHost struct {
def, bypass proxy_Dialer
bypassNetworks []*net.IPNet
bypassIPs []net.IP
bypassZones []string
bypassHosts []string
}
// NewPerHost returns a PerHost Dialer that directs connections to either
// defaultDialer or bypass, depending on whether the connection matches one of
// the configured rules.
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
return &proxy_PerHost{
def: defaultDialer,
bypass: bypass,
}
}
// Dial connects to the address addr on the given network through either
// defaultDialer or bypass.
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
return p.dialerForRequest(host).Dial(network, addr)
}
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
if ip := net.ParseIP(host); ip != nil {
for _, net := range p.bypassNetworks {
if net.Contains(ip) {
return p.bypass
}
}
for _, bypassIP := range p.bypassIPs {
if bypassIP.Equal(ip) {
return p.bypass
}
}
return p.def
}
for _, zone := range p.bypassZones {
if strings.HasSuffix(host, zone) {
return p.bypass
}
if host == zone[1:] {
// For a zone ".example.com", we match "example.com"
// too.
return p.bypass
}
}
for _, bypassHost := range p.bypassHosts {
if bypassHost == host {
return p.bypass
}
}
return p.def
}
// AddFromString parses a string that contains comma-separated values
// specifying hosts that should use the bypass proxy. Each value is either an
// IP address, a CIDR range, a zone (*.example.com) or a host name
// (localhost). A best effort is made to parse the string and errors are
// ignored.
func (p *proxy_PerHost) AddFromString(s string) {
hosts := strings.Split(s, ",")
for _, host := range hosts {
host = strings.TrimSpace(host)
if len(host) == 0 {
continue
}
if strings.Contains(host, "/") {
// We assume that it's a CIDR address like 127.0.0.0/8
if _, net, err := net.ParseCIDR(host); err == nil {
p.AddNetwork(net)
}
continue
}
if ip := net.ParseIP(host); ip != nil {
p.AddIP(ip)
continue
}
if strings.HasPrefix(host, "*.") {
p.AddZone(host[1:])
continue
}
p.AddHost(host)
}
}
// AddIP specifies an IP address that will use the bypass proxy. Note that
// this will only take effect if a literal IP address is dialed. A connection
// to a named host will never match an IP.
func (p *proxy_PerHost) AddIP(ip net.IP) {
p.bypassIPs = append(p.bypassIPs, ip)
}
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
// this will only take effect if a literal IP address is dialed. A connection
// to a named host will never match.
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
p.bypassNetworks = append(p.bypassNetworks, net)
}
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
// "example.com" matches "example.com" and all of its subdomains.
func (p *proxy_PerHost) AddZone(zone string) {
if strings.HasSuffix(zone, ".") {
zone = zone[:len(zone)-1]
}
if !strings.HasPrefix(zone, ".") {
zone = "." + zone
}
p.bypassZones = append(p.bypassZones, zone)
}
// AddHost specifies a host name that will use the bypass proxy.
func (p *proxy_PerHost) AddHost(host string) {
if strings.HasSuffix(host, ".") {
host = host[:len(host)-1]
}
p.bypassHosts = append(p.bypassHosts, host)
}
// A Dialer is a means to establish a connection.
type proxy_Dialer interface {
// Dial connects to the given address via the proxy.
Dial(network, addr string) (c net.Conn, err error)
}
// Auth contains authentication parameters that specific Dialers may require.
type proxy_Auth struct {
User, Password string
}
// FromEnvironment returns the dialer specified by the proxy-related variables in
// the environment.
func proxy_FromEnvironment() proxy_Dialer {
allProxy := proxy_allProxyEnv.Get()
if len(allProxy) == 0 {
return proxy_Direct
}
proxyURL, err := url.Parse(allProxy)
if err != nil {
return proxy_Direct
}
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
if err != nil {
return proxy_Direct
}
noProxy := proxy_noProxyEnv.Get()
if len(noProxy) == 0 {
return proxy
}
perHost := proxy_NewPerHost(proxy, proxy_Direct)
perHost.AddFromString(noProxy)
return perHost
}
// proxySchemes is a map from URL schemes to a function that creates a Dialer
// from a URL with such a scheme.
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
// by FromURL.
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
if proxy_proxySchemes == nil {
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
}
proxy_proxySchemes[scheme] = f
}
// FromURL returns a Dialer given a URL specification and an underlying
// Dialer for it to make network requests.
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
var auth *proxy_Auth
if u.User != nil {
auth = new(proxy_Auth)
auth.User = u.User.Username()
if p, ok := u.User.Password(); ok {
auth.Password = p
}
}
switch u.Scheme {
case "socks5":
return proxy_SOCKS5("tcp", u.Host, auth, forward)
}
// If the scheme doesn't match any of the built-in schemes, see if it
// was registered by another package.
if proxy_proxySchemes != nil {
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
return f(u, forward)
}
}
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
}
var (
proxy_allProxyEnv = &proxy_envOnce{
names: []string{"ALL_PROXY", "all_proxy"},
}
proxy_noProxyEnv = &proxy_envOnce{
names: []string{"NO_PROXY", "no_proxy"},
}
)
// envOnce looks up an environment variable (optionally by multiple
// names) once. It mitigates expensive lookups on some platforms
// (e.g. Windows).
// (Borrowed from net/http/transport.go)
type proxy_envOnce struct {
names []string
once sync.Once
val string
}
func (e *proxy_envOnce) Get() string {
e.once.Do(e.init)
return e.val
}
func (e *proxy_envOnce) init() {
for _, n := range e.names {
e.val = os.Getenv(n)
if e.val != "" {
return
}
}
}
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
// with an optional username and password. See RFC 1928 and RFC 1929.
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
s := &proxy_socks5{
network: network,
addr: addr,
forward: forward,
}
if auth != nil {
s.user = auth.User
s.password = auth.Password
}
return s, nil
}
type proxy_socks5 struct {
user, password string
network, addr string
forward proxy_Dialer
}
const proxy_socks5Version = 5
const (
proxy_socks5AuthNone = 0
proxy_socks5AuthPassword = 2
)
const proxy_socks5Connect = 1
const (
proxy_socks5IP4 = 1
proxy_socks5Domain = 3
proxy_socks5IP6 = 4
)
var proxy_socks5Errors = []string{
"",
"general failure",
"connection forbidden",
"network unreachable",
"host unreachable",
"connection refused",
"TTL expired",
"command not supported",
"address type not supported",
}
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
switch network {
case "tcp", "tcp6", "tcp4":
default:
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
}
conn, err := s.forward.Dial(s.network, s.addr)
if err != nil {
return nil, err
}
if err := s.connect(conn, addr); err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
// connect takes an existing connection to a socks5 proxy server,
// and commands the server to extend that connection to target,
// which must be a canonical address with a host and port.
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
host, portStr, err := net.SplitHostPort(target)
if err != nil {
return err
}
port, err := strconv.Atoi(portStr)
if err != nil {
return errors.New("proxy: failed to parse port number: " + portStr)
}
if port < 1 || port > 0xffff {
return errors.New("proxy: port number out of range: " + portStr)
}
// the size here is just an estimate
buf := make([]byte, 0, 6+len(host))
buf = append(buf, proxy_socks5Version)
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
} else {
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
}
if _, err := conn.Write(buf); err != nil {
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if buf[0] != 5 {
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
}
if buf[1] == 0xff {
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
}
// See RFC 1929
if buf[1] == proxy_socks5AuthPassword {
buf = buf[:0]
buf = append(buf, 1 /* password protocol version */)
buf = append(buf, uint8(len(s.user)))
buf = append(buf, s.user...)
buf = append(buf, uint8(len(s.password)))
buf = append(buf, s.password...)
if _, err := conn.Write(buf); err != nil {
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if buf[1] != 0 {
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
}
}
buf = buf[:0]
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
if ip := net.ParseIP(host); ip != nil {
if ip4 := ip.To4(); ip4 != nil {
buf = append(buf, proxy_socks5IP4)
ip = ip4
} else {
buf = append(buf, proxy_socks5IP6)
}
buf = append(buf, ip...)
} else {
if len(host) > 255 {
return errors.New("proxy: destination host name too long: " + host)
}
buf = append(buf, proxy_socks5Domain)
buf = append(buf, byte(len(host)))
buf = append(buf, host...)
}
buf = append(buf, byte(port>>8), byte(port))
if _, err := conn.Write(buf); err != nil {
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
failure := "unknown error"
if int(buf[1]) < len(proxy_socks5Errors) {
failure = proxy_socks5Errors[buf[1]]
}
if len(failure) > 0 {
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
}
bytesToDiscard := 0
switch buf[3] {
case proxy_socks5IP4:
bytesToDiscard = net.IPv4len
case proxy_socks5IP6:
bytesToDiscard = net.IPv6len
case proxy_socks5Domain:
_, err := io.ReadFull(conn, buf[:1])
if err != nil {
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
bytesToDiscard = int(buf[0])
default:
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
}
if cap(buf) < bytesToDiscard {
buf = make([]byte, bytesToDiscard)
} else {
buf = buf[:bytesToDiscard]
}
if _, err := io.ReadFull(conn, buf); err != nil {
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
// Also need to discard the port number
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
return nil
}
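
Because this bundled code sits behind Dialer.Proxy (the new client.go passes the proxy URL to proxy_FromURL), returning a socks5:// URL from the Proxy function routes the WebSocket dial through this SOCKS5 client. A minimal sketch; the local proxy address is hypothetical:

package example

import (
	"net/http"
	"net/url"

	"github.com/gorilla/websocket"
)

// socksDialer routes all WebSocket dials through a local SOCKS5 proxy.
func socksDialer() *websocket.Dialer {
	u := &url.URL{Scheme: "socks5", Host: "127.0.0.1:1080"} // hypothetical
	return &websocket.Dialer{
		Proxy: func(*http.Request) (*url.URL, error) { return u, nil },
	}
}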

View File

@ -1,11 +1,7 @@
// checkpoint is a package for checking version information and alerts
// for a HashiCorp product.
package checkpoint
import (
"bytes"
"context"
"crypto/rand"
crand "crypto/rand"
"encoding/binary"
"encoding/json"
"fmt"
@ -23,112 +19,9 @@ import (
"time"
"github.com/hashicorp/go-cleanhttp"
uuid "github.com/hashicorp/go-uuid"
)
var magicBytes [4]byte = [4]byte{0x35, 0x77, 0x69, 0xFB}
// ReportParams are the parameters for configuring a telemetry report.
type ReportParams struct {
// Signature is some random signature that should be stored and used
// as a cookie-like value. This ensures that alerts aren't repeated.
// If the signature is changed, repeat alerts may be sent down. The
// signature should NOT be anything identifiable to a user (such as
// a MAC address). It should be random.
//
// If SignatureFile is given, then the signature will be read from this
// file. If the file doesn't exist, then a random signature will
// automatically be generated and stored here. SignatureFile will be
// ignored if Signature is given.
Signature string `json:"signature"`
SignatureFile string `json:"-"`
StartTime time.Time `json:"start_time"`
EndTime time.Time `json:"end_time"`
Arch string `json:"arch"`
OS string `json:"os"`
Payload interface{} `json:"payload,omitempty"`
Product string `json:"product"`
RunID string `json:"run_id"`
SchemaVersion string `json:"schema_version"`
Version string `json:"version"`
}
func (i *ReportParams) signature() string {
signature := i.Signature
if i.Signature == "" && i.SignatureFile != "" {
var err error
signature, err = checkSignature(i.SignatureFile)
if err != nil {
return ""
}
}
return signature
}
// Report sends telemetry information to checkpoint
func Report(ctx context.Context, r *ReportParams) error {
if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
return nil
}
req, err := ReportRequest(r)
if err != nil {
return err
}
client := cleanhttp.DefaultClient()
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
return err
}
if resp.StatusCode != 201 {
return fmt.Errorf("Unknown status: %d", resp.StatusCode)
}
return nil
}
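
A minimal sketch of calling Report with the fields shown above; the product name and signature-file path are illustrative, not taken from this diff:

package main

import (
	"context"
	"log"
	"time"

	checkpoint "github.com/hashicorp/go-checkpoint"
)

func main() {
	start := time.Now()
	// ... run the work being measured ...
	err := checkpoint.Report(context.Background(), &checkpoint.ReportParams{
		Product:       "my-product", // illustrative product name
		Version:       "1.0.0",
		StartTime:     start,
		EndTime:       time.Now(),
		SignatureFile: "/tmp/my-product.sig", // illustrative path; a random signature is generated here if missing
	})
	if err != nil {
		log.Printf("telemetry report failed: %v", err)
	}
}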
// ReportRequest creates a request object for making a report
func ReportRequest(r *ReportParams) (*http.Request, error) {
// Populate some fields automatically if we can
if r.RunID == "" {
uuid, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
r.RunID = uuid
}
if r.Arch == "" {
r.Arch = runtime.GOARCH
}
if r.OS == "" {
r.OS = runtime.GOOS
}
if r.Signature == "" {
r.Signature = r.signature()
}
b, err := json.Marshal(r)
if err != nil {
return nil, err
}
u := &url.URL{
Scheme: "https",
Host: "checkpoint-api.hashicorp.com",
Path: fmt.Sprintf("/v1/telemetry/%s", r.Product),
}
req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b))
if err != nil {
return nil, err
}
req.Header.Add("Accept", "application/json")
req.Header.Add("User-Agent", "HashiCorp/go-checkpoint")
return req, nil
}
var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB}
// CheckParams are the parameters for configuring a check request.
type CheckParams struct {
@ -177,14 +70,14 @@ type CheckParams struct {
// CheckResponse is the response for a check request.
type CheckResponse struct {
Product string
CurrentVersion string `json:"current_version"`
CurrentReleaseDate int `json:"current_release_date"`
CurrentDownloadURL string `json:"current_download_url"`
CurrentChangelogURL string `json:"current_changelog_url"`
ProjectWebsite string `json:"project_website"`
Outdated bool `json:"outdated"`
Alerts []*CheckAlert
Product string `json:"product"`
CurrentVersion string `json:"current_version"`
CurrentReleaseDate int `json:"current_release_date"`
CurrentDownloadURL string `json:"current_download_url"`
CurrentChangelogURL string `json:"current_changelog_url"`
ProjectWebsite string `json:"project_website"`
Outdated bool `json:"outdated"`
Alerts []*CheckAlert `json:"alerts"`
}
// CheckAlert is a single alert message from a check request.
@ -192,11 +85,11 @@ type CheckResponse struct {
// These never have to be manually constructed, and are typically populated
// into a CheckResponse as a result of the Check request.
type CheckAlert struct {
ID int
Date int
Message string
URL string
Level string
ID int `json:"id"`
Date int `json:"date"`
Message string `json:"message"`
URL string `json:"url"`
Level string `json:"level"`
}
// Check checks for alerts and new version information.
@ -205,7 +98,7 @@ func Check(p *CheckParams) (*CheckResponse, error) {
return &CheckResponse{}, nil
}
// set a default timeout of 3 sec for the check request (in milliseconds)
// Set a default timeout of 3 sec for the check request (in milliseconds)
timeout := 3000
if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT"))
@ -253,8 +146,8 @@ func Check(p *CheckParams) (*CheckResponse, error) {
if err != nil {
return nil, err
}
req.Header.Add("Accept", "application/json")
req.Header.Add("User-Agent", "HashiCorp/go-checkpoint")
req.Header.Set("Accept", "application/json")
req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
client := cleanhttp.DefaultClient()
@ -266,6 +159,8 @@ func Check(p *CheckParams) (*CheckResponse, error) {
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode)
}
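
The CheckParams fields are elided by the hunk above; assuming it carries at least Product and Version as in the upstream package, a usage sketch against the CheckResponse and CheckAlert types shown earlier:

package main

import (
	"fmt"
	"log"

	checkpoint "github.com/hashicorp/go-checkpoint"
)

func main() {
	// Check returns immediately with an empty response when
	// CHECKPOINT_DISABLE is set, per the guard shown above.
	resp, err := checkpoint.Check(&checkpoint.CheckParams{
		Product: "my-product", // illustrative; fields assumed from the upstream package
		Version: "1.0.0",
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Outdated {
		fmt.Printf("newer version available: %s (%s)\n",
			resp.CurrentVersion, resp.CurrentDownloadURL)
	}
	for _, a := range resp.Alerts {
		fmt.Printf("[%s] %s (%s)\n", a.Level, a.Message, a.URL)
	}
}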
@ -390,14 +285,11 @@ func checkCache(current string, path string, d time.Duration) (io.ReadCloser, er
return f, nil
}
func checkResult(r io.Reader) (*CheckResponse, error) {
var result CheckResponse
dec := json.NewDecoder(r)
if err := dec.Decode(&result); err != nil {
if err := json.NewDecoder(r).Decode(&result); err != nil {
return nil, err
}
return &result, nil
}
@ -426,7 +318,7 @@ func checkSignature(path string) (string, error) {
var b [16]byte
n := 0
for n < 16 {
n2, err := rand.Read(b[n:])
n2, err := crand.Read(b[n:])
if err != nil {
return "", err
}
@ -456,7 +348,7 @@ func writeCacheHeader(f io.Writer, v string) error {
}
// Write out our current version length
var length uint32 = uint32(len(v))
length := uint32(len(v))
if err := binary.Write(f, binary.LittleEndian, length); err != nil {
return err
}
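
writeCacheHeader suggests the cache-file layout: the four magic bytes, a little-endian uint32 version length, then the version string itself. A sketch of reading that header back, assuming exactly that layout (readCacheHeader is hypothetical, not part of this package):

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB}

// readCacheHeader reverses the layout implied by writeCacheHeader above:
// magic bytes, uint32 version length, version string.
func readCacheHeader(r io.Reader) (string, error) {
	var magic [4]byte
	if err := binary.Read(r, binary.LittleEndian, &magic); err != nil {
		return "", err
	}
	if magic != magicBytes {
		return "", errors.New("checkpoint cache: bad magic bytes")
	}
	var length uint32
	if err := binary.Read(r, binary.LittleEndian, &length); err != nil {
		return "", err
	}
	version := make([]byte, length)
	if _, err := io.ReadFull(r, version); err != nil {
		return "", err
	}
	return string(version), nil
}

func main() {
	// Write a header by hand, then read it back.
	var buf bytes.Buffer
	buf.Write(magicBytes[:])
	binary.Write(&buf, binary.LittleEndian, uint32(len("1.0.0")))
	buf.WriteString("1.0.0")
	v, err := readCacheHeader(&buf)
	fmt.Println(v, err)
}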
