Merge pull request #8284 from hashicorp/fix_performance_regression
Fix performance regression
This commit is contained in:
commit
1b3e346297
|
@ -8,9 +8,11 @@ import (
|
|||
func buildEc2Filters(input map[string]string) []*ec2.Filter {
|
||||
var filters []*ec2.Filter
|
||||
for k, v := range input {
|
||||
a := k
|
||||
b := v
|
||||
filters = append(filters, &ec2.Filter{
|
||||
Name: &k,
|
||||
Values: []*string{&v},
|
||||
Name: &a,
|
||||
Values: []*string{&b},
|
||||
})
|
||||
}
|
||||
return filters
|
||||
|
|
55
go.mod
55
go.mod
|
@ -5,23 +5,24 @@ require (
|
|||
github.com/1and1/oneandone-cloudserver-sdk-go v1.0.1
|
||||
github.com/Azure/azure-sdk-for-go v30.0.0+incompatible
|
||||
github.com/Azure/go-autorest v12.0.0+incompatible
|
||||
github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 // indirect
|
||||
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290
|
||||
github.com/NaverCloudPlatform/ncloud-sdk-go v0.0.0-20180110055012-c2e73f942591
|
||||
github.com/PuerkitoBio/goquery v1.5.0 // indirect
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
|
||||
github.com/Telmate/proxmox-api-go v0.0.0-20190815172943-ef9222844e60
|
||||
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect
|
||||
github.com/agext/levenshtein v1.2.2 // indirect
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f
|
||||
github.com/antchfx/htmlquery v1.0.0 // indirect
|
||||
github.com/antchfx/xmlquery v1.0.0 // indirect
|
||||
|
||||
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd // indirect
|
||||
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607 // indirect
|
||||
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6 // indirect
|
||||
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 // indirect
|
||||
github.com/approvals/go-approval-tests v0.0.0-20160714161514-ad96e53bea43
|
||||
github.com/armon/go-radix v1.0.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.24.1
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect
|
||||
github.com/biogo/hts v0.0.0-20160420073057-50da7d4131a3
|
||||
github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae
|
||||
github.com/cheggaaa/pb v1.0.27
|
||||
|
@ -38,7 +39,7 @@ require (
|
|||
github.com/docker/docker v0.0.0-20180422163414-57142e89befe // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/dylanmei/iso8601 v0.1.0 // indirect
|
||||
github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1
|
||||
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08
|
||||
github.com/exoscale/egoscale v0.18.1
|
||||
github.com/fatih/camelcase v1.0.0
|
||||
github.com/fatih/structtag v1.0.0
|
||||
|
@ -48,27 +49,25 @@ require (
|
|||
github.com/gocolly/colly v1.2.0
|
||||
github.com/gofrs/flock v0.7.1
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
|
||||
github.com/golang/example v0.0.0-20170904185048-46695d81d1fa // indirect
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
|
||||
github.com/google/go-cmp v0.3.0
|
||||
github.com/google/go-querystring v1.0.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/google/uuid v1.0.0
|
||||
github.com/gophercloud/gophercloud v0.2.0
|
||||
github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01
|
||||
github.com/gophercloud/utils v0.0.0-20190124192022-a5c25e7a53a6
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
|
||||
github.com/gorilla/websocket v1.4.0 // indirect
|
||||
github.com/gorilla/websocket v0.0.0-20170319172727-a91eba7f9777 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
|
||||
github.com/hashicorp/consul v1.4.0
|
||||
github.com/hashicorp/errwrap v1.0.0
|
||||
github.com/hashicorp/go-checkpoint v0.5.0
|
||||
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0
|
||||
github.com/hashicorp/go-getter v1.4.0
|
||||
github.com/hashicorp/go-msgpack v0.5.4 // indirect
|
||||
github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da
|
||||
github.com/hashicorp/go-multierror v1.0.0
|
||||
github.com/hashicorp/go-oracle-terraform v0.0.0-20181016190316-007121241b79
|
||||
github.com/hashicorp/go-retryablehttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.0 // indirect
|
||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.1
|
||||
github.com/hashicorp/go-version v1.2.0
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
|
@ -83,49 +82,49 @@ require (
|
|||
github.com/joyent/triton-go v0.0.0-20180116165742-545edbe0d564
|
||||
github.com/json-iterator/go v1.1.6 // indirect
|
||||
github.com/jtolds/gls v4.2.1+incompatible // indirect
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1
|
||||
github.com/kennygrant/sanitize v1.2.4 // indirect
|
||||
github.com/klauspost/compress v0.0.0-20160131094358-f86d2e6d8a77 // indirect
|
||||
github.com/klauspost/cpuid v0.0.0-20160106104451-349c67577817 // indirect
|
||||
github.com/klauspost/crc32 v0.0.0-20160114101742-999f3125931f // indirect
|
||||
github.com/klauspost/pgzip v0.0.0-20151221113845-47f36e165cec
|
||||
github.com/kr/fs v0.0.0-20131111012553-2788f0dbd169 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/linode/linodego v0.7.1
|
||||
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c // indirect
|
||||
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 // indirect
|
||||
github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b
|
||||
github.com/mattn/go-colorable v0.1.1 // indirect
|
||||
github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939
|
||||
github.com/mattn/go-tty v0.0.0-20190424173100-523744f04859
|
||||
github.com/miekg/dns v1.1.1 // indirect
|
||||
github.com/mitchellh/cli v1.0.0
|
||||
github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/mitchellh/go-homedir v1.0.0
|
||||
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed
|
||||
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
|
||||
github.com/mitchellh/iochan v1.0.0
|
||||
github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc
|
||||
github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4
|
||||
github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51
|
||||
github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557
|
||||
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784
|
||||
github.com/mitchellh/reflectwalk v1.0.0
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/moul/anonuuid v0.0.0-20160222162117-609b752a95ef // indirect
|
||||
github.com/moul/gotty-client v0.0.0-20180327180212-b26a57ebc215 // indirect
|
||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 // indirect
|
||||
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.0-20180105111133-96aac992fc8b
|
||||
github.com/onsi/ginkgo v1.10.2 // indirect
|
||||
github.com/onsi/gomega v1.7.0 // indirect
|
||||
github.com/onsi/ginkgo v1.7.0 // indirect
|
||||
github.com/onsi/gomega v1.4.3 // indirect
|
||||
github.com/oracle/oci-go-sdk v1.8.0
|
||||
github.com/outscale/osc-go v0.0.1
|
||||
github.com/packer-community/winrmcp v0.0.0-20180921204643-0fd363d6159a
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/pkg/sftp v0.0.0-20160118190721-e84cc8c755ca
|
||||
github.com/posener/complete v1.2.1
|
||||
github.com/posener/complete v1.1.1
|
||||
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible
|
||||
github.com/renstrom/fuzzysearch v0.0.0-20160331204855-2d205ac6ec17 // indirect
|
||||
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 // indirect
|
||||
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca // indirect
|
||||
github.com/satori/go.uuid v1.2.0 // indirect
|
||||
github.com/scaleway/scaleway-cli v0.0.0-20180921094345-7b12c9699d70
|
||||
github.com/shirou/gopsutil v2.18.12+incompatible
|
||||
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect
|
||||
|
@ -135,18 +134,18 @@ require (
|
|||
github.com/temoto/robotstxt v1.1.1 // indirect
|
||||
github.com/tencentcloud/tencentcloud-sdk-go v3.0.94+incompatible
|
||||
github.com/ucloud/ucloud-sdk-go v0.8.7
|
||||
github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5
|
||||
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1
|
||||
github.com/ulikunitz/xz v0.5.5
|
||||
github.com/vmware/govmomi v0.0.0-20170707011325-c2105a174311
|
||||
github.com/xanzy/go-cloudstack v0.0.0-20190526095453-42f262b63ed0
|
||||
github.com/yandex-cloud/go-genproto v0.0.0-20190916101622-7617782d381e
|
||||
github.com/yandex-cloud/go-sdk v0.0.0-20190916101744-c781afa45829
|
||||
github.com/zclconf/go-cty v1.1.0
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0
|
||||
google.golang.org/api v0.9.0
|
||||
google.golang.org/grpc v1.21.1
|
||||
|
|
104
go.sum
104
go.sum
|
@ -19,8 +19,8 @@ github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 h1:pSm8mp0T2OH2CP
|
|||
github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8=
|
||||
github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
|
||||
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290 h1:K9I21XUHNbYD3GNMmJBN0UKJCpdP+glftwNZ7Bo8kqY=
|
||||
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
|
||||
github.com/NaverCloudPlatform/ncloud-sdk-go v0.0.0-20180110055012-c2e73f942591 h1:/P9HCl71+Eh6vDbKNyRu+rpIIR70UCZWNOGexVV3e6k=
|
||||
github.com/NaverCloudPlatform/ncloud-sdk-go v0.0.0-20180110055012-c2e73f942591/go.mod h1:EHGzQGbwozJBj/4qj3WGrTJ0FqjgOTOxLQ0VNWvPn08=
|
||||
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
|
||||
|
@ -33,27 +33,23 @@ github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KM
|
|||
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
|
||||
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
|
||||
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=
|
||||
github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e h1:/8wOj52pewmIX/8d5eVO3t7Rr3astkBI/ruyg4WNqRo=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70 h1:FrF4uxA24DF3ARNXVbUin3wa5fDLaB1Cy8mKks/LRz4=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f h1:jI4DIE5Vf4oRaHfthB0oRhU+yuYuoOTurDzwAlskP00=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
|
||||
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
|
||||
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
|
||||
github.com/antchfx/htmlquery v1.0.0 h1:O5IXz8fZF3B3MW+B33MZWbTHBlYmcfw0BAxgErHuaMA=
|
||||
github.com/antchfx/htmlquery v1.0.0/go.mod h1:MS9yksVSQXls00iXkiMqXr0J+umL/AmxXKuP28SUJM8=
|
||||
github.com/antchfx/xmlquery v1.0.0 h1:YuEPqexGG2opZKNc9JU3Zw6zFXwC47wNcy6/F8oKsrM=
|
||||
github.com/antchfx/xmlquery v1.0.0/go.mod h1:/+CnyD/DzHRnv2eRxrVbieRU/FIF6N0C+7oTtyUtCKk=
|
||||
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e h1:ptBAamGVd6CfRsUtyHD+goy2JGhv1QC32v3gqM8mYAM=
|
||||
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
|
||||
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0 h1:JaCC8jz0zdMLk2m+qCCVLLLM/PL93p84w4pK3aJWj60=
|
||||
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
|
||||
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd h1:S3Fr6QnkpW9VRjiEY4psQHhhbbahASuNVj52YIce7lI=
|
||||
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
|
||||
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607 h1:BFFG6KP8ASFBg2ptWsJn8p8RDufBjBDKIxLU7BTYGOM=
|
||||
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
|
||||
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6 h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA=
|
||||
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
|
||||
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
|
||||
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I=
|
||||
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
|
||||
github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
|
||||
github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
|
||||
github.com/approvals/go-approval-tests v0.0.0-20160714161514-ad96e53bea43 h1:ePCAQPf5tUc5IMcUvu6euhSGna7jzs7eiXtJXHig6Zc=
|
||||
|
@ -70,8 +66,6 @@ github.com/aws/aws-sdk-go v1.24.1 h1:B2NRyTV1/+h+Dg8Bh7vnuvW6QZz/NBL+uzgC2uILDMI
|
|||
github.com/aws/aws-sdk-go v1.24.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/azr/flock v0.0.0-20190823144736-958d66434653 h1:2H3Cu0cbG8iszfcgnANwC/cm0YkPJIQvaJ9/tSpwh9o=
|
||||
github.com/azr/flock v0.0.0-20190823144736-958d66434653/go.mod h1:EI7lzWWilX2K3ZMZ7Ta+E4DZtWzMC2tbn3cM3oVPuAU=
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
|
||||
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
|
||||
|
@ -114,8 +108,8 @@ github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4
|
|||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dylanmei/iso8601 v0.1.0 h1:812NGQDBcqquTfH5Yeo7lwR0nzx/cKdsmf3qMjPURUI=
|
||||
github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ=
|
||||
github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1 h1:r1oACdS2XYiAWcfF8BJXkoU8l1J71KehGR+d99yWEDA=
|
||||
github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y=
|
||||
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08 h1:0bp6/GrNOrTDtSXe9YYGCwf8jp5Fb/b+4a6MTRm4qzY=
|
||||
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08/go.mod h1:VBVDFSBXCIW8JaHQpI8lldSKfYaLMzP9oyq6IJ4fhzY=
|
||||
github.com/exoscale/egoscale v0.18.1 h1:1FNZVk8jHUx0AvWhOZxLEDNlacTU0chMXUUNkm9EZaI=
|
||||
github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE=
|
||||
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
|
||||
|
@ -132,7 +126,6 @@ github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
|
|||
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
|
||||
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
|
||||
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
||||
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
|
@ -145,8 +138,6 @@ github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx
|
|||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
|
||||
github.com/golang/example v0.0.0-20170904185048-46695d81d1fa h1:iqCQC2Z53KkwGgTN9szyL4q0OQHmuNjeoNnMT6lk66k=
|
||||
github.com/golang/example v0.0.0-20170904185048-46695d81d1fa/go.mod h1:tO/5UvQ/uKigUjQBPqzstj6uxd3fUIjddi19DxGJeWg=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
|
@ -177,19 +168,19 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
|
|||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 h1:JM174NTeGNJ2m/oLH3UOWOvWQQKd+BoL3hcSCUWFLt0=
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gophercloud/gophercloud v0.2.0 h1:lD2Bce2xBAMNNcFZ0dObTpXkGLlVIb33RPVUNVpw6ic=
|
||||
github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01 h1:OgCNGSnEalfkRpn//WGJHhpo7fkP+LhTpvEITZ7CkK4=
|
||||
github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw=
|
||||
github.com/gophercloud/utils v0.0.0-20190124192022-a5c25e7a53a6 h1:Cw/B8Bu7Rryomxf7bjc8zNfIyLgjxsDd91n0eGRWpuo=
|
||||
github.com/gophercloud/utils v0.0.0-20190124192022-a5c25e7a53a6/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v0.0.0-20170319172727-a91eba7f9777 h1:JIM+OacoOJRU30xpjMf8sulYqjr0ViA3WDrTX6j/yDI=
|
||||
github.com/gorilla/websocket v0.0.0-20170319172727-a91eba7f9777/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
|
||||
|
@ -198,26 +189,24 @@ github.com/hashicorp/consul v1.4.0 h1:PQTW4xCuAExEiSbhrsFsikzbW5gVBoi74BjUvYFyKH
|
|||
github.com/hashicorp/consul v1.4.0/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=
|
||||
github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg=
|
||||
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8=
|
||||
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-getter v1.4.0 h1:ENHNi8494porjD0ZhIrjlAHnveSFhY7hvOJrV/fsKkw=
|
||||
github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY=
|
||||
github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da h1:HAasZmyRrb7/paYuww5RfVwY3wkFpsbMNYwBxOSZquY=
|
||||
github.com/hashicorp/go-getter v1.3.1-0.20190906090232-a0f878cb75da/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v0.5.4 h1:SFT72YqIkOcLdWJUYcriVX7hbrZpwc/f7h8aW2NUqrA=
|
||||
github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-oracle-terraform v0.0.0-20181016190316-007121241b79 h1:RKu7yAXZTaQsxj1K9GDsh+QVw0+Wu1SWHxtbFN0n+hE=
|
||||
github.com/hashicorp/go-oracle-terraform v0.0.0-20181016190316-007121241b79/go.mod h1:09jT3Y/OIsjTjQ2+3bkVNPDKqWcGIYYvjB2BEKVUdvc=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E=
|
||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg=
|
||||
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
|
||||
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
|
||||
|
@ -239,7 +228,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
|||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/hcl/v2 v2.0.0 h1:efQznTz+ydmQXq3BOnRa3AXzvCeTq1P4dKj/z5GLlY8=
|
||||
github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90=
|
||||
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
|
||||
|
@ -272,8 +260,6 @@ github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpR
|
|||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o=
|
||||
github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
|
@ -297,24 +283,19 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE=
|
||||
github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY=
|
||||
github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
|
||||
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c h1:FMUOnVGy8nWk1cvlMCAoftRItQGMxI0vzJ3dQjeZTCE=
|
||||
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c/go.mod h1:mf8fjOu33zCqxUjuiU3I8S1lJMyEAlH+0F2+M5xl3hE=
|
||||
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg=
|
||||
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
|
||||
github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b h1:/1RFh2SLCJ+tEnT73+Fh5R2AO89sQqs8ba7o+hx1G0Y=
|
||||
github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY=
|
||||
github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939 h1:cRFHA33ER97Xy5jmjS519OXCS/yE3AT3zdbQAg0Z53g=
|
||||
github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939/go.mod h1:CfZSN7zwz5gJiFhZJz49Uzk7mEBHIceWmbFmYx7Hf7E=
|
||||
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-tty v0.0.0-20190424173100-523744f04859 h1:smQbSzmT3EHl4EUwtFwFGmGIpiYgIiiPeVv1uguIQEE=
|
||||
|
@ -328,26 +309,22 @@ github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7 h1:PXPMDtfqV+rZJsh
|
|||
github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7/go.mod h1:g7SZj7ABpStq3tM4zqHiVEG5un/DZ1+qJJKO7qx1EvU=
|
||||
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed h1:FI2NIv6fpef6BQl2u3IZX/Cj20tfypRF4yd+uaHOMtI=
|
||||
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc h1:5T6hzGUO5OrL6MdYXYoLQtRWJDDgjdlOVBn9mIqGY1g=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4 h1:jw9tsdJ1FQmUkyTXdIF/nByTX+mMnnp16glnvGZMsC4=
|
||||
github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4/go.mod h1:YYMf4xtQnR8LRC0vKi3afvQ5QwRPQ17zjcpkBCufb+I=
|
||||
github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51 h1:eD92Am0Qf3rqhsOeA1zwBHSfRkoHrt4o6uORamdmJP8=
|
||||
github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo=
|
||||
github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557 h1:w1QuuAA2km2Hax+EPamrq5ZRBeaNv2vsjvgB4an0zoU=
|
||||
github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557/go.mod h1:QuAqW7/z+iv6aWFJdrA8kCbsF0OOJVKCICqTcYBexuY=
|
||||
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784 h1:+DAetXqxv/mSyCkE9KBIYOZs9b68y7SUaDCxQMRjA68=
|
||||
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo=
|
||||
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
|
||||
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
|
@ -365,10 +342,10 @@ github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH
|
|||
github.com/olekukonko/tablewriter v0.0.0-20180105111133-96aac992fc8b h1:LGItPaClbzopugAomw5VFKnG3h1dUr9QW5KOU+m8gu0=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20180105111133-96aac992fc8b/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
|
||||
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/oracle/oci-go-sdk v1.8.0 h1:4SO45bKV0I3/Mn1os3ANDZmV0eSE5z5CLdSUIkxtyzs=
|
||||
github.com/oracle/oci-go-sdk v1.8.0/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
|
||||
|
@ -390,8 +367,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
|||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=
|
||||
github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E=
|
||||
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible h1:ZoVHH6voxW9Onzo6z2yLtocVoN6mBocyDoqoyAMHokE=
|
||||
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible/go.mod h1:T3/WrziK7fYH3C8ilAFAHe99R452/IzIG3YYkqaOFeQ=
|
||||
github.com/renstrom/fuzzysearch v0.0.0-20160331204855-2d205ac6ec17 h1:4qPms2txLWMLXKzqlnYSulKRS4cS9aYgPtAEpUelQok=
|
||||
|
@ -408,7 +383,6 @@ github.com/scaleway/scaleway-cli v0.0.0-20180921094345-7b12c9699d70 h1:DaqC32ZwO
|
|||
github.com/scaleway/scaleway-cli v0.0.0-20180921094345-7b12c9699d70/go.mod h1:XjlXWPd6VONhsRSEuzGkV8mzRpH7ou1cdLV7IKJk96s=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM=
|
||||
github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
|
@ -438,8 +412,8 @@ github.com/tencentcloud/tencentcloud-sdk-go v3.0.94+incompatible h1:G8i7dPMK1RCp
|
|||
github.com/tencentcloud/tencentcloud-sdk-go v3.0.94+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4=
|
||||
github.com/ucloud/ucloud-sdk-go v0.8.7 h1:BmXOb5RivI0Uu4oZRpjI6SQ9/y7n/H9wxTGR1txIE8o=
|
||||
github.com/ucloud/ucloud-sdk-go v0.8.7/go.mod h1:lM6fpI8y6iwACtlbHUav823/uKPdXsNBlnBpRF2fj3c=
|
||||
github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5 h1:cMjKdf4PxEBN9K5HaD9UMW8gkTbM0kMzkTa9SJe0WNQ=
|
||||
github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
||||
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1 h1:U6ufy3mLDgg9RYupntOvAF7xCmNNquyKaYaaVHo1Nnk=
|
||||
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
||||
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
|
||||
github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
|
||||
github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
|
||||
|
@ -464,14 +438,11 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
|
|||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
|
@ -526,7 +497,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -536,8 +506,6 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M=
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/execxp"
|
||||
"github.com/ChrisTrenkamp/goxpath/parser"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/parser"
|
||||
"github.com/ChrisTrenkamp/goxpath/tree"
|
||||
)
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ package execxp
|
|||
import (
|
||||
"encoding/xml"
|
||||
|
||||
"github.com/ChrisTrenkamp/goxpath/parser"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/parser"
|
||||
"github.com/ChrisTrenkamp/goxpath/tree"
|
||||
)
|
||||
|
||||
|
|
|
@ -6,14 +6,15 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/parser"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/parser/findutil"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/parser/intfns"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/xconst"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/xsort"
|
||||
"github.com/ChrisTrenkamp/goxpath/lexer"
|
||||
"github.com/ChrisTrenkamp/goxpath/parser"
|
||||
"github.com/ChrisTrenkamp/goxpath/parser/pathexpr"
|
||||
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/lexer"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/parser/pathexpr"
|
||||
"github.com/ChrisTrenkamp/goxpath/tree"
|
||||
"github.com/ChrisTrenkamp/goxpath/xconst"
|
||||
)
|
||||
|
||||
type xpFilt struct {
|
||||
|
|
|
@ -3,7 +3,7 @@ package lexer
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ChrisTrenkamp/goxpath/xconst"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/xconst"
|
||||
)
|
||||
|
||||
func absLocPathState(l *Lexer) stateFn {
|
|
@ -1,6 +1,6 @@
|
|||
package parser
|
||||
|
||||
import "github.com/ChrisTrenkamp/goxpath/lexer"
|
||||
import "github.com/ChrisTrenkamp/goxpath/internal/lexer"
|
||||
|
||||
//NodeType enumerations
|
||||
const (
|
|
@ -3,9 +3,9 @@ package findutil
|
|||
import (
|
||||
"encoding/xml"
|
||||
|
||||
"github.com/ChrisTrenkamp/goxpath/parser/pathexpr"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/parser/pathexpr"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/xconst"
|
||||
"github.com/ChrisTrenkamp/goxpath/tree"
|
||||
"github.com/ChrisTrenkamp/goxpath/xconst"
|
||||
)
|
||||
|
||||
const (
|
|
@ -3,7 +3,7 @@ package parser
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ChrisTrenkamp/goxpath/lexer"
|
||||
"github.com/ChrisTrenkamp/goxpath/internal/lexer"
|
||||
)
|
||||
|
||||
type stateType int
|
|
@ -1,53 +1,2 @@
|
|||
# Ignore docs files
|
||||
_gh_pages
|
||||
_site
|
||||
|
||||
# Ignore temporary files
|
||||
README.html
|
||||
coverage.out
|
||||
.tmp
|
||||
|
||||
# Numerous always-ignore extensions
|
||||
*.diff
|
||||
*.err
|
||||
*.log
|
||||
*.orig
|
||||
*.rej
|
||||
*.swo
|
||||
*.swp
|
||||
*.vi
|
||||
*.zip
|
||||
*~
|
||||
|
||||
# OS or Editor folders
|
||||
._*
|
||||
.cache
|
||||
.DS_Store
|
||||
.idea
|
||||
.project
|
||||
.settings
|
||||
.tmproj
|
||||
*.esproj
|
||||
*.sublime-project
|
||||
*.sublime-workspace
|
||||
nbproject
|
||||
Thumbs.db
|
||||
|
||||
# Komodo
|
||||
.komodotools
|
||||
*.komodoproject
|
||||
|
||||
# SCSS-Lint
|
||||
scss-lint-report.xml
|
||||
|
||||
# grunt-contrib-sass cache
|
||||
.sass-cache
|
||||
|
||||
# Jekyll metadata
|
||||
docs/.jekyll-metadata
|
||||
|
||||
# Folders to ignore
|
||||
.build
|
||||
.test
|
||||
bower_components
|
||||
node_modules
|
||||
|
|
|
@ -1,25 +1,70 @@
|
|||
language: go
|
||||
sudo: false
|
||||
matrix:
|
||||
fast_finish: true
|
||||
include:
|
||||
- go: 1.11.x
|
||||
env: TEST_METHOD=goveralls
|
||||
- go: 1.10.x
|
||||
- go: tip
|
||||
- go: 1.9.x
|
||||
- go: 1.8.x
|
||||
- go: 1.7.x
|
||||
- go: 1.6.x
|
||||
- go: 1.5.x
|
||||
allow_failures:
|
||||
- go: tip
|
||||
- go: 1.9.x
|
||||
- go: 1.8.x
|
||||
- go: 1.7.x
|
||||
- go: 1.6.x
|
||||
- go: 1.5.x
|
||||
script: ./test.sh $TEST_METHOD
|
||||
go:
|
||||
- 1.8
|
||||
- 1.7.5
|
||||
- 1.7.4
|
||||
- 1.7.3
|
||||
- 1.7.2
|
||||
- 1.7.1
|
||||
- 1.7
|
||||
- tip
|
||||
- 1.6.4
|
||||
- 1.6.3
|
||||
- 1.6.2
|
||||
- 1.6.1
|
||||
- 1.6
|
||||
- 1.5.4
|
||||
- 1.5.3
|
||||
- 1.5.2
|
||||
- 1.5.1
|
||||
- 1.5
|
||||
- 1.4.3
|
||||
- 1.4.2
|
||||
- 1.4.1
|
||||
- 1.4
|
||||
- 1.3.3
|
||||
- 1.3.2
|
||||
- 1.3.1
|
||||
- 1.3
|
||||
- 1.2.2
|
||||
- 1.2.1
|
||||
- 1.2
|
||||
- 1.1.2
|
||||
- 1.1.1
|
||||
- 1.1
|
||||
before_install:
|
||||
- go get github.com/mattn/goveralls
|
||||
script:
|
||||
- $HOME/gopath/bin/goveralls -service=travis-ci
|
||||
notifications:
|
||||
email:
|
||||
on_success: never
|
||||
matrix:
|
||||
fast_finish: true
|
||||
allow_failures:
|
||||
- go: tip
|
||||
- go: 1.6.4
|
||||
- go: 1.6.3
|
||||
- go: 1.6.2
|
||||
- go: 1.6.1
|
||||
- go: 1.6
|
||||
- go: 1.5.4
|
||||
- go: 1.5.3
|
||||
- go: 1.5.2
|
||||
- go: 1.5.1
|
||||
- go: 1.5
|
||||
- go: 1.4.3
|
||||
- go: 1.4.2
|
||||
- go: 1.4.1
|
||||
- go: 1.4
|
||||
- go: 1.3.3
|
||||
- go: 1.3.2
|
||||
- go: 1.3.1
|
||||
- go: 1.3
|
||||
- go: 1.2.2
|
||||
- go: 1.2.1
|
||||
- go: 1.2
|
||||
- go: 1.1.2
|
||||
- go: 1.1.1
|
||||
- go: 1.1
|
||||
|
|
|
@ -11,7 +11,7 @@ This package implements distance and similarity metrics for strings, based on th
|
|||
|
||||
## Project Status
|
||||
|
||||
v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis.
|
||||
v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis.
|
||||
|
||||
This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.
|
||||
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
module github.com/agext/levenshtein
|
|
@ -1,10 +0,0 @@
|
|||
set -ev
|
||||
|
||||
if [[ "$1" == "goveralls" ]]; then
|
||||
echo "Testing with goveralls..."
|
||||
go get github.com/mattn/goveralls
|
||||
$HOME/gopath/bin/goveralls -service=travis-ci
|
||||
else
|
||||
echo "Testing with go test..."
|
||||
go test -v ./...
|
||||
fi
|
|
@ -12,23 +12,15 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
// headerSorter defines the key-value structure for storing the sorted data in signHeader.
|
||||
// 用于signHeader的字典排序存放容器。
|
||||
type headerSorter struct {
|
||||
Keys []string
|
||||
Vals []string
|
||||
}
|
||||
|
||||
// signHeader signs the header and sets it as the authorization header.
|
||||
// 生成签名方法(直接设置请求的Header)。
|
||||
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
|
||||
// Get the final authorization string
|
||||
authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + conn.getSignedStr(req, canonicalizedResource)
|
||||
|
||||
// Give the parameter "Authorization" value
|
||||
req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
|
||||
}
|
||||
|
||||
func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) string {
|
||||
// Find out the "x-oss-"'s address in header of the request
|
||||
// Find out the "x-oss-"'s address in this request'header
|
||||
temp := make(map[string]string)
|
||||
|
||||
for k, v := range req.Header {
|
||||
|
@ -38,17 +30,16 @@ func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) s
|
|||
}
|
||||
hs := newHeaderSorter(temp)
|
||||
|
||||
// Sort the temp by the ascending order
|
||||
// Sort the temp by the Ascending Order
|
||||
hs.Sort()
|
||||
|
||||
// Get the canonicalizedOSSHeaders
|
||||
// Get the CanonicalizedOSSHeaders
|
||||
canonicalizedOSSHeaders := ""
|
||||
for i := range hs.Keys {
|
||||
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
|
||||
}
|
||||
|
||||
// Give other parameters values
|
||||
// when sign URL, date is expires
|
||||
date := req.Header.Get(HTTPHeaderDate)
|
||||
contentType := req.Header.Get(HTTPHeaderContentType)
|
||||
contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
|
||||
|
@ -58,10 +49,14 @@ func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) s
|
|||
io.WriteString(h, signStr)
|
||||
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
|
||||
return signedStr
|
||||
// Get the final Authorization' string
|
||||
authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + signedStr
|
||||
|
||||
// Give the parameter "Authorization" value
|
||||
req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
|
||||
}
|
||||
|
||||
// newHeaderSorter is an additional function for function SignHeader.
|
||||
// Additional function for function SignHeader.
|
||||
func newHeaderSorter(m map[string]string) *headerSorter {
|
||||
hs := &headerSorter{
|
||||
Keys: make([]string, 0, len(m)),
|
||||
|
@ -75,22 +70,22 @@ func newHeaderSorter(m map[string]string) *headerSorter {
|
|||
return hs
|
||||
}
|
||||
|
||||
// Sort is an additional function for function SignHeader.
|
||||
// Additional function for function SignHeader.
|
||||
func (hs *headerSorter) Sort() {
|
||||
sort.Sort(hs)
|
||||
}
|
||||
|
||||
// Len is an additional function for function SignHeader.
|
||||
// Additional function for function SignHeader.
|
||||
func (hs *headerSorter) Len() int {
|
||||
return len(hs.Vals)
|
||||
}
|
||||
|
||||
// Less is an additional function for function SignHeader.
|
||||
// Additional function for function SignHeader.
|
||||
func (hs *headerSorter) Less(i, j int) bool {
|
||||
return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
|
||||
}
|
||||
|
||||
// Swap is an additional function for function SignHeader.
|
||||
// Additional function for function SignHeader.
|
||||
func (hs *headerSorter) Swap(i, j int) {
|
||||
hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
|
||||
hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
|
||||
|
|
|
@ -5,16 +5,14 @@ import (
|
|||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Bucket implements the operations of object.
|
||||
|
@ -23,15 +21,16 @@ type Bucket struct {
|
|||
BucketName string
|
||||
}
|
||||
|
||||
// PutObject creates a new object and it will overwrite the original one if it exists already.
|
||||
//
|
||||
// objectKey the object key in UTF-8 encoding. The length must be between 1 and 1023, and cannot start with "/" or "\".
|
||||
// reader io.Reader instance for reading the data for uploading
|
||||
// options the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding
|
||||
// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details.
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
|
||||
// PutObject 新建Object,如果Object已存在,覆盖原有Object。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey 上传对象的名称,使用UTF-8编码、长度必须在1-1023字节之间、不能以“/”或者“\”字符开头。
|
||||
// reader io.Reader读取object的数据。
|
||||
// options 上传对象时可以指定对象的属性,可用选项有CacheControl、ContentDisposition、ContentEncoding、
|
||||
// Expires、ServerSideEncryption、ObjectACL、Meta,具体含义请参看
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error {
|
||||
opts := addContentType(options, objectKey)
|
||||
|
@ -49,13 +48,14 @@ func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Op
|
|||
return err
|
||||
}
|
||||
|
||||
// PutObjectFromFile creates a new object from the local file.
|
||||
//
|
||||
// objectKey object key.
|
||||
// filePath the local file path to upload.
|
||||
// options the options for uploading the object. Refer to the parameter options in PutObject for more details.
|
||||
// PutObjectFromFile 新建Object,内容从本地文件中读取。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey 上传对象的名称。
|
||||
// filePath 本地文件,上传对象的值为该文件内容。
|
||||
// options 上传对象时可以指定对象的属性。详见PutObject的options。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error {
|
||||
fd, err := os.Open(filePath)
|
||||
|
@ -79,13 +79,14 @@ func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Op
|
|||
return err
|
||||
}
|
||||
|
||||
// DoPutObject does the actual upload work.
|
||||
//
|
||||
// request the request instance for uploading an object.
|
||||
// options the options for uploading an object.
|
||||
// DoPutObject 上传文件。
|
||||
//
|
||||
// Response the response from OSS.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// request 上传请求。
|
||||
// options 上传选项。
|
||||
//
|
||||
// Response 上传请求返回值。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) {
|
||||
isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType)
|
||||
|
@ -95,8 +96,7 @@ func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*
|
|||
|
||||
listener := getProgressListener(options)
|
||||
|
||||
params := map[string]interface{}{}
|
||||
resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener)
|
||||
resp, err := bucket.do("PUT", request.ObjectKey, "", "", options, request.Reader, listener)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -113,64 +113,60 @@ func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*
|
|||
return resp, err
|
||||
}
|
||||
|
||||
// GetObject downloads the object.
|
||||
//
|
||||
// objectKey the object key.
|
||||
// options the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch,
|
||||
// IfNoneMatch, AcceptEncoding. For more details, please check out:
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
|
||||
// GetObject 下载文件。
|
||||
//
|
||||
// io.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey 下载的文件名称。
|
||||
// options 对象的属性限制项,可选值有Range、IfModifiedSince、IfUnmodifiedSince、IfMatch、
|
||||
// IfNoneMatch、AcceptEncoding,详细请参考
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
|
||||
//
|
||||
// io.ReadCloser reader,读取数据后需要close。error为nil时有效。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) {
|
||||
result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result.Response, nil
|
||||
return result.Response.Body, nil
|
||||
}
|
||||
|
||||
// GetObjectToFile downloads the data to a local file.
|
||||
//
|
||||
// objectKey the object key to download.
|
||||
// filePath the local file to store the object data.
|
||||
// options the options for downloading the object. Refer to the parameter options in method GetObject for more details.
|
||||
// GetObjectToFile 下载文件。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey 下载的文件名称。
|
||||
// filePath 下载对象的内容写到该本地文件。
|
||||
// options 对象的属性限制项。详见GetObject的options。
|
||||
//
|
||||
// error 操作无错误时返回error为nil,非nil为错误说明。
|
||||
//
|
||||
func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error {
|
||||
tempFilePath := filePath + TempFileSuffix
|
||||
|
||||
// Calls the API to actually download the object. Returns the result instance.
|
||||
// 读取Object内容
|
||||
result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer result.Response.Close()
|
||||
defer result.Response.Body.Close()
|
||||
|
||||
// If the local file does not exist, create a new one. If it exists, overwrite it.
|
||||
// 如果文件不存在则创建,存在则清空
|
||||
fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy the data to the local file path.
|
||||
// 存储数据到文件
|
||||
_, err = io.Copy(fd, result.Response.Body)
|
||||
fd.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Compares the CRC value
|
||||
// 比较CRC值
|
||||
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
|
||||
encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil)
|
||||
acceptEncoding := ""
|
||||
if encodeOpt != nil {
|
||||
acceptEncoding = encodeOpt.(string)
|
||||
}
|
||||
if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
|
||||
if bucket.getConfig().IsEnableCRC && !hasRange {
|
||||
result.Response.ClientCRC = result.ClientCRC.Sum64()
|
||||
err = checkCRC(result.Response, "GetObjectToFile")
|
||||
if err != nil {
|
||||
|
@ -182,17 +178,17 @@ func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Opti
|
|||
return os.Rename(tempFilePath, filePath)
|
||||
}
|
||||
|
||||
// DoGetObject is the actual API that gets the object. It's the internal function called by other public APIs.
|
||||
//
|
||||
// request the request to download the object.
|
||||
// options the options for downloading the file. Checks out the parameter options in method GetObject.
|
||||
// DoGetObject 下载文件
|
||||
//
|
||||
// GetObjectResult the result instance of getting the object.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// request 下载请求
|
||||
// options 对象的属性限制项。详见GetObject的options。
|
||||
//
|
||||
// GetObjectResult 下载请求返回值。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
|
||||
params, _ := getRawParams(options)
|
||||
resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil)
|
||||
resp, err := bucket.do("GET", request.ObjectKey, "", "", options, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -201,7 +197,7 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
|
|||
Response: resp,
|
||||
}
|
||||
|
||||
// CRC
|
||||
// crc
|
||||
var crcCalc hash.Hash64
|
||||
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
|
||||
if bucket.getConfig().IsEnableCRC && !hasRange {
|
||||
|
@ -210,32 +206,32 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
|
|||
result.ClientCRC = crcCalc
|
||||
}
|
||||
|
||||
// Progress
|
||||
// progress
|
||||
listener := getProgressListener(options)
|
||||
|
||||
contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
|
||||
resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
|
||||
resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// CopyObject copies the object inside the bucket.
|
||||
//
|
||||
// srcObjectKey the source object to copy.
|
||||
// destObjectKey the target object to copy.
|
||||
// options options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch,
|
||||
// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective.
|
||||
// Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires,
|
||||
// ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details :
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
|
||||
// CopyObject 同一个bucket内拷贝Object。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// srcObjectKey Copy的源对象。
|
||||
// destObjectKey Copy的目标对象。
|
||||
// options Copy对象时,您可以指定源对象的限制条件,满足限制条件时copy,不满足时返回错误,您可以选择如下选项CopySourceIfMatch、
|
||||
// CopySourceIfNoneMatch、CopySourceIfModifiedSince、CopySourceIfUnmodifiedSince、MetadataDirective。
|
||||
// Copy对象时,您可以指定目标对象的属性,如CacheControl、ContentDisposition、ContentEncoding、Expires、
|
||||
// ServerSideEncryption、ObjectACL、Meta,选项的含义请参看
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
|
||||
var out CopyObjectResult
|
||||
options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
|
||||
params := map[string]interface{}{}
|
||||
resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil)
|
||||
resp, err := bucket.do("PUT", destObjectKey, "", "", options, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -245,28 +241,29 @@ func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...O
|
|||
return out, err
|
||||
}
|
||||
|
||||
// CopyObjectTo copies the object to another bucket.
|
||||
//
|
||||
// srcObjectKey source object key. The source bucket is Bucket.BucketName .
|
||||
// destBucketName target bucket name.
|
||||
// destObjectKey target object name.
|
||||
// options copy options, check out parameter options in function CopyObject for more details.
|
||||
// CopyObjectTo bucket间拷贝object。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// srcObjectKey 源Object名称。源Bucket名称为Bucket.BucketName。
|
||||
// destBucketName 目标Bucket名称。
|
||||
// destObjectKey 目标Object名称。
|
||||
// options Copy选项,详见CopyObject的options。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
|
||||
return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
|
||||
}
|
||||
|
||||
//
|
||||
// CopyObjectFrom copies the object to another bucket.
|
||||
// CopyObjectFrom bucket间拷贝object。
|
||||
//
|
||||
// srcBucketName source bucket name.
|
||||
// srcObjectKey source object name.
|
||||
// destObjectKey target object name. The target bucket name is Bucket.BucketName.
|
||||
// options copy options. Check out parameter options in function CopyObject.
|
||||
// srcBucketName 源Bucket名称。
|
||||
// srcObjectKey 源Object名称。
|
||||
// destObjectKey 目标Object名称。目标Bucket名称为Bucket.BucketName。
|
||||
// options Copy选项,详见CopyObject的options。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
|
||||
destBucketName := bucket.BucketName
|
||||
|
@ -287,8 +284,7 @@ func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, op
|
|||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
params := map[string]interface{}{}
|
||||
resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil)
|
||||
resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, "", "", headers, nil, 0, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -298,21 +294,22 @@ func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, op
|
|||
return out, err
|
||||
}
|
||||
|
||||
// AppendObject uploads the data in the way of appending an existing or new object.
|
||||
//
|
||||
// AppendObject the parameter appendPosition specifies which postion (in the target object) to append. For the first append (to a non-existing file),
|
||||
// the appendPosition should be 0. The appendPosition in the subsequent calls will be the current object length.
|
||||
// For example, the first appendObject's appendPosition is 0 and it uploaded 65536 bytes data, then the second call's position is 65536.
|
||||
// The response header x-oss-next-append-position after each successful request also specifies the next call's append position (so the caller need not to maintain this information).
|
||||
// AppendObject 追加方式上传。
|
||||
//
|
||||
// objectKey the target object to append to.
|
||||
// reader io.Reader. The read instance for reading the data to append.
|
||||
// appendPosition the start position to append.
|
||||
// destObjectProperties the options for the first appending, such as CacheControl, ContentDisposition, ContentEncoding,
|
||||
// Expires, ServerSideEncryption, ObjectACL.
|
||||
// AppendObject参数必须包含position,其值指定从何处进行追加。首次追加操作的position必须为0,
|
||||
// 后续追加操作的position是Object的当前长度。例如,第一次Append Object请求指定position值为0,
|
||||
// content-length是65536;那么,第二次Append Object需要指定position为65536。
|
||||
// 每次操作成功后,响应头部x-oss-next-append-position也会标明下一次追加的position。
|
||||
//
|
||||
// int64 the next append position, it's valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey 需要追加的Object。
|
||||
// reader io.Reader,读取追的内容。
|
||||
// appendPosition object追加的起始位置。
|
||||
// destObjectProperties 第一次追加时指定新对象的属性,如CacheControl、ContentDisposition、ContentEncoding、
|
||||
// Expires、ServerSideEncryption、ObjectACL。
|
||||
//
|
||||
// int64 下次追加的开始位置,error为nil空时有效。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
|
||||
request := &AppendObjectRequest{
|
||||
|
@ -322,25 +319,21 @@ func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosi
|
|||
}
|
||||
|
||||
result, err := bucket.DoAppendObject(request, options)
|
||||
if err != nil {
|
||||
return appendPosition, err
|
||||
}
|
||||
|
||||
return result.NextPosition, err
|
||||
}
|
||||
|
||||
// DoAppendObject is the actual API that does the object append.
|
||||
//
|
||||
// request the request object for appending object.
|
||||
// options the options for appending object.
|
||||
// DoAppendObject 追加上传。
|
||||
//
|
||||
// AppendObjectResult the result object for appending object.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// request 追加上传请求。
|
||||
// options 追加上传选项。
|
||||
//
|
||||
// AppendObjectResult 追加上传请求返回值。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
|
||||
params := map[string]interface{}{}
|
||||
params["append"] = nil
|
||||
params["position"] = strconv.FormatInt(request.Position, 10)
|
||||
params := "append&position=" + strconv.FormatInt(request.Position, 10)
|
||||
headers := make(map[string]string)
|
||||
|
||||
opts := addContentType(options, request.ObjectKey)
|
||||
|
@ -355,7 +348,7 @@ func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Opti
|
|||
listener := getProgressListener(options)
|
||||
|
||||
handleOptions(headers, opts)
|
||||
resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers,
|
||||
resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, params, headers,
|
||||
request.Reader, initCRC, listener)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -378,15 +371,15 @@ func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Opti
|
|||
return result, nil
|
||||
}
|
||||
|
||||
// DeleteObject deletes the object.
|
||||
//
|
||||
// objectKey the object key to delete.
|
||||
// DeleteObject 删除Object。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey 待删除Object。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) DeleteObject(objectKey string) error {
|
||||
params := map[string]interface{}{}
|
||||
resp, err := bucket.do("DELETE", objectKey, params, nil, nil, nil)
|
||||
resp, err := bucket.do("DELETE", objectKey, "", "", nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -394,14 +387,14 @@ func (bucket Bucket) DeleteObject(objectKey string) error {
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// DeleteObjects deletes multiple objects.
|
||||
//
|
||||
// objectKeys the object keys to delete.
|
||||
// options the options for deleting objects.
|
||||
// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used.
|
||||
// DeleteObjects 批量删除object。
|
||||
//
|
||||
// DeleteObjectsResult the result object.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKeys 待删除object类表。
|
||||
// options 删除选项,DeleteObjectsQuiet,是否是安静模式,默认不使用。
|
||||
//
|
||||
// DeleteObjectsResult 非安静模式的的返回值。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
|
||||
out := DeleteObjectsResult{}
|
||||
|
@ -411,6 +404,7 @@ func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (Dele
|
|||
}
|
||||
isQuiet, _ := findOption(options, deleteObjectsQuiet, false)
|
||||
dxml.Quiet = isQuiet.(bool)
|
||||
encode := "&encoding-type=url"
|
||||
|
||||
bs, err := xml.Marshal(dxml)
|
||||
if err != nil {
|
||||
|
@ -424,12 +418,7 @@ func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (Dele
|
|||
sum := md5.Sum(bs)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
options = append(options, ContentMD5(b64))
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["delete"] = nil
|
||||
params["encoding-type"] = "url"
|
||||
|
||||
resp, err := bucket.do("POST", "", params, options, buffer, nil)
|
||||
resp, err := bucket.do("POST", "", "delete"+encode, "delete", options, buffer, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -443,58 +432,54 @@ func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (Dele
|
|||
return out, err
|
||||
}
|
||||
|
||||
// IsObjectExist checks if the object exists.
|
||||
//
|
||||
// bool flag of object's existence (true:exists; false:non-exist) when error is nil.
|
||||
// IsObjectExist object是否存在。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bool object是否存在,true存在,false不存在。error为nil时有效。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
|
||||
_, err := bucket.GetObjectMeta(objectKey)
|
||||
if err == nil {
|
||||
listRes, err := bucket.ListObjects(Prefix(objectKey), MaxKeys(1))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(listRes.Objects) == 1 && listRes.Objects[0].Key == objectKey {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
switch err.(type) {
|
||||
case ServiceError:
|
||||
if err.(ServiceError).StatusCode == 404 {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, err
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ListObjects lists the objects under the current bucket.
|
||||
//
|
||||
// options it contains all the filters for listing objects.
|
||||
// It could specify a prefix filter on object keys, the max keys count to return and the object key marker and the delimiter for grouping object names.
|
||||
// The key marker means the returned objects' key must be greater than it in lexicographic order.
|
||||
// ListObjects 获得Bucket下筛选后所有的object的列表。
|
||||
//
|
||||
// For example, if the bucket has 8 objects, my-object-1, my-object-11, my-object-2, my-object-21,
|
||||
// my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns
|
||||
// my-object-2, my-object-21, my-object-22 three objects. If the marker is my-object-22 (no other filters), then it returns
|
||||
// my-object-3, my-object-31, my-object-32 three objects. If the max keys is 5, then it returns 5 objects.
|
||||
// The three filters could be used together to achieve filter and paging functionality.
|
||||
// If the prefix is the folder name, then it could list all files under this folder (including the files under its subfolders).
|
||||
// But if the delimiter is specified with '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties.
|
||||
// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects.
|
||||
// But if the delimiter is '/', then only "fun/test.jpg" is returned as files and fun/movie/ is returned as common prefix.
|
||||
// options ListObject的筛选行为。Prefix指定的前缀、MaxKeys最大数目、Marker第一个开始、Delimiter对Object名字进行分组的字符。
|
||||
//
|
||||
// For common usage scenario, check out sample/list_object.go.
|
||||
// 您有如下8个object,my-object-1, my-object-11, my-object-2, my-object-21,
|
||||
// my-object-22, my-object-3, my-object-31, my-object-32。如果您指定了Prefix为my-object-2,
|
||||
// 则返回my-object-2, my-object-21, my-object-22三个object。如果您指定了Marker为my-object-22,
|
||||
// 则返回my-object-3, my-object-31, my-object-32三个object。如果您指定MaxKeys则每次最多返回MaxKeys个,
|
||||
// 最后一次可能不足。这三个参数可以组合使用,实现分页等功能。如果把prefix设为某个文件夹名,就可以罗列以此prefix开头的文件,
|
||||
// 即该文件夹下递归的所有的文件和子文件夹。如果再把delimiter设置为"/"时,返回值就只罗列该文件夹下的文件,该文件夹下的子文件名
|
||||
// 返回在CommonPrefixes部分,子文件夹下递归的文件和文件夹不被显示。例如一个bucket存在三个object,fun/test.jpg、
|
||||
// fun/movie/001.avi、fun/movie/007.avi。若设定prefix为"fun/",则返回三个object;如果增加设定
|
||||
// delimiter为"/",则返回文件"fun/test.jpg"和前缀"fun/movie/",即实现了文件夹的逻辑。
|
||||
//
|
||||
// ListObjectsResponse the return value after operation succeeds (only valid when error is nil).
|
||||
// 常用场景,请参数示例sample/list_object.go。
|
||||
//
|
||||
// ListObjectsResponse 操作成功后的返回值,成员Objects为bucket中对象列表。error为nil时该返回值有效。
|
||||
//
|
||||
func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
|
||||
var out ListObjectsResult
|
||||
|
||||
options = append(options, EncodingType("url"))
|
||||
params, err := getRawParams(options)
|
||||
params, err := handleParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
resp, err := bucket.do("GET", "", params, options, nil, nil)
|
||||
resp, err := bucket.do("GET", "", params, "", nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -509,13 +494,14 @@ func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
|
|||
return out, err
|
||||
}
|
||||
|
||||
// SetObjectMeta sets the metadata of the Object.
|
||||
//
|
||||
// objectKey object
|
||||
// options options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
|
||||
// ServerSideEncryption, and custom metadata.
|
||||
// SetObjectMeta 设置Object的Meta。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey object
|
||||
// options 指定对象的属性,有以下可选项CacheControl、ContentDisposition、ContentEncoding、Expires、
|
||||
// ServerSideEncryption、Meta。
|
||||
//
|
||||
// error 操作无错误时error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
|
||||
options = append(options, MetadataDirective(MetaReplace))
|
||||
|
@ -523,18 +509,18 @@ func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// GetObjectDetailedMeta gets the object's detailed metadata
|
||||
//
|
||||
// objectKey object key.
|
||||
// options the constraints of the object. Only when the object meets the requirements this method will return the metadata. Otherwise returns error. Valid options are IfModifiedSince, IfUnmodifiedSince,
|
||||
// IfMatch, IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
|
||||
// GetObjectDetailedMeta 查询Object的头信息。
|
||||
//
|
||||
// http.Header object meta when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey object名称。
|
||||
// objectPropertyConstraints 对象的属性限制项,满足时正常返回,不满足时返回错误。现在项有IfModifiedSince、IfUnmodifiedSince、
|
||||
// IfMatch、IfNoneMatch。具体含义请参看 https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
|
||||
//
|
||||
// http.Header 对象的meta,error为nil时有效。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
|
||||
params := map[string]interface{}{}
|
||||
resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
|
||||
resp, err := bucket.do("HEAD", objectKey, "", "", options, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -543,21 +529,19 @@ func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option)
|
|||
return resp.Headers, nil
|
||||
}
|
||||
|
||||
// GetObjectMeta gets object metadata.
|
||||
//
|
||||
// GetObjectMeta is more lightweight than GetObjectDetailedMeta as it only returns basic metadata including ETag
|
||||
// size, LastModified. The size information is in the HTTP header Content-Length.
|
||||
// GetObjectMeta 查询Object的头信息。
|
||||
//
|
||||
// objectKey object key
|
||||
// GetObjectMeta相比GetObjectDetailedMeta更轻量,仅返回指定Object的少量基本meta信息,
|
||||
// 包括该Object的ETag、Size(对象大小)、LastModified,其中Size由响应头Content-Length的数值表示。
|
||||
//
|
||||
// http.Header the object's metadata, valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey object名称。
|
||||
//
|
||||
func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) {
|
||||
params := map[string]interface{}{}
|
||||
params["objectMeta"] = nil
|
||||
//resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil)
|
||||
resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
|
||||
// http.Header 对象的meta,error为nil时有效。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
|
||||
resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -566,27 +550,26 @@ func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.He
|
|||
return resp.Headers, nil
|
||||
}
|
||||
|
||||
// SetObjectACL updates the object's ACL.
|
||||
//
|
||||
// Only the bucket's owner could update object's ACL which priority is higher than bucket's ACL.
|
||||
// For example, if the bucket ACL is private and object's ACL is public-read-write.
|
||||
// Then object's ACL is used and it means all users could read or write that object.
|
||||
// When the object's ACL is not set, then bucket's ACL is used as the object's ACL.
|
||||
// SetObjectACL 修改Object的ACL权限。
|
||||
//
|
||||
// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object;
|
||||
// Object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects,
|
||||
// CompleteMultipartUpload and CopyObject on target object.
|
||||
// 只有Bucket Owner才有权限调用PutObjectACL来修改Object的ACL。Object ACL优先级高于Bucket ACL。
|
||||
// 例如Bucket ACL是private的,而Object ACL是public-read-write的,则访问这个Object时,
|
||||
// 先判断Object的ACL,所以所有用户都拥有这个Object的访问权限,即使这个Bucket是private bucket。
|
||||
// 如果某个Object从来没设置过ACL,则访问权限遵循Bucket ACL。
|
||||
//
|
||||
// objectKey the target object key (to set the ACL on)
|
||||
// objectAcl object ACL. Valid options are PrivateACL, PublicReadACL, PublicReadWriteACL.
|
||||
// Object的读操作包括GetObject,HeadObject,CopyObject和UploadPartCopy中的对source object的读;
|
||||
// Object的写操作包括:PutObject,PostObject,AppendObject,DeleteObject,
|
||||
// DeleteMultipleObjects,CompleteMultipartUpload以及CopyObject对新的Object的写。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey 设置权限的object。
|
||||
// objectAcl 对象权限。可选值PrivateACL(私有读写)、PublicReadACL(公共读私有写)、PublicReadWriteACL(公共读写)。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
|
||||
options := []Option{ObjectACL(objectACL)}
|
||||
params := map[string]interface{}{}
|
||||
params["acl"] = nil
|
||||
resp, err := bucket.do("PUT", objectKey, params, options, nil, nil)
|
||||
resp, err := bucket.do("PUT", objectKey, "acl", "acl", options, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -594,18 +577,17 @@ func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// GetObjectACL gets object's ACL
|
||||
//
|
||||
// objectKey the object to get ACL from.
|
||||
// GetObjectACL 获取对象的ACL权限。
|
||||
//
|
||||
// GetObjectACLResult the result object when error is nil. GetObjectACLResult.Acl is the object ACL.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// objectKey 获取权限的object。
|
||||
//
|
||||
// GetObjectAclResponse 获取权限操作返回值,error为nil时有效。GetObjectAclResponse.Acl为对象的权限。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
|
||||
var out GetObjectACLResult
|
||||
params := map[string]interface{}{}
|
||||
params["acl"] = nil
|
||||
resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
|
||||
resp, err := bucket.do("GET", objectKey, "acl", "acl", nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -615,320 +597,8 @@ func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error)
|
|||
return out, err
|
||||
}
|
||||
|
||||
// PutSymlink creates a symlink (to point to an existing object)
|
||||
//
|
||||
// Symlink cannot point to another symlink.
|
||||
// When creating a symlink, it does not check the existence of the target file, and does not check if the target file is symlink.
|
||||
// Neither it checks the caller's permission on the target file. All these checks are deferred to the actual GetObject call via this symlink.
|
||||
// If trying to add an existing file, as long as the caller has the write permission, the existing one will be overwritten.
|
||||
// If the x-oss-meta- is specified, it will be added as the metadata of the symlink file.
|
||||
//
|
||||
// symObjectKey the symlink object's key.
|
||||
// targetObjectKey the target object key to point to.
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
|
||||
options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
|
||||
params := map[string]interface{}{}
|
||||
params["symlink"] = nil
|
||||
resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// GetSymlink gets the symlink object with the specified key.
|
||||
// If the symlink object does not exist, returns 404.
|
||||
//
|
||||
// objectKey the symlink object's key.
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object.
|
||||
//
|
||||
func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
|
||||
params := map[string]interface{}{}
|
||||
params["symlink"] = nil
|
||||
resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget)
|
||||
targetObjectKey, err = url.QueryUnescape(targetObjectKey)
|
||||
if err != nil {
|
||||
return resp.Headers, err
|
||||
}
|
||||
resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey)
|
||||
return resp.Headers, err
|
||||
}
|
||||
|
||||
// RestoreObject restores the object from the archive storage.
|
||||
//
|
||||
// An archive object is in cold status by default and it cannot be accessed.
|
||||
// When restore is called on the cold object, it will become available for access after some time.
|
||||
// If multiple restores are called on the same file when the object is being restored, server side does nothing for additional calls but returns success.
|
||||
// By default, the restored object is available for access for one day. After that it will be unavailable again.
|
||||
// But if another RestoreObject are called after the file is restored, then it will extend one day's access time of that object, up to 7 days.
|
||||
//
|
||||
// objectKey object key to restore.
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) RestoreObject(objectKey string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["restore"] = nil
|
||||
resp, err := bucket.do("POST", objectKey, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
|
||||
}
|
||||
|
||||
// SignURL signs the URL. Users could access the object directly with this URL without getting the AK.
|
||||
//
|
||||
// objectKey the target object to sign.
|
||||
// signURLConfig the configuration for the signed URL
|
||||
//
|
||||
// string returns the signed URL, when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
|
||||
if expiredInSec < 0 {
|
||||
return "", fmt.Errorf("invalid expires: %d, expires must bigger than 0", expiredInSec)
|
||||
}
|
||||
expiration := time.Now().Unix() + expiredInSec
|
||||
|
||||
params, err := getRawParams(options)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
headers := make(map[string]string)
|
||||
err = handleOptions(headers, options)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
|
||||
}
|
||||
|
||||
// PutObjectWithURL uploads an object with the URL. If the object exists, it will be overwritten.
|
||||
// PutObjectWithURL It will not generate minetype according to the key name.
|
||||
//
|
||||
// signedURL signed URL.
|
||||
// reader io.Reader the read instance for reading the data for the upload.
|
||||
// options the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding,
|
||||
// Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
|
||||
resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// PutObjectFromFileWithURL uploads an object from a local file with the signed URL.
|
||||
// PutObjectFromFileWithURL It does not generate mimetype according to object key's name or the local file name.
|
||||
//
|
||||
// signedURL the signed URL.
|
||||
// filePath local file path, such as dirfile.txt, for uploading.
|
||||
// options options for uploading, same as the options in PutObject function.
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DoPutObjectWithURL is the actual API that does the upload with URL work(internal for SDK)
|
||||
//
|
||||
// signedURL the signed URL.
|
||||
// reader io.Reader the read instance for getting the data to upload.
|
||||
// options options for uploading.
|
||||
//
|
||||
// Response the response object which contains the HTTP response.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
|
||||
listener := getProgressListener(options)
|
||||
|
||||
params := map[string]interface{}{}
|
||||
resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if bucket.getConfig().IsEnableCRC {
|
||||
err = checkCRC(resp, "DoPutObjectWithURL")
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
|
||||
err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// GetObjectWithURL downloads the object and returns the reader instance, with the signed URL.
|
||||
//
|
||||
// signedURL the signed URL.
|
||||
// options options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch,
|
||||
// IfNoneMatch, AcceptEncoding. For more information, check out the following link:
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
|
||||
//
|
||||
// io.ReadCloser the reader object for getting the data from response. It needs be closed after the usage. It's only valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) {
|
||||
result, err := bucket.DoGetObjectWithURL(signedURL, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result.Response, nil
|
||||
}
|
||||
|
||||
// GetObjectToFileWithURL downloads the object into a local file with the signed URL.
|
||||
//
|
||||
// signedURL the signed URL
|
||||
// filePath the local file path to download to.
|
||||
// options the options for downloading object. Check out the parameter options in function GetObject for the reference.
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error {
|
||||
tempFilePath := filePath + TempFileSuffix
|
||||
|
||||
// Get the object's content
|
||||
result, err := bucket.DoGetObjectWithURL(signedURL, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer result.Response.Close()
|
||||
|
||||
// If the file does not exist, create one. If exists, then overwrite it.
|
||||
fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save the data to the file.
|
||||
_, err = io.Copy(fd, result.Response.Body)
|
||||
fd.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Compare the CRC value. If CRC values do not match, return error.
|
||||
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
|
||||
encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil)
|
||||
acceptEncoding := ""
|
||||
if encodeOpt != nil {
|
||||
acceptEncoding = encodeOpt.(string)
|
||||
}
|
||||
|
||||
if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
|
||||
result.Response.ClientCRC = result.ClientCRC.Sum64()
|
||||
err = checkCRC(result.Response, "GetObjectToFileWithURL")
|
||||
if err != nil {
|
||||
os.Remove(tempFilePath)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return os.Rename(tempFilePath, filePath)
|
||||
}
|
||||
|
||||
// DoGetObjectWithURL is the actual API that downloads the file with the signed URL.
//
// signedURL the signed URL.
// options the options for getting object. Check out parameter options in GetObject for the reference.
//
// GetObjectResult the result object when the error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
	// Errors from getRawParams are deliberately ignored; an empty params
	// map is an acceptable fallback for a pre-signed URL request.
	params, _ := getRawParams(options)
	resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil)
	if err != nil {
		return nil, err
	}

	result := &GetObjectResult{
		Response: resp,
	}

	// CRC: set up a CRC64 calculator unless this is a range (partial)
	// request, where the server CRC covers the whole object and would
	// never match. crcCalc stays nil when CRC checking is disabled.
	var crcCalc hash.Hash64
	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
	if bucket.getConfig().IsEnableCRC && !hasRange {
		crcCalc = crc64.New(crcTable())
		result.ServerCRC = resp.ServerCRC
		result.ClientCRC = crcCalc
	}

	// Progress: listener may be nil when no progress option was supplied.
	listener := getProgressListener(options)

	// A parse failure leaves contentLen at 0, which only affects progress
	// reporting, not the download itself.
	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
	// Wrap the body so CRC and progress are updated transparently as the
	// caller reads; this must happen before the body is handed back.
	resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)

	return result, nil
}
|
||||
|
||||
//
|
||||
// ProcessObject apply process on the specified image file.
|
||||
//
|
||||
// The supported process includes resize, rotate, crop, watermark, format,
|
||||
// udf, customized style, etc.
|
||||
//
|
||||
//
|
||||
// objectKey object key to process.
|
||||
// process process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) ProcessObject(objectKey string, process string) (ProcessObjectResult, error) {
|
||||
var out ProcessObjectResult
|
||||
params := map[string]interface{}{}
|
||||
params["x-oss-process"] = nil
|
||||
processData := fmt.Sprintf("%v=%v", "x-oss-process", process)
|
||||
data := strings.NewReader(processData)
|
||||
resp, err := bucket.do("POST", objectKey, params, nil, data, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = jsonUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// Private
|
||||
func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
|
||||
func (bucket Bucket) do(method, objectName, urlParams, subResource string, options []Option,
|
||||
data io.Reader, listener ProgressListener) (*Response, error) {
|
||||
headers := make(map[string]string)
|
||||
err := handleOptions(headers, options)
|
||||
|
@ -936,17 +606,7 @@ func (bucket Bucket) do(method, objectName string, params map[string]interface{}
|
|||
return nil, err
|
||||
}
|
||||
return bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
|
||||
params, headers, data, 0, listener)
|
||||
}
|
||||
|
||||
func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
|
||||
data io.Reader, listener ProgressListener) (*Response, error) {
|
||||
headers := make(map[string]string)
|
||||
err := handleOptions(headers, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener)
|
||||
urlParams, subResource, headers, data, 0, listener)
|
||||
}
|
||||
|
||||
func (bucket Bucket) getConfig() *Config {
|
||||
|
|
|
@ -11,68 +11,70 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
// Client SDK's entry point. It's for bucket related options such as create/delete/set bucket (such as set/get ACL/lifecycle/referer/logging/website).
|
||||
// Object related operations are done by Bucket class.
|
||||
// Users use oss.New to create Client instance.
|
||||
//
|
||||
// Client Sdk的入口,Client的方法可以完成bucket的各种操作,如create/delete bucket,
|
||||
// set/get acl/lifecycle/referer/logging/website等。文件(object)的上传下载通过Bucket完成。
|
||||
// 用户用oss.New创建Client。
|
||||
//
|
||||
type (
|
||||
// Client OSS client
|
||||
// Client oss client
|
||||
Client struct {
|
||||
Config *Config // OSS client configuration
|
||||
Conn *Conn // Send HTTP request
|
||||
HTTPClient *http.Client //http.Client to use - if nil will make its own
|
||||
Config *Config // Oss Client configure
|
||||
Conn *Conn // Send http request
|
||||
}
|
||||
|
||||
// ClientOption client option such as UseCname, Timeout, SecurityToken.
|
||||
ClientOption func(*Client)
|
||||
)
|
||||
|
||||
// New creates a new client.
|
||||
//
|
||||
// endpoint the OSS datacenter endpoint such as http://oss-cn-hangzhou.aliyuncs.com .
|
||||
// accessKeyId access key Id.
|
||||
// accessKeySecret access key secret.
|
||||
// New 生成一个新的Client。
|
||||
//
|
||||
// Client creates the new client instance, the returned value is valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// endpoint 用户Bucket所在数据中心的访问域名,如http://oss-cn-hangzhou.aliyuncs.com。
|
||||
// accessKeyId 用户标识。
|
||||
// accessKeySecret 用户密钥。
|
||||
//
|
||||
// Client 生成的新Client。error为nil时有效。
|
||||
// error 操作无错误时为nil,非nil时表示操作出错。
|
||||
//
|
||||
func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
|
||||
// Configuration
|
||||
// configuration
|
||||
config := getDefaultOssConfig()
|
||||
config.Endpoint = endpoint
|
||||
config.AccessKeyID = accessKeyID
|
||||
config.AccessKeySecret = accessKeySecret
|
||||
|
||||
// URL parse
|
||||
// url parse
|
||||
url := &urlMaker{}
|
||||
url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
|
||||
|
||||
// HTTP connect
|
||||
// http connect
|
||||
conn := &Conn{config: config, url: url}
|
||||
|
||||
// OSS client
|
||||
// oss client
|
||||
client := &Client{
|
||||
Config: config,
|
||||
Conn: conn,
|
||||
config,
|
||||
conn,
|
||||
}
|
||||
|
||||
// Client options parse
|
||||
// client options parse
|
||||
for _, option := range options {
|
||||
option(client)
|
||||
}
|
||||
|
||||
// Create HTTP connection
|
||||
err := conn.init(config, url, client.HTTPClient)
|
||||
// create http connect
|
||||
err := conn.init(config, url)
|
||||
|
||||
return client, err
|
||||
}
|
||||
|
||||
// Bucket gets the bucket instance.
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// Bucket the bucket object, when error is nil.
|
||||
// Bucket 取存储空间(Bucket)的对象实例。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 存储空间名称。
|
||||
// Bucket 新的Bucket。error为nil时有效。
|
||||
//
|
||||
// error 操作无错误时返回nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) Bucket(bucketName string) (*Bucket, error) {
|
||||
return &Bucket{
|
||||
|
@ -81,36 +83,21 @@ func (client Client) Bucket(bucketName string) (*Bucket, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// CreateBucket creates a bucket.
|
||||
//
|
||||
// bucketName the bucket name, it's globably unique and immutable. The bucket name can only consist of lowercase letters, numbers and dash ('-').
|
||||
// It must start with lowercase letter or number and the length can only be between 3 and 255.
|
||||
// options options for creating the bucket, with optional ACL. The ACL could be ACLPrivate, ACLPublicRead, and ACLPublicReadWrite. By default it's ACLPrivate.
|
||||
// It could also be specified with StorageClass option, which supports StorageStandard, StorageIA(infrequent access), StorageArchive.
|
||||
// CreateBucket 创建Bucket。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName bucket名称,在整个OSS中具有全局唯一性,且不能修改。bucket名称的只能包括小写字母,数字和短横线-,
|
||||
// 必须以小写字母或者数字开头,长度必须在3-255字节之间。
|
||||
// options 创建bucket的选项。您可以使用选项ACL,指定bucket的访问权限。Bucket有以下三种访问权限,私有读写(ACLPrivate)、
|
||||
// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite),默认访问权限是私有读写。
|
||||
//
|
||||
// error 操作无错误时返回nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) CreateBucket(bucketName string, options ...Option) error {
|
||||
headers := make(map[string]string)
|
||||
handleOptions(headers, options)
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
|
||||
isOptSet, val, _ := isOptionSet(options, storageClass)
|
||||
if isOptSet {
|
||||
cbConfig := createBucketConfiguration{StorageClass: val.(StorageClassType)}
|
||||
bs, err := xml.Marshal(cbConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buffer.Write(bs)
|
||||
|
||||
contentType := http.DetectContentType(buffer.Bytes())
|
||||
headers[HTTPHeaderContentType] = contentType
|
||||
}
|
||||
|
||||
params := map[string]interface{}{}
|
||||
resp, err := client.do("PUT", bucketName, params, headers, buffer)
|
||||
resp, err := client.do("PUT", bucketName, "", "", headers, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -119,25 +106,25 @@ func (client Client) CreateBucket(bucketName string, options ...Option) error {
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// ListBuckets lists buckets of the current account under the given endpoint, with optional filters.
|
||||
//
|
||||
// options specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter.
|
||||
// And marker makes sure the returned buckets' name are greater than it in lexicographic order.
|
||||
// Maxkeys limits the max keys to return, and by default it's 100 and up to 1000.
|
||||
// For the common usage scenario, please check out list_bucket.go in the sample.
|
||||
// ListBucketsResponse the response object if error is nil.
|
||||
// ListBuckets 获取当前用户下的bucket。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// options 指定ListBuckets的筛选行为,Prefix、Marker、MaxKeys三个选项。Prefix限定前缀。
|
||||
// Marker设定从Marker之后的第一个开始返回。MaxKeys限定此次返回的最大数目,默认为100。
|
||||
// 常用使用场景的实现,参数示例程序list_bucket.go。
|
||||
// ListBucketsResponse 操作成功后的返回值,error为nil时该返回值有效。
|
||||
//
|
||||
// error 操作无错误时返回nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
|
||||
var out ListBucketsResult
|
||||
|
||||
params, err := getRawParams(options)
|
||||
params, err := handleParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
resp, err := client.do("GET", "", params, nil, nil)
|
||||
resp, err := client.do("GET", "", params, "", nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -147,12 +134,13 @@ func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
|
|||
return out, err
|
||||
}
|
||||
|
||||
// IsBucketExist checks if the bucket exists
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// IsBucketExist Bucket是否存在。
|
||||
//
|
||||
// bool true if it exists, and it's only valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 存储空间名称。
|
||||
//
|
||||
// bool 存储空间是否存在。error为nil时有效。
|
||||
// error 操作无错误时返回nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) IsBucketExist(bucketName string) (bool, error) {
|
||||
listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
|
||||
|
@ -166,15 +154,15 @@ func (client Client) IsBucketExist(bucketName string) (bool, error) {
|
|||
return false, nil
|
||||
}
|
||||
|
||||
// DeleteBucket deletes the bucket. Only empty bucket can be deleted (no object and parts).
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// DeleteBucket 删除空存储空间。非空时请先清理Object、Upload。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 存储空间名称。
|
||||
//
|
||||
// error 操作无错误时返回nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) DeleteBucket(bucketName string) error {
|
||||
params := map[string]interface{}{}
|
||||
resp, err := client.do("DELETE", bucketName, params, nil, nil)
|
||||
resp, err := client.do("DELETE", bucketName, "", "", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -183,20 +171,19 @@ func (client Client) DeleteBucket(bucketName string) error {
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// GetBucketLocation gets the bucket location.
|
||||
//
|
||||
// Checks out the following link for more information :
|
||||
// GetBucketLocation 查看Bucket所属数据中心位置的信息。
|
||||
//
|
||||
// 如果您想了解"访问域名和数据中心"详细信息,请参看
|
||||
// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
|
||||
//
|
||||
// bucketName the bucket name
|
||||
// bucketName 存储空间名称。
|
||||
//
|
||||
// string bucket's datacenter location
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// string Bucket所属的数据中心位置信息。
|
||||
// error 操作无错误时返回nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) GetBucketLocation(bucketName string) (string, error) {
|
||||
params := map[string]interface{}{}
|
||||
params["location"] = nil
|
||||
resp, err := client.do("GET", bucketName, params, nil, nil)
|
||||
resp, err := client.do("GET", bucketName, "location", "location", nil, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -207,17 +194,18 @@ func (client Client) GetBucketLocation(bucketName string) (string, error) {
|
|||
return LocationConstraint, err
|
||||
}
|
||||
|
||||
// SetBucketACL sets bucket's ACL.
|
||||
//
|
||||
// bucketName the bucket name
|
||||
// bucketAcl the bucket ACL: ACLPrivate, ACLPublicRead and ACLPublicReadWrite.
|
||||
// SetBucketACL 修改Bucket的访问权限。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 存储空间名称。
|
||||
// bucketAcl bucket的访问权限。Bucket有以下三种访问权限,Bucket有以下三种访问权限,私有读写(ACLPrivate)、
|
||||
// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite)。
|
||||
//
|
||||
// error 操作无错误时返回nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
|
||||
headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
|
||||
params := map[string]interface{}{}
|
||||
resp, err := client.do("PUT", bucketName, params, headers, nil)
|
||||
resp, err := client.do("PUT", bucketName, "", "", headers, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -225,18 +213,17 @@ func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// GetBucketACL gets the bucket ACL.
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// GetBucketACL 获得Bucket的访问权限。
|
||||
//
|
||||
// GetBucketAclResponse the result object, and it's only valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 存储空间名称。
|
||||
//
|
||||
// GetBucketAclResponse 操作成功后的返回值,error为nil时该返回值有效。
|
||||
// error 操作无错误时返回nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) {
|
||||
var out GetBucketACLResult
|
||||
params := map[string]interface{}{}
|
||||
params["acl"] = nil
|
||||
resp, err := client.do("GET", bucketName, params, nil, nil)
|
||||
resp, err := client.do("GET", bucketName, "acl", "acl", nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -246,16 +233,19 @@ func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error)
|
|||
return out, err
|
||||
}
|
||||
|
||||
// SetBucketLifecycle sets the bucket's lifecycle.
|
||||
//
|
||||
// For more information, checks out following link:
|
||||
// SetBucketLifecycle 修改Bucket的生命周期设置。
|
||||
//
|
||||
// OSS提供Object生命周期管理来为用户管理对象。用户可以为某个Bucket定义生命周期配置,来为该Bucket的Object定义各种规则。
|
||||
// Bucket的拥有者可以通过SetBucketLifecycle来设置Bucket的Lifecycle配置。Lifecycle开启后,OSS将按照配置,
|
||||
// 定期自动删除与Lifecycle规则相匹配的Object。如果您想了解更多的生命周期的信息,请参看
|
||||
// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// rules the lifecycle rules. There're two kind of rules: absolute time expiration and relative time expiration in days and day/month/year respectively.
|
||||
// Check out sample/bucket_lifecycle.go for more details.
|
||||
// bucketName 存储空间名称。
|
||||
// rules 生命周期规则列表。生命周期规则有两种格式,指定绝对和相对过期时间,分布由days和year/month/day控制。
|
||||
// 具体用法请参考示例程序sample/bucket_lifecycle.go。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// error 操作无错误时返回error为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
|
||||
lxml := lifecycleXML{Rules: convLifecycleRule(rules)}
|
||||
|
@ -270,9 +260,7 @@ func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule
|
|||
headers := map[string]string{}
|
||||
headers[HTTPHeaderContentType] = contentType
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["lifecycle"] = nil
|
||||
resp, err := client.do("PUT", bucketName, params, headers, buffer)
|
||||
resp, err := client.do("PUT", bucketName, "lifecycle", "lifecycle", headers, buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -280,17 +268,16 @@ func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// DeleteBucketLifecycle deletes the bucket's lifecycle.
|
||||
//
|
||||
// DeleteBucketLifecycle 删除Bucket的生命周期设置。
|
||||
//
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// bucketName 存储空间名称。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) DeleteBucketLifecycle(bucketName string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["lifecycle"] = nil
|
||||
resp, err := client.do("DELETE", bucketName, params, nil, nil)
|
||||
resp, err := client.do("DELETE", bucketName, "lifecycle", "lifecycle", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -298,18 +285,17 @@ func (client Client) DeleteBucketLifecycle(bucketName string) error {
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// GetBucketLifecycle gets the bucket's lifecycle settings.
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// GetBucketLifecycle 查看Bucket的生命周期设置。
|
||||
//
|
||||
// GetBucketLifecycleResponse the result object upon successful request. It's only valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 存储空间名称。
|
||||
//
|
||||
// GetBucketLifecycleResponse 操作成功的返回值,error为nil时该返回值有效。Rules为该bucket上的规则列表。
|
||||
// error 操作无错误时为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) {
|
||||
var out GetBucketLifecycleResult
|
||||
params := map[string]interface{}{}
|
||||
params["lifecycle"] = nil
|
||||
resp, err := client.do("GET", bucketName, params, nil, nil)
|
||||
resp, err := client.do("GET", bucketName, "lifecycle", "lifecycle", nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -319,20 +305,21 @@ func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleRe
|
|||
return out, err
|
||||
}
|
||||
|
||||
// SetBucketReferer sets the bucket's referer whitelist and the flag if allowing empty referrer.
|
||||
//
|
||||
// To avoid stealing link on OSS data, OSS supports the HTTP referrer header. A whitelist referrer could be set either by API or web console, as well as
|
||||
// the allowing empty referrer flag. Note that this applies to requests from webbrowser only.
|
||||
// For example, for a bucket os-example and its referrer http://www.aliyun.com, all requests from this URL could access the bucket.
|
||||
// For more information, please check out this link :
|
||||
// SetBucketReferer 设置bucket的referer访问白名单和是否允许referer字段为空的请求访问。
|
||||
//
|
||||
// 防止用户在OSS上的数据被其他人盗用,OSS支持基于HTTP header中表头字段referer的防盗链方法。可以通过OSS控制台或者API的方式对
|
||||
// 一个bucket设置referer字段的白名单和是否允许referer字段为空的请求访问。例如,对于一个名为oss-example的bucket,
|
||||
// 设置其referer白名单为http://www.aliyun.com。则所有referer为http://www.aliyun.com的请求才能访问oss-example
|
||||
// 这个bucket中的object。如果您还需要了解更多信息,请参看
|
||||
// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// referers the referrer white list. A bucket could have a referrer list and each referrer supports one '*' and multiple '?' as wildcards.
|
||||
// The sample could be found in sample/bucket_referer.go
|
||||
// allowEmptyReferer the flag of allowing empty referrer. By default it's true.
|
||||
// bucketName 存储空间名称。
|
||||
// referers 访问白名单列表。一个bucket可以支持多个referer参数。referer参数支持通配符"*"和"?"。
|
||||
// 用法请参看示例sample/bucket_referer.go
|
||||
// allowEmptyReferer 指定是否允许referer字段为空的请求访问。 默认为true。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error {
|
||||
rxml := RefererXML{}
|
||||
|
@ -356,9 +343,7 @@ func (client Client) SetBucketReferer(bucketName string, referers []string, allo
|
|||
headers := map[string]string{}
|
||||
headers[HTTPHeaderContentType] = contentType
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["referer"] = nil
|
||||
resp, err := client.do("PUT", bucketName, params, headers, buffer)
|
||||
resp, err := client.do("PUT", bucketName, "referer", "referer", headers, buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -366,18 +351,17 @@ func (client Client) SetBucketReferer(bucketName string, referers []string, allo
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// GetBucketReferer gets the bucket's referrer white list.
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// GetBucketReferer 获得Bucket的白名单地址。
|
||||
//
|
||||
// GetBucketRefererResponse the result object upon successful request. It's only valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 存储空间名称。
|
||||
//
|
||||
// GetBucketRefererResponse 操作成功的返回值,error为nil时该返回值有效。
|
||||
// error 操作无错误时为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) {
|
||||
var out GetBucketRefererResult
|
||||
params := map[string]interface{}{}
|
||||
params["referer"] = nil
|
||||
resp, err := client.do("GET", bucketName, params, nil, nil)
|
||||
resp, err := client.do("GET", bucketName, "referer", "referer", nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -387,17 +371,18 @@ func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult
|
|||
return out, err
|
||||
}
|
||||
|
||||
// SetBucketLogging sets the bucket logging settings.
|
||||
//
|
||||
// OSS could automatically store the access log. Only the bucket owner could enable the logging.
|
||||
// Once enabled, OSS would save all the access log into hourly log files in a specified bucket.
|
||||
// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
|
||||
// SetBucketLogging 修改Bucket的日志设置。
|
||||
//
|
||||
// bucketName bucket name to enable the log.
|
||||
// targetBucket the target bucket name to store the log files.
|
||||
// targetPrefix the log files' prefix.
|
||||
// OSS为您提供自动保存访问日志记录功能。Bucket的拥有者可以开启访问日志记录功能。当一个bucket开启访问日志记录功能后,
|
||||
// OSS自动将访问这个bucket的请求日志,以小时为单位,按照固定的命名规则,生成一个Object写入用户指定的bucket中。
|
||||
// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 需要记录访问日志的Bucket。
|
||||
// targetBucket 访问日志记录到的Bucket。
|
||||
// targetPrefix bucketName中需要存储访问日志记录的object前缀。为空记录所有object的访问日志。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
|
||||
isEnable bool) error {
|
||||
|
@ -424,9 +409,7 @@ func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix str
|
|||
headers := map[string]string{}
|
||||
headers[HTTPHeaderContentType] = contentType
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["logging"] = nil
|
||||
resp, err := client.do("PUT", bucketName, params, headers, buffer)
|
||||
resp, err := client.do("PUT", bucketName, "logging", "logging", headers, buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -434,16 +417,15 @@ func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix str
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// DeleteBucketLogging deletes the logging configuration to disable the logging on the bucket.
|
||||
//
|
||||
// bucketName the bucket name to disable the logging.
|
||||
// DeleteBucketLogging 删除Bucket的日志设置。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 需要删除访问日志的Bucket。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) DeleteBucketLogging(bucketName string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["logging"] = nil
|
||||
resp, err := client.do("DELETE", bucketName, params, nil, nil)
|
||||
resp, err := client.do("DELETE", bucketName, "logging", "logging", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -451,18 +433,17 @@ func (client Client) DeleteBucketLogging(bucketName string) error {
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// GetBucketLogging gets the bucket's logging settings
|
||||
//
|
||||
// bucketName the bucket name
|
||||
// GetBucketLoggingResponse the result object upon successful request. It's only valid when error is nil.
|
||||
// GetBucketLogging 获得Bucket的日志设置。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 需要删除访问日志的Bucket。
|
||||
// GetBucketLoggingResponse 操作成功的返回值,error为nil时该返回值有效。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) {
|
||||
var out GetBucketLoggingResult
|
||||
params := map[string]interface{}{}
|
||||
params["logging"] = nil
|
||||
resp, err := client.do("GET", bucketName, params, nil, nil)
|
||||
resp, err := client.do("GET", bucketName, "logging", "logging", nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -472,16 +453,17 @@ func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult
|
|||
return out, err
|
||||
}
|
||||
|
||||
// SetBucketWebsite sets the bucket's static website's index and error page.
|
||||
//
|
||||
// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website.
|
||||
// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
|
||||
// SetBucketWebsite 设置/修改Bucket的默认首页以及错误页。
|
||||
//
|
||||
// bucketName the bucket name to enable static web site.
|
||||
// indexDocument index page.
|
||||
// errorDocument error page.
|
||||
// OSS支持静态网站托管,Website操作可以将一个bucket设置成静态网站托管模式 。您可以将自己的Bucket配置成静态网站托管模式。
|
||||
// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 需要设置Website的Bucket。
|
||||
// indexDocument 索引文档。
|
||||
// errorDocument 错误文档。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error {
|
||||
wxml := WebsiteXML{}
|
||||
|
@ -499,9 +481,7 @@ func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument s
|
|||
headers := make(map[string]string)
|
||||
headers[HTTPHeaderContentType] = contentType
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["website"] = nil
|
||||
resp, err := client.do("PUT", bucketName, params, headers, buffer)
|
||||
resp, err := client.do("PUT", bucketName, "website", "website", headers, buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -509,16 +489,15 @@ func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument s
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// DeleteBucketWebsite deletes the bucket's static web site settings.
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// DeleteBucketWebsite 删除Bucket的Website设置。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 需要删除website设置的Bucket。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) DeleteBucketWebsite(bucketName string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["website"] = nil
|
||||
resp, err := client.do("DELETE", bucketName, params, nil, nil)
|
||||
resp, err := client.do("DELETE", bucketName, "website", "website", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -526,18 +505,17 @@ func (client Client) DeleteBucketWebsite(bucketName string) error {
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// GetBucketWebsite gets the bucket's default page (index page) and the error page.
|
||||
//
|
||||
// bucketName the bucket name
|
||||
// GetBucketWebsite 获得Bucket的默认首页以及错误页。
|
||||
//
|
||||
// GetBucketWebsiteResponse the result object upon successful request. It's only valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 存储空间名称。
|
||||
//
|
||||
// GetBucketWebsiteResponse 操作成功的返回值,error为nil时该返回值有效。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) {
|
||||
var out GetBucketWebsiteResult
|
||||
params := map[string]interface{}{}
|
||||
params["website"] = nil
|
||||
resp, err := client.do("GET", bucketName, params, nil, nil)
|
||||
resp, err := client.do("GET", bucketName, "website", "website", nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -547,14 +525,15 @@ func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult
|
|||
return out, err
|
||||
}
|
||||
|
||||
// SetBucketCORS sets the bucket's CORS rules
|
||||
//
|
||||
// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
|
||||
// SetBucketCORS 设置Bucket的跨域访问(CORS)规则。
|
||||
//
|
||||
// bucketName the bucket name
|
||||
// corsRules the CORS rules to set. The related sample code is in sample/bucket_cors.go.
|
||||
// 跨域访问的更多信息,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 需要设置Website的Bucket。
|
||||
// corsRules 待设置的CORS规则。用法请参看示例代码sample/bucket_cors.go。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error {
|
||||
corsxml := CORSXML{}
|
||||
|
@ -579,9 +558,7 @@ func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) erro
|
|||
headers := map[string]string{}
|
||||
headers[HTTPHeaderContentType] = contentType
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["cors"] = nil
|
||||
resp, err := client.do("PUT", bucketName, params, headers, buffer)
|
||||
resp, err := client.do("PUT", bucketName, "cors", "cors", headers, buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -589,16 +566,15 @@ func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) erro
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// DeleteBucketCORS deletes the bucket's static website settings.
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// DeleteBucketCORS 删除Bucket的Website设置。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 需要删除cors设置的Bucket。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) DeleteBucketCORS(bucketName string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["cors"] = nil
|
||||
resp, err := client.do("DELETE", bucketName, params, nil, nil)
|
||||
resp, err := client.do("DELETE", bucketName, "cors", "cors", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -606,18 +582,18 @@ func (client Client) DeleteBucketCORS(bucketName string) error {
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// GetBucketCORS gets the bucket's CORS settings.
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// GetBucketCORSResult the result object upon successful request. It's only valid when error is nil.
|
||||
// GetBucketCORS 获得Bucket的CORS设置。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
// bucketName 存储空间名称。
|
||||
// GetBucketCORSResult 操作成功的返回值,error为nil时该返回值有效。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) {
|
||||
var out GetBucketCORSResult
|
||||
params := map[string]interface{}{}
|
||||
params["cors"] = nil
|
||||
resp, err := client.do("GET", bucketName, params, nil, nil)
|
||||
resp, err := client.do("GET", bucketName, "cors", "cors", nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -627,18 +603,17 @@ func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, erro
|
|||
return out, err
|
||||
}
|
||||
|
||||
// GetBucketInfo gets the bucket information.
|
||||
//
|
||||
// bucketName the bucket name.
|
||||
// GetBucketInfoResult the result object upon successful request. It's only valid when error is nil.
|
||||
// GetBucketInfo 获得Bucket的信息。
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
// bucketName 存储空间名称。
|
||||
// GetBucketInfoResult 操作成功的返回值,error为nil时该返回值有效。
|
||||
//
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) {
|
||||
var out GetBucketInfoResult
|
||||
params := map[string]interface{}{}
|
||||
params["bucketInfo"] = nil
|
||||
resp, err := client.do("GET", bucketName, params, nil, nil)
|
||||
resp, err := client.do("GET", bucketName, "bucketInfo", "bucketInfo", nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -648,9 +623,10 @@ func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, erro
|
|||
return out, err
|
||||
}
|
||||
|
||||
// UseCname sets the flag of using CName. By default it's false.
|
||||
//
|
||||
// isUseCname true: the endpoint has the CName, false: the endpoint does not have cname. Default is false.
|
||||
// UseCname 设置是否使用CNAME,默认不使用。
|
||||
//
|
||||
// isUseCname true设置endpoint格式是cname格式,false为非cname格式,默认false
|
||||
//
|
||||
func UseCname(isUseCname bool) ClientOption {
|
||||
return func(client *Client) {
|
||||
|
@ -659,10 +635,11 @@ func UseCname(isUseCname bool) ClientOption {
|
|||
}
|
||||
}
|
||||
|
||||
// Timeout sets the HTTP timeout in seconds.
|
||||
//
|
||||
// connectTimeoutSec HTTP timeout in seconds. Default is 10 seconds. 0 means infinite (not recommended)
|
||||
// readWriteTimeout HTTP read or write's timeout in seconds. Default is 20 seconds. 0 means infinite.
|
||||
// Timeout 设置HTTP超时时间。
|
||||
//
|
||||
// connectTimeoutSec HTTP链接超时时间,单位是秒,默认10秒。0表示永不超时。
|
||||
// readWriteTimeout HTTP发送接受数据超时时间,单位是秒,默认20秒。0表示永不超时。
|
||||
//
|
||||
func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
|
||||
return func(client *Client) {
|
||||
|
@ -672,16 +649,15 @@ func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
|
|||
time.Second * time.Duration(readWriteTimeout)
|
||||
client.Config.HTTPTimeout.HeaderTimeout =
|
||||
time.Second * time.Duration(readWriteTimeout)
|
||||
client.Config.HTTPTimeout.IdleConnTimeout =
|
||||
time.Second * time.Duration(readWriteTimeout)
|
||||
client.Config.HTTPTimeout.LongTimeout =
|
||||
time.Second * time.Duration(readWriteTimeout*10)
|
||||
}
|
||||
}
|
||||
|
||||
// SecurityToken sets the temporary user's SecurityToken.
|
||||
//
|
||||
// token STS token
|
||||
// SecurityToken 临时用户设置SecurityToken。
|
||||
//
|
||||
// token STS token
|
||||
//
|
||||
func SecurityToken(token string) ClientOption {
|
||||
return func(client *Client) {
|
||||
|
@ -689,9 +665,10 @@ func SecurityToken(token string) ClientOption {
|
|||
}
|
||||
}
|
||||
|
||||
// EnableMD5 enables MD5 validation.
|
||||
//
|
||||
// isEnableMD5 true: enable MD5 validation; false: disable MD5 validation.
|
||||
// EnableMD5 是否启用MD5校验,默认启用。
|
||||
//
|
||||
// isEnableMD5 true启用MD5校验,false不启用MD5校验
|
||||
//
|
||||
func EnableMD5(isEnableMD5 bool) ClientOption {
|
||||
return func(client *Client) {
|
||||
|
@ -699,9 +676,10 @@ func EnableMD5(isEnableMD5 bool) ClientOption {
|
|||
}
|
||||
}
|
||||
|
||||
// MD5ThresholdCalcInMemory sets the memory usage threshold for computing the MD5, default is 16MB.
|
||||
//
|
||||
// threshold the memory threshold in bytes. When the uploaded content is more than 16MB, the temp file is used for computing the MD5.
|
||||
// MD5ThresholdCalcInMemory 使用内存计算MD5值的上限,默认16MB。
|
||||
//
|
||||
// threshold 单位Byte。上传内容小于threshold在MD5在内存中计算,大于使用临时文件计算MD5
|
||||
//
|
||||
func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
|
||||
return func(client *Client) {
|
||||
|
@ -709,9 +687,10 @@ func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
|
|||
}
|
||||
}
|
||||
|
||||
// EnableCRC enables the CRC checksum. Default is true.
|
||||
//
|
||||
// isEnableCRC true: enable CRC checksum; false: disable the CRC checksum.
|
||||
// EnableCRC 上传是否启用CRC校验,默认启用。
|
||||
//
|
||||
// isEnableCRC true启用CRC校验,false不启用CRC校验
|
||||
//
|
||||
func EnableCRC(isEnableCRC bool) ClientOption {
|
||||
return func(client *Client) {
|
||||
|
@ -719,9 +698,10 @@ func EnableCRC(isEnableCRC bool) ClientOption {
|
|||
}
|
||||
}
|
||||
|
||||
// UserAgent specifies UserAgent. The default is aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2).
|
||||
//
|
||||
// userAgent the user agent string.
|
||||
// UserAgent 指定UserAgent,默认如下aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2)。
|
||||
//
|
||||
// userAgent user agent字符串。
|
||||
//
|
||||
func UserAgent(userAgent string) ClientOption {
|
||||
return func(client *Client) {
|
||||
|
@ -729,9 +709,10 @@ func UserAgent(userAgent string) ClientOption {
|
|||
}
|
||||
}
|
||||
|
||||
// Proxy sets the proxy (optional). The default is not using proxy.
|
||||
//
|
||||
// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 .
|
||||
// Proxy 设置代理服务器,默认不使用代理。
|
||||
//
|
||||
// proxyHost 代理服务器地址,格式是host或host:port
|
||||
//
|
||||
func Proxy(proxyHost string) ClientOption {
|
||||
return func(client *Client) {
|
||||
|
@ -741,11 +722,12 @@ func Proxy(proxyHost string) ClientOption {
|
|||
}
|
||||
}
|
||||
|
||||
// AuthProxy sets the proxy information with user name and password.
|
||||
//
|
||||
// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 .
|
||||
// proxyUser the proxy user name.
|
||||
// proxyPassword the proxy password.
|
||||
// AuthProxy 设置需要认证的代理服务器,默认不使用代理。
|
||||
//
|
||||
// proxyHost 代理服务器地址,格式是host或host:port
|
||||
// proxyUser 代理服务器认证的用户名
|
||||
// proxyPassword 代理服务器认证的用户密码
|
||||
//
|
||||
func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
|
||||
return func(client *Client) {
|
||||
|
@ -758,18 +740,9 @@ func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
|
|||
}
|
||||
}
|
||||
|
||||
//
|
||||
// HTTPClient sets the http.Client in use to the one passed in
|
||||
//
|
||||
func HTTPClient(HTTPClient *http.Client) ClientOption {
|
||||
return func(client *Client) {
|
||||
client.HTTPClient = HTTPClient
|
||||
}
|
||||
}
|
||||
|
||||
// Private
|
||||
func (client Client) do(method, bucketName string, params map[string]interface{},
|
||||
func (client Client) do(method, bucketName, urlParams, subResource string,
|
||||
headers map[string]string, data io.Reader) (*Response, error) {
|
||||
return client.Conn.Do(method, bucketName, "", params,
|
||||
headers, data, 0, nil)
|
||||
return client.Conn.Do(method, bucketName, "", urlParams,
|
||||
subResource, headers, data, 0, nil)
|
||||
}
|
||||
|
|
|
@ -4,44 +4,37 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
// HTTPTimeout defines HTTP timeout.
|
||||
// HTTPTimeout http timeout
|
||||
type HTTPTimeout struct {
|
||||
ConnectTimeout time.Duration
|
||||
ReadWriteTimeout time.Duration
|
||||
HeaderTimeout time.Duration
|
||||
LongTimeout time.Duration
|
||||
IdleConnTimeout time.Duration
|
||||
}
|
||||
|
||||
type HTTPMaxConns struct {
|
||||
MaxIdleConns int
|
||||
MaxIdleConnsPerHost int
|
||||
}
|
||||
|
||||
// Config defines oss configuration
|
||||
// Config oss configure
|
||||
type Config struct {
|
||||
Endpoint string // OSS endpoint
|
||||
AccessKeyID string // AccessId
|
||||
AccessKeySecret string // AccessKey
|
||||
RetryTimes uint // Retry count by default it's 5.
|
||||
UserAgent string // SDK name/version/system information
|
||||
IsDebug bool // Enable debug mode. Default is false.
|
||||
Timeout uint // Timeout in seconds. By default it's 60.
|
||||
SecurityToken string // STS Token
|
||||
IsCname bool // If cname is in the endpoint.
|
||||
HTTPTimeout HTTPTimeout // HTTP timeout
|
||||
HTTPMaxConns HTTPMaxConns // Http max connections
|
||||
IsUseProxy bool // Flag of using proxy.
|
||||
ProxyHost string // Flag of using proxy host.
|
||||
IsAuthProxy bool // Flag of needing authentication.
|
||||
ProxyUser string // Proxy user
|
||||
ProxyPassword string // Proxy password
|
||||
IsEnableMD5 bool // Flag of enabling MD5 for upload.
|
||||
MD5Threshold int64 // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
|
||||
IsEnableCRC bool // Flag of enabling CRC for upload.
|
||||
Endpoint string // oss地址
|
||||
AccessKeyID string // accessId
|
||||
AccessKeySecret string // accessKey
|
||||
RetryTimes uint // 失败重试次数,默认5
|
||||
UserAgent string // SDK名称/版本/系统信息
|
||||
IsDebug bool // 是否开启调试模式,默认false
|
||||
Timeout uint // 超时时间,默认60s
|
||||
SecurityToken string // STS Token
|
||||
IsCname bool // Endpoint是否是CNAME
|
||||
HTTPTimeout HTTPTimeout // HTTP的超时时间设置
|
||||
IsUseProxy bool // 是否使用代理
|
||||
ProxyHost string // 代理服务器地址
|
||||
IsAuthProxy bool // 代理服务器是否使用用户认证
|
||||
ProxyUser string // 代理服务器认证用户名
|
||||
ProxyPassword string // 代理服务器认证密码
|
||||
IsEnableMD5 bool // 上传数据时是否启用MD5校验
|
||||
MD5Threshold int64 // 内存中计算MD5的上线大小,大于该值启用临时文件,单位Byte
|
||||
IsEnableCRC bool // 上传数据时是否启用CRC64校验
|
||||
}
|
||||
|
||||
// getDefaultOssConfig gets the default configuration.
|
||||
// 获取默认配置
|
||||
func getDefaultOssConfig() *Config {
|
||||
config := Config{}
|
||||
|
||||
|
@ -50,8 +43,8 @@ func getDefaultOssConfig() *Config {
|
|||
config.AccessKeySecret = ""
|
||||
config.RetryTimes = 5
|
||||
config.IsDebug = false
|
||||
config.UserAgent = userAgent()
|
||||
config.Timeout = 60 // Seconds
|
||||
config.UserAgent = userAgent
|
||||
config.Timeout = 60 // seconds
|
||||
config.SecurityToken = ""
|
||||
config.IsCname = false
|
||||
|
||||
|
@ -59,9 +52,6 @@ func getDefaultOssConfig() *Config {
|
|||
config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
|
||||
config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s
|
||||
config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s
|
||||
config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s
|
||||
config.HTTPMaxConns.MaxIdleConns = 100
|
||||
config.HTTPMaxConns.MaxIdleConnsPerHost = 100
|
||||
|
||||
config.IsUseProxy = false
|
||||
config.ProxyHost = ""
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"hash"
|
||||
|
@ -14,178 +13,64 @@ import (
|
|||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Conn defines OSS Conn
|
||||
// Conn oss conn
|
||||
type Conn struct {
|
||||
config *Config
|
||||
url *urlMaker
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore", "callback", "callback-var"}
|
||||
// init 初始化Conn
|
||||
func (conn *Conn) init(config *Config, urlMaker *urlMaker) error {
|
||||
httpTimeOut := conn.config.HTTPTimeout
|
||||
|
||||
// init initializes Conn
|
||||
func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error {
|
||||
if client == nil {
|
||||
// New transport
|
||||
transport := newTransport(conn, config)
|
||||
|
||||
// Proxy
|
||||
if conn.config.IsUseProxy {
|
||||
proxyURL, err := url.Parse(config.ProxyHost)
|
||||
// new Transport
|
||||
transport := &http.Transport{
|
||||
Dial: func(netw, addr string) (net.Conn, error) {
|
||||
conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
transport.Proxy = http.ProxyURL(proxyURL)
|
||||
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
|
||||
},
|
||||
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
|
||||
}
|
||||
|
||||
// Proxy
|
||||
if conn.config.IsUseProxy {
|
||||
proxyURL, err := url.Parse(config.ProxyHost)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client = &http.Client{Transport: transport}
|
||||
transport.Proxy = http.ProxyURL(proxyURL)
|
||||
}
|
||||
|
||||
conn.config = config
|
||||
conn.url = urlMaker
|
||||
conn.client = client
|
||||
conn.client = &http.Client{Transport: transport}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do sends request and returns the response
|
||||
func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
|
||||
// Do 处理请求,返回响应结果。
|
||||
func (conn Conn) Do(method, bucketName, objectName, urlParams, subResource string, headers map[string]string,
|
||||
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
|
||||
urlParams := conn.getURLParams(params)
|
||||
subResource := conn.getSubResource(params)
|
||||
uri := conn.url.getURL(bucketName, objectName, urlParams)
|
||||
resource := conn.url.getResource(bucketName, objectName, subResource)
|
||||
return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
|
||||
}
|
||||
|
||||
// DoURL sends the request with signed URL and returns the response result.
|
||||
func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
|
||||
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
|
||||
// Get URI from signedURL
|
||||
uri, err := url.ParseRequestURI(signedURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := strings.ToUpper(string(method))
|
||||
req := &http.Request{
|
||||
Method: m,
|
||||
URL: uri,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: uri.Host,
|
||||
}
|
||||
|
||||
tracker := &readerTracker{completedBytes: 0}
|
||||
fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
|
||||
if fd != nil {
|
||||
defer func() {
|
||||
fd.Close()
|
||||
os.Remove(fd.Name())
|
||||
}()
|
||||
}
|
||||
|
||||
if conn.config.IsAuthProxy {
|
||||
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
|
||||
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
|
||||
req.Header.Set("Proxy-Authorization", basic)
|
||||
}
|
||||
|
||||
req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
|
||||
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
|
||||
|
||||
if headers != nil {
|
||||
for k, v := range headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Transfer started
|
||||
event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
|
||||
publishProgress(listener, event)
|
||||
|
||||
resp, err := conn.client.Do(req)
|
||||
if err != nil {
|
||||
// Transfer failed
|
||||
event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
|
||||
publishProgress(listener, event)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Transfer completed
|
||||
event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
|
||||
publishProgress(listener, event)
|
||||
|
||||
return conn.handleResponse(resp, crc)
|
||||
}
|
||||
|
||||
func (conn Conn) getURLParams(params map[string]interface{}) string {
|
||||
// Sort
|
||||
keys := make([]string, 0, len(params))
|
||||
for k := range params {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
// Serialize
|
||||
var buf bytes.Buffer
|
||||
for _, k := range keys {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteByte('&')
|
||||
}
|
||||
buf.WriteString(url.QueryEscape(k))
|
||||
if params[k] != nil {
|
||||
buf.WriteString("=" + url.QueryEscape(params[k].(string)))
|
||||
}
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (conn Conn) getSubResource(params map[string]interface{}) string {
|
||||
// Sort
|
||||
keys := make([]string, 0, len(params))
|
||||
for k := range params {
|
||||
if conn.isParamSign(k) {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
// Serialize
|
||||
var buf bytes.Buffer
|
||||
for _, k := range keys {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteByte('&')
|
||||
}
|
||||
buf.WriteString(k)
|
||||
if params[k] != nil {
|
||||
buf.WriteString("=" + params[k].(string))
|
||||
}
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (conn Conn) isParamSign(paramKey string) bool {
|
||||
for _, k := range signKeyList {
|
||||
if paramKey == k {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
|
||||
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
|
||||
method = strings.ToUpper(method)
|
||||
if !conn.config.IsUseProxy {
|
||||
uri.Opaque = uri.Path
|
||||
}
|
||||
req := &http.Request{
|
||||
Method: method,
|
||||
URL: uri,
|
||||
|
@ -227,72 +112,33 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
|
|||
|
||||
conn.signHeader(req, canonicalizedResource)
|
||||
|
||||
// Transfer started
|
||||
// transfer started
|
||||
event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
|
||||
publishProgress(listener, event)
|
||||
|
||||
resp, err := conn.client.Do(req)
|
||||
if err != nil {
|
||||
// Transfer failed
|
||||
// transfer failed
|
||||
event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
|
||||
publishProgress(listener, event)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Transfer completed
|
||||
// transfer completed
|
||||
event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
|
||||
publishProgress(listener, event)
|
||||
|
||||
return conn.handleResponse(resp, crc)
|
||||
}
|
||||
|
||||
func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
|
||||
if conn.config.SecurityToken != "" {
|
||||
params[HTTPParamSecurityToken] = conn.config.SecurityToken
|
||||
}
|
||||
subResource := conn.getSubResource(params)
|
||||
canonicalizedResource := conn.url.getResource(bucketName, objectName, subResource)
|
||||
|
||||
m := strings.ToUpper(string(method))
|
||||
req := &http.Request{
|
||||
Method: m,
|
||||
Header: make(http.Header),
|
||||
}
|
||||
|
||||
if conn.config.IsAuthProxy {
|
||||
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
|
||||
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
|
||||
req.Header.Set("Proxy-Authorization", basic)
|
||||
}
|
||||
|
||||
req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10))
|
||||
req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
|
||||
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
|
||||
|
||||
if headers != nil {
|
||||
for k, v := range headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
signedStr := conn.getSignedStr(req, canonicalizedResource)
|
||||
|
||||
params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
|
||||
params[HTTPParamAccessKeyID] = conn.config.AccessKeyID
|
||||
params[HTTPParamSignature] = signedStr
|
||||
|
||||
urlParams := conn.getURLParams(params)
|
||||
return conn.url.getSignURL(bucketName, objectName, urlParams)
|
||||
}
|
||||
|
||||
// handleBody handles request body
|
||||
// handle request body
|
||||
func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
|
||||
listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
|
||||
var file *os.File
|
||||
var crc hash.Hash64
|
||||
reader := body
|
||||
|
||||
// Length
|
||||
// length
|
||||
switch v := body.(type) {
|
||||
case *bytes.Buffer:
|
||||
req.ContentLength = int64(v.Len())
|
||||
|
@ -307,20 +153,20 @@ func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
|
|||
}
|
||||
req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
|
||||
|
||||
// MD5
|
||||
// md5
|
||||
if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
|
||||
md5 := ""
|
||||
reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
|
||||
req.Header.Set(HTTPHeaderContentMD5, md5)
|
||||
}
|
||||
|
||||
// CRC
|
||||
// crc
|
||||
if reader != nil && conn.config.IsEnableCRC {
|
||||
crc = NewCRC(crcTable(), initCRC)
|
||||
reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
|
||||
}
|
||||
|
||||
// HTTP body
|
||||
// http body
|
||||
rc, ok := reader.(io.ReadCloser)
|
||||
if !ok && reader != nil {
|
||||
rc = ioutil.NopCloser(reader)
|
||||
|
@ -335,7 +181,7 @@ func tryGetFileSize(f *os.File) int64 {
|
|||
return fInfo.Size()
|
||||
}
|
||||
|
||||
// handleResponse handles response
|
||||
// handle response
|
||||
func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
|
||||
var cliCRC uint64
|
||||
var srvCRC uint64
|
||||
|
@ -350,28 +196,24 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
|
|||
}
|
||||
|
||||
if len(respBody) == 0 {
|
||||
err = ServiceError{
|
||||
StatusCode: statusCode,
|
||||
RequestID: resp.Header.Get(HTTPHeaderOssRequestID),
|
||||
}
|
||||
// no error in response body
|
||||
err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status)
|
||||
} else {
|
||||
// Response contains storage service error object, unmarshal
|
||||
// response contains storage service error object, unmarshal
|
||||
srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
|
||||
resp.Header.Get(HTTPHeaderOssRequestID))
|
||||
if errIn != nil { // error unmarshaling the error response
|
||||
err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
|
||||
} else {
|
||||
err = srvErr
|
||||
if err != nil { // error unmarshaling the error response
|
||||
err = errIn
|
||||
}
|
||||
err = srvErr
|
||||
}
|
||||
|
||||
return &Response{
|
||||
StatusCode: resp.StatusCode,
|
||||
Headers: resp.Header,
|
||||
Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
|
||||
}, err
|
||||
} else if statusCode >= 300 && statusCode <= 307 {
|
||||
// OSS use 3xx, but response has no body
|
||||
// oss use 3xx, but response has no body
|
||||
err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
|
||||
return &Response{
|
||||
StatusCode: resp.StatusCode,
|
||||
|
@ -397,7 +239,7 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
|
|||
|
||||
func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
|
||||
if contentLen == 0 || contentLen > md5Threshold {
|
||||
// Huge body, use temporary file
|
||||
// huge body, use temporary file
|
||||
tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
|
||||
if tempFile != nil {
|
||||
io.Copy(tempFile, body)
|
||||
|
@ -410,7 +252,7 @@ func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader,
|
|||
reader = tempFile
|
||||
}
|
||||
} else {
|
||||
// Small body, use memory
|
||||
// small body, use memory
|
||||
buf, _ := ioutil.ReadAll(body)
|
||||
sum := md5.Sum(buf)
|
||||
b64 = base64.StdEncoding.EncodeToString(sum[:])
|
||||
|
@ -430,11 +272,9 @@ func readResponseBody(resp *http.Response) ([]byte, error) {
|
|||
|
||||
func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
|
||||
var storageErr ServiceError
|
||||
|
||||
if err := xml.Unmarshal(body, &storageErr); err != nil {
|
||||
return storageErr, err
|
||||
}
|
||||
|
||||
storageErr.StatusCode = statusCode
|
||||
storageErr.RequestID = requestID
|
||||
storageErr.RawMessage = string(body)
|
||||
|
@ -449,15 +289,7 @@ func xmlUnmarshal(body io.Reader, v interface{}) error {
|
|||
return xml.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func jsonUnmarshal(body io.Reader, v interface{}) error {
|
||||
data, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
// timeoutConn handles HTTP timeout
|
||||
// Handle http timeout
|
||||
type timeoutConn struct {
|
||||
conn net.Conn
|
||||
timeout time.Duration
|
||||
|
@ -511,7 +343,7 @@ func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
|
|||
return c.conn.SetWriteDeadline(t)
|
||||
}
|
||||
|
||||
// UrlMaker builds URL and resource
|
||||
// UrlMaker - build url and resource
|
||||
const (
|
||||
urlTypeCname = 1
|
||||
urlTypeIP = 2
|
||||
|
@ -519,13 +351,13 @@ const (
|
|||
)
|
||||
|
||||
type urlMaker struct {
|
||||
Scheme string // HTTP or HTTPS
|
||||
NetLoc string // Host or IP
|
||||
Type int // 1 CNAME, 2 IP, 3 ALIYUN
|
||||
IsProxy bool // Proxy
|
||||
Scheme string // http or https
|
||||
NetLoc string // host or ip
|
||||
Type int // 1 CNAME 2 IP 3 ALIYUN
|
||||
IsProxy bool // proxy
|
||||
}
|
||||
|
||||
// Init parses endpoint
|
||||
// Parse endpoint
|
||||
func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
|
||||
if strings.HasPrefix(endpoint, "http://") {
|
||||
um.Scheme = "http"
|
||||
|
@ -541,11 +373,7 @@ func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
|
|||
host, _, err := net.SplitHostPort(um.NetLoc)
|
||||
if err != nil {
|
||||
host = um.NetLoc
|
||||
if host[0] == '[' && host[len(host)-1] == ']' {
|
||||
host = host[1 : len(host)-1]
|
||||
}
|
||||
}
|
||||
|
||||
ip := net.ParseIP(host)
|
||||
if ip != nil {
|
||||
um.Type = urlTypeIP
|
||||
|
@ -557,32 +385,14 @@ func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
|
|||
um.IsProxy = isProxy
|
||||
}
|
||||
|
||||
// getURL gets URL
|
||||
// Build URL
|
||||
func (um urlMaker) getURL(bucket, object, params string) *url.URL {
|
||||
host, path := um.buildURL(bucket, object)
|
||||
addr := ""
|
||||
if params == "" {
|
||||
addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path)
|
||||
} else {
|
||||
addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
|
||||
}
|
||||
uri, _ := url.ParseRequestURI(addr)
|
||||
return uri
|
||||
}
|
||||
|
||||
// getSignURL gets sign URL
|
||||
func (um urlMaker) getSignURL(bucket, object, params string) string {
|
||||
host, path := um.buildURL(bucket, object)
|
||||
return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
|
||||
}
|
||||
|
||||
// buildURL builds URL
|
||||
func (um urlMaker) buildURL(bucket, object string) (string, string) {
|
||||
var host = ""
|
||||
var path = ""
|
||||
|
||||
object = url.QueryEscape(object)
|
||||
object = strings.Replace(object, "+", "%20", -1)
|
||||
if !um.IsProxy {
|
||||
object = url.QueryEscape(object)
|
||||
}
|
||||
|
||||
if um.Type == urlTypeCname {
|
||||
host = um.NetLoc
|
||||
|
@ -605,10 +415,17 @@ func (um urlMaker) buildURL(bucket, object string) (string, string) {
|
|||
}
|
||||
}
|
||||
|
||||
return host, path
|
||||
uri := &url.URL{
|
||||
Scheme: um.Scheme,
|
||||
Host: host,
|
||||
Path: path,
|
||||
RawQuery: params,
|
||||
}
|
||||
|
||||
return uri
|
||||
}
|
||||
|
||||
// getResource gets canonicalized resource
|
||||
// Canonicalized Resource
|
||||
func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
|
||||
if subResource != "" {
|
||||
subResource = "?" + subResource
|
||||
|
|
|
@ -2,77 +2,35 @@ package oss
|
|||
|
||||
import "os"
|
||||
|
||||
// ACLType bucket/object ACL
|
||||
// ACLType Bucket/Object的访问控制
|
||||
type ACLType string
|
||||
|
||||
const (
|
||||
// ACLPrivate definition : private read and write
|
||||
// ACLPrivate 私有读写
|
||||
ACLPrivate ACLType = "private"
|
||||
|
||||
// ACLPublicRead definition : public read and private write
|
||||
// ACLPublicRead 公共读私有写
|
||||
ACLPublicRead ACLType = "public-read"
|
||||
|
||||
// ACLPublicReadWrite definition : public read and public write
|
||||
// ACLPublicReadWrite 公共读写
|
||||
ACLPublicReadWrite ACLType = "public-read-write"
|
||||
|
||||
// ACLDefault Object. It's only applicable for object.
|
||||
// ACLDefault Object默认权限,Bucket无此权限
|
||||
ACLDefault ACLType = "default"
|
||||
)
|
||||
|
||||
// MetadataDirectiveType specifying whether use the metadata of source object when copying object.
|
||||
// MetadataDirectiveType 对象COPY时新对象是否使用原对象的Meta
|
||||
type MetadataDirectiveType string
|
||||
|
||||
const (
|
||||
// MetaCopy the target object's metadata is copied from the source one
|
||||
// MetaCopy 目标对象使用源对象的META
|
||||
MetaCopy MetadataDirectiveType = "COPY"
|
||||
|
||||
// MetaReplace the target object's metadata is created as part of the copy request (not same as the source one)
|
||||
// MetaReplace 目标对象使用自定义的META
|
||||
MetaReplace MetadataDirectiveType = "REPLACE"
|
||||
)
|
||||
|
||||
// StorageClassType bucket storage type
|
||||
type StorageClassType string
|
||||
|
||||
const (
|
||||
// StorageStandard standard
|
||||
StorageStandard StorageClassType = "Standard"
|
||||
|
||||
// StorageIA infrequent access
|
||||
StorageIA StorageClassType = "IA"
|
||||
|
||||
// StorageArchive archive
|
||||
StorageArchive StorageClassType = "Archive"
|
||||
)
|
||||
|
||||
// PayerType the type of request payer
|
||||
type PayerType string
|
||||
|
||||
const (
|
||||
// Requester the requester who send the request
|
||||
Requester PayerType = "requester"
|
||||
)
|
||||
|
||||
// HTTPMethod HTTP request method
|
||||
type HTTPMethod string
|
||||
|
||||
const (
|
||||
// HTTPGet HTTP GET
|
||||
HTTPGet HTTPMethod = "GET"
|
||||
|
||||
// HTTPPut HTTP PUT
|
||||
HTTPPut HTTPMethod = "PUT"
|
||||
|
||||
// HTTPHead HTTP HEAD
|
||||
HTTPHead HTTPMethod = "HEAD"
|
||||
|
||||
// HTTPPost HTTP POST
|
||||
HTTPPost HTTPMethod = "POST"
|
||||
|
||||
// HTTPDelete HTTP DELETE
|
||||
HTTPDelete HTTPMethod = "DELETE"
|
||||
)
|
||||
|
||||
// HTTP headers
|
||||
// Http头标签
|
||||
const (
|
||||
HTTPHeaderAcceptEncoding string = "Accept-Encoding"
|
||||
HTTPHeaderAuthorization = "Authorization"
|
||||
|
@ -103,7 +61,6 @@ const (
|
|||
HTTPHeaderOssObjectACL = "X-Oss-Object-Acl"
|
||||
HTTPHeaderOssSecurityToken = "X-Oss-Security-Token"
|
||||
HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption"
|
||||
HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id"
|
||||
HTTPHeaderOssCopySource = "X-Oss-Copy-Source"
|
||||
HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range"
|
||||
HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match"
|
||||
|
@ -114,32 +71,19 @@ const (
|
|||
HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position"
|
||||
HTTPHeaderOssRequestID = "X-Oss-Request-Id"
|
||||
HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma"
|
||||
HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target"
|
||||
HTTPHeaderOssStorageClass = "X-Oss-Storage-Class"
|
||||
HTTPHeaderOssCallback = "X-Oss-Callback"
|
||||
HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var"
|
||||
HTTPHeaderOSSRequester = "X-Oss-Request-Payer"
|
||||
)
|
||||
|
||||
// HTTP Param
|
||||
// 其它常量
|
||||
const (
|
||||
HTTPParamExpires = "Expires"
|
||||
HTTPParamAccessKeyID = "OSSAccessKeyId"
|
||||
HTTPParamSignature = "Signature"
|
||||
HTTPParamSecurityToken = "security-token"
|
||||
)
|
||||
|
||||
// Other constants
|
||||
const (
|
||||
MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
|
||||
MinPartSize = 100 * 1024 // Min part size, 100KB
|
||||
|
||||
FilePermMode = os.FileMode(0664) // Default file permission
|
||||
|
||||
TempFilePrefix = "oss-go-temp-" // Temp file prefix
|
||||
TempFileSuffix = ".temp" // Temp file suffix
|
||||
|
||||
CheckpointFileSuffix = ".cp" // Checkpoint file suffix
|
||||
|
||||
Version = "1.9.2" // Go SDK version
|
||||
MaxPartSize = 5 * 1024 * 1024 * 1024 // 文件片最大值,5GB
|
||||
MinPartSize = 100 * 1024 // 文件片最小值,100KBß
|
||||
|
||||
FilePermMode = os.FileMode(0664) // 新建文件默认权限
|
||||
|
||||
TempFilePrefix = "oss-go-temp-" // 临时文件前缀
|
||||
TempFileSuffix = ".temp" // 临时文件后缀
|
||||
|
||||
CheckpointFileSuffix = ".cp" // Checkpoint文件后缀
|
||||
|
||||
Version = "1.3.0" // Go sdk版本
|
||||
)
|
||||
|
|
|
@ -11,11 +11,11 @@ type digest struct {
|
|||
tab *crc64.Table
|
||||
}
|
||||
|
||||
// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
|
||||
// NewCRC creates a new hash.Hash64 computing the CRC-64 checksum
|
||||
// using the polynomial represented by the Table.
|
||||
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
|
||||
|
||||
// Size returns the number of bytes sum will return.
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (d *digest) Size() int { return crc64.Size }
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
|
@ -24,7 +24,7 @@ func (d *digest) Size() int { return crc64.Size }
|
|||
// are a multiple of the block size.
|
||||
func (d *digest) BlockSize() int { return 1 }
|
||||
|
||||
// Reset resets the hash to its initial state.
|
||||
// Reset resets the Hash to its initial state.
|
||||
func (d *digest) Reset() { d.crc = 0 }
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
|
||||
|
@ -34,7 +34,7 @@ func (d *digest) Write(p []byte) (n int, err error) {
|
|||
return len(p), nil
|
||||
}
|
||||
|
||||
// Sum64 returns CRC64 value.
|
||||
// Sum64 returns crc64 value.
|
||||
func (d *digest) Sum64() uint64 { return d.crc }
|
||||
|
||||
// Sum returns hash value.
|
||||
|
@ -42,82 +42,3 @@ func (d *digest) Sum(in []byte) []byte {
|
|||
s := d.Sum64()
|
||||
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
|
||||
}
|
||||
|
||||
// gf2Dim dimension of GF(2) vectors (length of CRC)
|
||||
const gf2Dim int = 64
|
||||
|
||||
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
|
||||
var sum uint64
|
||||
for i := 0; vec != 0; i++ {
|
||||
if vec&1 != 0 {
|
||||
sum ^= mat[i]
|
||||
}
|
||||
|
||||
vec >>= 1
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func gf2MatrixSquare(square []uint64, mat []uint64) {
|
||||
for n := 0; n < gf2Dim; n++ {
|
||||
square[n] = gf2MatrixTimes(mat, mat[n])
|
||||
}
|
||||
}
|
||||
|
||||
// CRC64Combine combines CRC64
|
||||
func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
|
||||
var even [gf2Dim]uint64 // Even-power-of-two zeros operator
|
||||
var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator
|
||||
|
||||
// Degenerate case
|
||||
if len2 == 0 {
|
||||
return crc1
|
||||
}
|
||||
|
||||
// Put operator for one zero bit in odd
|
||||
odd[0] = crc64.ECMA // CRC64 polynomial
|
||||
var row uint64 = 1
|
||||
for n := 1; n < gf2Dim; n++ {
|
||||
odd[n] = row
|
||||
row <<= 1
|
||||
}
|
||||
|
||||
// Put operator for two zero bits in even
|
||||
gf2MatrixSquare(even[:], odd[:])
|
||||
|
||||
// Put operator for four zero bits in odd
|
||||
gf2MatrixSquare(odd[:], even[:])
|
||||
|
||||
// Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even
|
||||
for {
|
||||
// Apply zeros operator for this bit of len2
|
||||
gf2MatrixSquare(even[:], odd[:])
|
||||
|
||||
if len2&1 != 0 {
|
||||
crc1 = gf2MatrixTimes(even[:], crc1)
|
||||
}
|
||||
|
||||
len2 >>= 1
|
||||
|
||||
// If no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Another iteration of the loop with odd and even swapped
|
||||
gf2MatrixSquare(odd[:], even[:])
|
||||
if len2&1 != 0 {
|
||||
crc1 = gf2MatrixTimes(odd[:], crc1)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// If no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Return combined CRC
|
||||
crc1 ^= crc2
|
||||
return crc1
|
||||
}
|
||||
|
|
|
@ -5,81 +5,53 @@ import (
|
|||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// DownloadFile downloads files with multipart download.
|
||||
//
|
||||
// objectKey the object key.
|
||||
// filePath the local file to download from objectKey in OSS.
|
||||
// partSize the part size in bytes.
|
||||
// options object's constraints, check out GetObject for the reference.
|
||||
// DownloadFile 分片下载文件
|
||||
//
|
||||
// error it's nil when the call succeeds, otherwise it's an error object.
|
||||
// objectKey object key。
|
||||
// filePath 本地文件。objectKey下载到文件。
|
||||
// partSize 本次上传文件片的大小,字节数。比如100 * 1024为每片100KB。
|
||||
// options Object的属性限制项。详见GetObject。
|
||||
//
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
|
||||
if partSize < 1 {
|
||||
return errors.New("oss: part size smaller than 1")
|
||||
if partSize < 1 || partSize > MaxPartSize {
|
||||
return errors.New("oss: part size invalid range (1, 5GB]")
|
||||
}
|
||||
|
||||
uRange, err := getRangeConfig(options)
|
||||
cpConf, err := getCpConfig(options, filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, filePath)
|
||||
if cpFilePath != "" {
|
||||
return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
|
||||
}
|
||||
if cpConf.IsEnable {
|
||||
return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
|
||||
}
|
||||
|
||||
return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
|
||||
return bucket.downloadFile(objectKey, filePath, partSize, options, routines)
|
||||
}
|
||||
|
||||
func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destFile string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
|
||||
absPath, _ := filepath.Abs(destFile)
|
||||
cpFileName := getCpFileName(src, absPath)
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
// ----- 并发无断点的下载 -----
|
||||
|
||||
// getRangeConfig gets the download range from the options.
|
||||
func getRangeConfig(options []Option) (*unpackedRange, error) {
|
||||
rangeOpt, err := findOption(options, HTTPHeaderRange, nil)
|
||||
if err != nil || rangeOpt == nil {
|
||||
return nil, err
|
||||
}
|
||||
return parseRange(rangeOpt.(string))
|
||||
}
|
||||
|
||||
// ----- concurrent download without checkpoint -----
|
||||
|
||||
// downloadWorkerArg is download worker's parameters
|
||||
// 工作协程参数
|
||||
type downloadWorkerArg struct {
|
||||
bucket *Bucket
|
||||
key string
|
||||
filePath string
|
||||
options []Option
|
||||
hook downloadPartHook
|
||||
enableCRC bool
|
||||
bucket *Bucket
|
||||
key string
|
||||
filePath string
|
||||
options []Option
|
||||
hook downloadPartHook
|
||||
}
|
||||
|
||||
// downloadPartHook is hook for test
|
||||
// Hook用于测试
|
||||
type downloadPartHook func(part downloadPart) error
|
||||
|
||||
var downloadPartHooker downloadPartHook = defaultDownloadPartHook
|
||||
|
@ -88,15 +60,15 @@ func defaultDownloadPartHook(part downloadPart) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject.
|
||||
// 默认ProgressListener,屏蔽GetObject的Options中ProgressListener
|
||||
type defaultDownloadProgressListener struct {
|
||||
}
|
||||
|
||||
// ProgressChanged no-ops
|
||||
// ProgressChanged 静默处理
|
||||
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
|
||||
}
|
||||
|
||||
// downloadWorker
|
||||
// 工作协程
|
||||
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
|
||||
for part := range jobs {
|
||||
if err := arg.hook(part); err != nil {
|
||||
|
@ -104,11 +76,11 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
|
|||
break
|
||||
}
|
||||
|
||||
// Resolve options
|
||||
// resolve options
|
||||
r := Range(part.Start, part.End)
|
||||
p := Progress(&defaultDownloadProgressListener{})
|
||||
opts := make([]Option, len(arg.options)+2)
|
||||
// Append orderly, can not be reversed!
|
||||
// append orderly, can not be reversed!
|
||||
opts = append(opts, arg.options...)
|
||||
opts = append(opts, r, p)
|
||||
|
||||
|
@ -119,14 +91,6 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
|
|||
}
|
||||
defer rd.Close()
|
||||
|
||||
var crcCalc hash.Hash64
|
||||
if arg.enableCRC {
|
||||
crcCalc = crc64.New(crcTable())
|
||||
contentLen := part.End - part.Start + 1
|
||||
rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
select {
|
||||
case <-die:
|
||||
return
|
||||
|
@ -138,31 +102,25 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
|
|||
failed <- err
|
||||
break
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
_, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET)
|
||||
_, err = fd.Seek(part.Start, os.SEEK_SET)
|
||||
if err != nil {
|
||||
fd.Close()
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
_, err = io.Copy(fd, rd)
|
||||
if err != nil {
|
||||
fd.Close()
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
if arg.enableCRC {
|
||||
part.CRC64 = crcCalc.Sum64()
|
||||
}
|
||||
|
||||
fd.Close()
|
||||
results <- part
|
||||
}
|
||||
}
|
||||
|
||||
// downloadScheduler
|
||||
// 调度协程
|
||||
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
|
||||
for _, part := range parts {
|
||||
jobs <- part
|
||||
|
@ -170,34 +128,39 @@ func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
|
|||
close(jobs)
|
||||
}
|
||||
|
||||
// downloadPart defines download part
|
||||
// 下载片
|
||||
type downloadPart struct {
|
||||
Index int // Part number, starting from 0
|
||||
Start int64 // Start index
|
||||
End int64 // End index
|
||||
Offset int64 // Offset
|
||||
CRC64 uint64 // CRC check value of part
|
||||
Index int // 片序号,从0开始编号
|
||||
Start int64 // 片起始位置
|
||||
End int64 // 片结束位置
|
||||
}
|
||||
|
||||
// getDownloadParts gets download parts
|
||||
func getDownloadParts(objectSize, partSize int64, uRange *unpackedRange) []downloadPart {
|
||||
// 文件分片
|
||||
func getDownloadParts(bucket *Bucket, objectKey string, partSize int64) ([]downloadPart, error) {
|
||||
meta, err := bucket.GetObjectDetailedMeta(objectKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
parts := []downloadPart{}
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
part := downloadPart{}
|
||||
i := 0
|
||||
start, end := adjustRange(uRange, objectSize)
|
||||
for offset := start; offset < end; offset += partSize {
|
||||
for offset := int64(0); offset < objectSize; offset += partSize {
|
||||
part.Index = i
|
||||
part.Start = offset
|
||||
part.End = GetPartEnd(offset, end, partSize)
|
||||
part.Offset = start
|
||||
part.CRC64 = 0
|
||||
part.End = GetPartEnd(offset, objectSize, partSize)
|
||||
parts = append(parts, part)
|
||||
i++
|
||||
}
|
||||
return parts
|
||||
return parts, nil
|
||||
}
|
||||
|
||||
// getObjectBytes gets object bytes length
|
||||
// 文件大小
|
||||
func getObjectBytes(parts []downloadPart) int64 {
|
||||
var ob int64
|
||||
for _, part := range parts {
|
||||
|
@ -206,59 +169,24 @@ func getObjectBytes(parts []downloadPart) int64 {
|
|||
return ob
|
||||
}
|
||||
|
||||
// combineCRCInParts caculates the total CRC of continuous parts
|
||||
func combineCRCInParts(dps []downloadPart) uint64 {
|
||||
if dps == nil || len(dps) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
crc := dps[0].CRC64
|
||||
for i := 1; i < len(dps); i++ {
|
||||
crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
|
||||
}
|
||||
|
||||
return crc
|
||||
}
|
||||
|
||||
// downloadFile downloads file concurrently without checkpoint.
|
||||
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *unpackedRange) error {
|
||||
// 并发无断点续传的下载
|
||||
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
|
||||
tempFilePath := filePath + TempFileSuffix
|
||||
listener := getProgressListener(options)
|
||||
|
||||
payerOptions := []Option{}
|
||||
payer := getPayer(options)
|
||||
if payer != "" {
|
||||
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
|
||||
}
|
||||
|
||||
// If the file does not exist, create one. If exists, the download will overwrite it.
|
||||
// 如果文件不存在则创建,存在不清空,下载分片会重写文件内容
|
||||
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fd.Close()
|
||||
|
||||
meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
|
||||
// 分割文件
|
||||
parts, err := getDownloadParts(&bucket, objectKey, partSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enableCRC := false
|
||||
expectedCRC := (uint64)(0)
|
||||
if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
|
||||
if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
|
||||
enableCRC = true
|
||||
expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// Get the parts of the file
|
||||
parts := getDownloadParts(objectSize, partSize, uRange)
|
||||
jobs := make(chan downloadPart, len(parts))
|
||||
results := make(chan downloadPart, len(parts))
|
||||
failed := make(chan error)
|
||||
|
@ -269,23 +197,24 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
|
|||
event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the download workers
|
||||
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
|
||||
// 启动工作协程
|
||||
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go downloadWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Download parts concurrently
|
||||
// 并发上传分片
|
||||
go downloadScheduler(jobs, parts)
|
||||
|
||||
// Waiting for parts download finished
|
||||
// 等待分片下载完成
|
||||
completed := 0
|
||||
ps := make([]downloadPart, len(parts))
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
ps[part.Index] = part
|
||||
completedBytes += (part.End - part.Start + 1)
|
||||
parts[part.Index].CRC64 = part.CRC64
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
|
@ -303,44 +232,32 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
|
|||
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
|
||||
publishProgress(listener, event)
|
||||
|
||||
if enableCRC {
|
||||
actualCRC := combineCRCInParts(parts)
|
||||
err = checkDownloadCRC(actualCRC, expectedCRC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return os.Rename(tempFilePath, filePath)
|
||||
}
|
||||
|
||||
// ----- Concurrent download with chcekpoint -----
|
||||
// ----- 并发有断点的下载 -----
|
||||
|
||||
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
|
||||
|
||||
type downloadCheckpoint struct {
|
||||
Magic string // Magic
|
||||
MD5 string // Checkpoint content MD5
|
||||
FilePath string // Local file
|
||||
Object string // Key
|
||||
ObjStat objectStat // Object status
|
||||
Parts []downloadPart // All download parts
|
||||
PartStat []bool // Parts' download status
|
||||
Start int64 // Start point of the file
|
||||
End int64 // End point of the file
|
||||
enableCRC bool // Whether has CRC check
|
||||
CRC uint64 // CRC check value
|
||||
Magic string // magic
|
||||
MD5 string // cp内容的MD5
|
||||
FilePath string // 本地文件
|
||||
Object string // key
|
||||
ObjStat objectStat // 文件状态
|
||||
Parts []downloadPart // 全部分片
|
||||
PartStat []bool // 分片下载是否完成
|
||||
}
|
||||
|
||||
type objectStat struct {
|
||||
Size int64 // Object size
|
||||
LastModified string // Last modified time
|
||||
Etag string // Etag
|
||||
Size int64 // 大小
|
||||
LastModified string // 最后修改时间
|
||||
Etag string // etag
|
||||
}
|
||||
|
||||
// isValid flags of checkpoint data is valid. It returns true when the data is valid and the checkpoint is valid and the object is not updated.
|
||||
func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (bool, error) {
|
||||
// Compare the CP's Magic and the MD5
|
||||
// CP数据是否有效,CP有效且Object没有更新时有效
|
||||
func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
|
||||
// 比较CP的Magic及MD5
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
|
@ -351,30 +268,28 @@ func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (b
|
|||
return false, nil
|
||||
}
|
||||
|
||||
// 确认object没有更新
|
||||
meta, err := bucket.GetObjectDetailedMeta(objectKey)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compare the object size, last modified time and etag
|
||||
// 比较Object的大小/最后修改时间/etag
|
||||
if cp.ObjStat.Size != objectSize ||
|
||||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
|
||||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check the download range
|
||||
if uRange != nil {
|
||||
start, end := adjustRange(uRange, objectSize)
|
||||
if start != cp.Start || end != cp.End {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// load checkpoint from local file
|
||||
// 从文件中load
|
||||
func (cp *downloadCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
|
@ -385,11 +300,11 @@ func (cp *downloadCheckpoint) load(filePath string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// dump funciton dumps to file
|
||||
// dump到文件
|
||||
func (cp *downloadCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
// 计算MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
|
@ -399,17 +314,17 @@ func (cp *downloadCheckpoint) dump(filePath string) error {
|
|||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialize
|
||||
// 序列化
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
// dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// todoParts gets unfinished parts
|
||||
// 未完成的分片
|
||||
func (cp downloadCheckpoint) todoParts() []downloadPart {
|
||||
dps := []downloadPart{}
|
||||
for i, ps := range cp.PartStat {
|
||||
|
@ -420,7 +335,7 @@ func (cp downloadCheckpoint) todoParts() []downloadPart {
|
|||
return dps
|
||||
}
|
||||
|
||||
// getCompletedBytes gets completed size
|
||||
// 完成的字节数
|
||||
func (cp downloadCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for i, part := range cp.Parts {
|
||||
|
@ -431,13 +346,19 @@ func (cp downloadCheckpoint) getCompletedBytes() int64 {
|
|||
return completedBytes
|
||||
}
|
||||
|
||||
// prepare initiates download tasks
|
||||
func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
|
||||
// CP
|
||||
// 初始化下载任务
|
||||
func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64) error {
|
||||
// cp
|
||||
cp.Magic = downloadCpMagic
|
||||
cp.FilePath = filePath
|
||||
cp.Object = objectKey
|
||||
|
||||
// object
|
||||
meta, err := bucket.GetObjectDetailedMeta(objectKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -447,15 +368,11 @@ func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKe
|
|||
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
|
||||
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
|
||||
|
||||
if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
|
||||
if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
|
||||
cp.enableCRC = true
|
||||
cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
|
||||
}
|
||||
// parts
|
||||
cp.Parts, err = getDownloadParts(bucket, objectKey, partSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parts
|
||||
cp.Parts = getDownloadParts(objectSize, partSize, uRange)
|
||||
cp.PartStat = make([]bool, len(cp.Parts))
|
||||
for i := range cp.PartStat {
|
||||
cp.PartStat[i] = false
|
||||
|
@ -469,47 +386,35 @@ func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
|
|||
return os.Rename(downFilepath, cp.FilePath)
|
||||
}
|
||||
|
||||
// downloadFileWithCp downloads files with checkpoint.
|
||||
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *unpackedRange) error {
|
||||
// 并发带断点的下载
|
||||
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
|
||||
tempFilePath := filePath + TempFileSuffix
|
||||
listener := getProgressListener(options)
|
||||
|
||||
payerOptions := []Option{}
|
||||
payer := getPayer(options)
|
||||
if payer != "" {
|
||||
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
|
||||
}
|
||||
|
||||
// Load checkpoint data.
|
||||
// LOAD CP数据
|
||||
dcp := downloadCheckpoint{}
|
||||
err := dcp.load(cpFilePath)
|
||||
if err != nil {
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Get the object detailed meta.
|
||||
meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load error or data invalid. Re-initialize the download.
|
||||
valid, err := dcp.isValid(meta, uRange)
|
||||
// LOAD出错或数据无效重新初始化下载
|
||||
valid, err := dcp.isValid(&bucket, objectKey)
|
||||
if err != nil || !valid {
|
||||
if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
|
||||
if err = dcp.prepare(&bucket, objectKey, filePath, partSize); err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Create the file if not exists. Otherwise the parts download will overwrite it.
|
||||
// 如果文件不存在则创建,存在不清空,下载分片会重写文件内容
|
||||
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fd.Close()
|
||||
|
||||
// Unfinished parts
|
||||
// 未完成的分片
|
||||
parts := dcp.todoParts()
|
||||
jobs := make(chan downloadPart, len(parts))
|
||||
results := make(chan downloadPart, len(parts))
|
||||
|
@ -520,23 +425,22 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
|
|||
event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the download workers routine
|
||||
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
|
||||
// 启动工作协程
|
||||
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go downloadWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Concurrently downloads parts
|
||||
// 并发下载分片
|
||||
go downloadScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts download finished
|
||||
// 等待分片下载完成
|
||||
completed := 0
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
dcp.PartStat[part.Index] = true
|
||||
dcp.Parts[part.Index].CRC64 = part.CRC64
|
||||
dcp.dump(cpFilePath)
|
||||
completedBytes += (part.End - part.Start + 1)
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size)
|
||||
|
@ -556,13 +460,5 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
|
|||
event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size)
|
||||
publishProgress(listener, event)
|
||||
|
||||
if dcp.enableCRC {
|
||||
actualCRC := combineCRCInParts(dcp.Parts)
|
||||
err = checkDownloadCRC(actualCRC, dcp.CRC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return dcp.complete(cpFilePath, tempFilePath)
|
||||
}
|
||||
|
|
|
@ -10,33 +10,28 @@ import (
|
|||
// ServiceError contains fields of the error response from Oss Service REST API.
|
||||
type ServiceError struct {
|
||||
XMLName xml.Name `xml:"Error"`
|
||||
Code string `xml:"Code"` // The error code returned from OSS to the caller
|
||||
Message string `xml:"Message"` // The detail error message from OSS
|
||||
RequestID string `xml:"RequestId"` // The UUID used to uniquely identify the request
|
||||
HostID string `xml:"HostId"` // The OSS server cluster's Id
|
||||
Endpoint string `xml:"Endpoint"`
|
||||
RawMessage string // The raw messages from OSS
|
||||
StatusCode int // HTTP status code
|
||||
Code string `xml:"Code"` // OSS返回给用户的错误码
|
||||
Message string `xml:"Message"` // OSS给出的详细错误信息
|
||||
RequestID string `xml:"RequestId"` // 用于唯一标识该次请求的UUID
|
||||
HostID string `xml:"HostId"` // 用于标识访问的OSS集群
|
||||
RawMessage string // OSS返回的原始消息内容
|
||||
StatusCode int // HTTP状态码
|
||||
}
|
||||
|
||||
// Error implements interface error
|
||||
// Implement interface error
|
||||
func (e ServiceError) Error() string {
|
||||
if e.Endpoint == "" {
|
||||
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
|
||||
e.StatusCode, e.Code, e.Message, e.RequestID)
|
||||
}
|
||||
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
|
||||
e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
|
||||
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s",
|
||||
e.StatusCode, e.Code, e.Message, e.RequestID)
|
||||
}
|
||||
|
||||
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
|
||||
// nor with an HTTP status code indicating success.
|
||||
type UnexpectedStatusCodeError struct {
|
||||
allowed []int // The expected HTTP stats code returned from OSS
|
||||
got int // The actual HTTP status code from OSS
|
||||
allowed []int // 预期OSS返回HTTP状态码
|
||||
got int // OSS实际返回HTTP状态码
|
||||
}
|
||||
|
||||
// Error implements interface error
|
||||
// Implement interface error
|
||||
func (e UnexpectedStatusCodeError) Error() string {
|
||||
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
|
||||
|
||||
|
@ -67,25 +62,18 @@ func checkRespCode(respCode int, allowed []int) error {
|
|||
|
||||
// CRCCheckError is returned when crc check is inconsistent between client and server
|
||||
type CRCCheckError struct {
|
||||
clientCRC uint64 // Calculated CRC64 in client
|
||||
serverCRC uint64 // Calculated CRC64 in server
|
||||
operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
|
||||
requestID string // The request id of this operation
|
||||
clientCRC uint64 // 客户端计算的CRC64值
|
||||
serverCRC uint64 // 服务端计算的CRC64值
|
||||
operation string // 上传操作,如PutObject/AppendObject/UploadPart等
|
||||
requestID string // 本次操作的RequestID
|
||||
}
|
||||
|
||||
// Error implements interface error
|
||||
// Implement interface error
|
||||
func (e CRCCheckError) Error() string {
|
||||
return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
|
||||
e.operation, e.clientCRC, e.serverCRC, e.requestID)
|
||||
}
|
||||
|
||||
func checkDownloadCRC(clientCRC, serverCRC uint64) error {
|
||||
if clientCRC == serverCRC {
|
||||
return nil
|
||||
}
|
||||
return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
|
||||
}
|
||||
|
||||
func checkCRC(resp *Response, operation string) error {
|
||||
if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
|
||||
return nil
|
||||
|
|
|
@ -235,7 +235,7 @@ var extToMimeType = map[string]string{
|
|||
}
|
||||
|
||||
// TypeByExtension returns the MIME type associated with the file extension ext.
|
||||
// gets the file's MIME type for HTTP header Content-Type
|
||||
// 获取文件类型,选项ContentType使用
|
||||
func TypeByExtension(filePath string) string {
|
||||
typ := mime.TypeByExtension(path.Ext(filePath))
|
||||
if typ == "" {
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
"net/http"
|
||||
)
|
||||
|
||||
// Response defines HTTP response from OSS
|
||||
// Response Http response from oss
|
||||
type Response struct {
|
||||
StatusCode int
|
||||
Headers http.Header
|
||||
|
@ -15,46 +15,38 @@ type Response struct {
|
|||
ServerCRC uint64
|
||||
}
|
||||
|
||||
func (r *Response) Read(p []byte) (n int, err error) {
|
||||
return r.Body.Read(p)
|
||||
}
|
||||
|
||||
func (r *Response) Close() error {
|
||||
return r.Body.Close()
|
||||
}
|
||||
|
||||
// PutObjectRequest is the request of DoPutObject
|
||||
// PutObjectRequest The request of DoPutObject
|
||||
type PutObjectRequest struct {
|
||||
ObjectKey string
|
||||
Reader io.Reader
|
||||
}
|
||||
|
||||
// GetObjectRequest is the request of DoGetObject
|
||||
// GetObjectRequest The request of DoGetObject
|
||||
type GetObjectRequest struct {
|
||||
ObjectKey string
|
||||
}
|
||||
|
||||
// GetObjectResult is the result of DoGetObject
|
||||
// GetObjectResult The result of DoGetObject
|
||||
type GetObjectResult struct {
|
||||
Response *Response
|
||||
ClientCRC hash.Hash64
|
||||
ServerCRC uint64
|
||||
}
|
||||
|
||||
// AppendObjectRequest is the requtest of DoAppendObject
|
||||
// AppendObjectRequest The requtest of DoAppendObject
|
||||
type AppendObjectRequest struct {
|
||||
ObjectKey string
|
||||
Reader io.Reader
|
||||
Position int64
|
||||
}
|
||||
|
||||
// AppendObjectResult is the result of DoAppendObject
|
||||
// AppendObjectResult The result of DoAppendObject
|
||||
type AppendObjectResult struct {
|
||||
NextPosition int64
|
||||
CRC uint64
|
||||
}
|
||||
|
||||
// UploadPartRequest is the request of DoUploadPart
|
||||
// UploadPartRequest The request of DoUploadPart
|
||||
type UploadPartRequest struct {
|
||||
InitResult *InitiateMultipartUploadResult
|
||||
Reader io.Reader
|
||||
|
@ -62,7 +54,7 @@ type UploadPartRequest struct {
|
|||
PartNumber int
|
||||
}
|
||||
|
||||
// UploadPartResult is the result of DoUploadPart
|
||||
// UploadPartResult The result of DoUploadPart
|
||||
type UploadPartResult struct {
|
||||
Part UploadPart
|
||||
}
|
||||
|
|
|
@ -5,22 +5,22 @@ import (
|
|||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// CopyFile is multipart copy object
|
||||
//
|
||||
// srcBucketName source bucket name
|
||||
// srcObjectKey source object name
|
||||
// destObjectKey target object name in the form of bucketname.objectkey
|
||||
// partSize the part size in byte.
|
||||
// options object's contraints. Check out function InitiateMultipartUpload.
|
||||
// CopyFile 分片复制文件
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// srcBucketName 源Bucket名称。
|
||||
// srcObjectKey 源Object名称。
|
||||
// destObjectKey 目标Object名称。目标Bucket名称为Bucket.BucketName。
|
||||
// partSize 复制文件片的大小,字节数。比如100 * 1024为每片100KB。
|
||||
// options Object的属性限制项。详见InitiateMultipartUpload。
|
||||
//
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
|
||||
destBucketName := bucket.BucketName
|
||||
|
@ -28,33 +28,25 @@ func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string,
|
|||
return errors.New("oss: part size invalid range (1024KB, 5GB]")
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
cpConf, err := getCpConfig(options, filepath.Base(destObjectKey))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routines := getRoutines(options)
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey)
|
||||
if cpFilePath != "" {
|
||||
return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
|
||||
}
|
||||
if cpConf.IsEnable {
|
||||
return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
|
||||
partSize, options, cpConf.FilePath, routines)
|
||||
}
|
||||
|
||||
return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
|
||||
partSize, options, routines)
|
||||
}
|
||||
|
||||
func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
|
||||
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
|
||||
cpFileName := getCpFileName(src, dest)
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
// ----- 并发无断点的下载 -----
|
||||
|
||||
// ----- Concurrently copy without checkpoint ---------
|
||||
|
||||
// copyWorkerArg defines the copy worker arguments
|
||||
// 工作协程参数
|
||||
type copyWorkerArg struct {
|
||||
bucket *Bucket
|
||||
imur InitiateMultipartUploadResult
|
||||
|
@ -64,7 +56,7 @@ type copyWorkerArg struct {
|
|||
hook copyPartHook
|
||||
}
|
||||
|
||||
// copyPartHook is the hook for testing purpose
|
||||
// Hook用于测试
|
||||
type copyPartHook func(part copyPart) error
|
||||
|
||||
var copyPartHooker copyPartHook = defaultCopyPartHook
|
||||
|
@ -73,7 +65,7 @@ func defaultCopyPartHook(part copyPart) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// copyWorker copies worker
|
||||
// 工作协程
|
||||
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
|
||||
for chunk := range jobs {
|
||||
if err := arg.hook(chunk); err != nil {
|
||||
|
@ -96,7 +88,7 @@ func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<-
|
|||
}
|
||||
}
|
||||
|
||||
// copyScheduler
|
||||
// 调度协程
|
||||
func copyScheduler(jobs chan copyPart, parts []copyPart) {
|
||||
for _, part := range parts {
|
||||
jobs <- part
|
||||
|
@ -104,16 +96,26 @@ func copyScheduler(jobs chan copyPart, parts []copyPart) {
|
|||
close(jobs)
|
||||
}
|
||||
|
||||
// copyPart structure
|
||||
// 分片
|
||||
type copyPart struct {
|
||||
Number int // Part number (from 1 to 10,000)
|
||||
Start int64 // The start index in the source file.
|
||||
End int64 // The end index in the source file
|
||||
Number int // 片序号[1, 10000]
|
||||
Start int64 // 片起始位置
|
||||
End int64 // 片结束位置
|
||||
}
|
||||
|
||||
// getCopyParts calculates copy parts
|
||||
func getCopyParts(objectSize, partSize int64) []copyPart {
|
||||
// 文件分片
|
||||
func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) {
|
||||
meta, err := bucket.GetObjectDetailedMeta(objectKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
parts := []copyPart{}
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
part := copyPart{}
|
||||
i := 0
|
||||
for offset := int64(0); offset < objectSize; offset += partSize {
|
||||
|
@ -123,10 +125,10 @@ func getCopyParts(objectSize, partSize int64) []copyPart {
|
|||
parts = append(parts, part)
|
||||
i++
|
||||
}
|
||||
return parts
|
||||
return parts, nil
|
||||
}
|
||||
|
||||
// getSrcObjectBytes gets the source file size
|
||||
// 获取源文件大小
|
||||
func getSrcObjectBytes(parts []copyPart) int64 {
|
||||
var ob int64
|
||||
for _, part := range parts {
|
||||
|
@ -135,32 +137,20 @@ func getSrcObjectBytes(parts []copyPart) int64 {
|
|||
return ob
|
||||
}
|
||||
|
||||
// copyFile is a concurrently copy without checkpoint
|
||||
// 并发无断点续传的下载
|
||||
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
|
||||
partSize int64, options []Option, routines int) error {
|
||||
descBucket, err := bucket.Client.Bucket(destBucketName)
|
||||
srcBucket, err := bucket.Client.Bucket(srcBucketName)
|
||||
listener := getProgressListener(options)
|
||||
|
||||
payerOptions := []Option{}
|
||||
payer := getPayer(options)
|
||||
if payer != "" {
|
||||
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
|
||||
}
|
||||
|
||||
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
|
||||
// 分割文件
|
||||
parts, err := getCopyParts(srcBucket, srcObjectKey, partSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get copy parts
|
||||
parts := getCopyParts(objectSize, partSize)
|
||||
// Initialize the multipart upload
|
||||
// 初始化上传任务
|
||||
imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -176,16 +166,16 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
|
|||
event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start to copy workers
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
|
||||
// 启动工作协程
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go copyWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Start the scheduler
|
||||
// 并发上传分片
|
||||
go copyScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts finished.
|
||||
// 等待分片下载完成
|
||||
completed := 0
|
||||
ups := make([]UploadPart, len(parts))
|
||||
for completed < len(parts) {
|
||||
|
@ -198,7 +188,7 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
|
|||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
descBucket.AbortMultipartUpload(imur, payerOptions...)
|
||||
descBucket.AbortMultipartUpload(imur)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
|
@ -212,36 +202,36 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
|
|||
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Complete the multipart upload
|
||||
_, err = descBucket.CompleteMultipartUpload(imur, ups, payerOptions...)
|
||||
// 提交任务
|
||||
_, err = descBucket.CompleteMultipartUpload(imur, ups)
|
||||
if err != nil {
|
||||
bucket.AbortMultipartUpload(imur, payerOptions...)
|
||||
bucket.AbortMultipartUpload(imur)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----- Concurrently copy with checkpoint -----
|
||||
// ----- 并发有断点的下载 -----
|
||||
|
||||
const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
|
||||
|
||||
type copyCheckpoint struct {
|
||||
Magic string // Magic
|
||||
MD5 string // CP content MD5
|
||||
SrcBucketName string // Source bucket
|
||||
SrcObjectKey string // Source object
|
||||
DestBucketName string // Target bucket
|
||||
DestObjectKey string // Target object
|
||||
CopyID string // Copy ID
|
||||
ObjStat objectStat // Object stat
|
||||
Parts []copyPart // Copy parts
|
||||
CopyParts []UploadPart // The uploaded parts
|
||||
PartStat []bool // The part status
|
||||
Magic string // magic
|
||||
MD5 string // cp内容的MD5
|
||||
SrcBucketName string // 源Bucket
|
||||
SrcObjectKey string // 源Object
|
||||
DestBucketName string // 目标Bucket
|
||||
DestObjectKey string // 目标Bucket
|
||||
CopyID string // copy id
|
||||
ObjStat objectStat // 文件状态
|
||||
Parts []copyPart // 全部分片
|
||||
CopyParts []UploadPart // 分片上传成功后的返回值
|
||||
PartStat []bool // 分片下载是否完成
|
||||
}
|
||||
|
||||
// isValid checks if the data is valid which means CP is valid and object is not updated.
|
||||
func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
|
||||
// Compare CP's magic number and the MD5.
|
||||
// CP数据是否有效,CP有效且Object没有更新时有效
|
||||
func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
|
||||
// 比较CP的Magic及MD5
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
|
@ -252,12 +242,18 @@ func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
|
|||
return false, nil
|
||||
}
|
||||
|
||||
// 确认object没有更新
|
||||
meta, err := bucket.GetObjectDetailedMeta(objectKey)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compare the object size and last modified time and etag.
|
||||
// 比较Object的大小/最后修改时间/etag
|
||||
if cp.ObjStat.Size != objectSize ||
|
||||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
|
||||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
|
||||
|
@ -267,7 +263,7 @@ func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
|
|||
return true, nil
|
||||
}
|
||||
|
||||
// load loads from the checkpoint file
|
||||
// 从文件中load
|
||||
func (cp *copyCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
|
@ -278,17 +274,17 @@ func (cp *copyCheckpoint) load(filePath string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// update updates the parts status
|
||||
// 更新分片状态
|
||||
func (cp *copyCheckpoint) update(part UploadPart) {
|
||||
cp.CopyParts[part.PartNumber-1] = part
|
||||
cp.PartStat[part.PartNumber-1] = true
|
||||
}
|
||||
|
||||
// dump dumps the CP to the file
|
||||
// dump到文件
|
||||
func (cp *copyCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
// 计算MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
|
@ -298,17 +294,17 @@ func (cp *copyCheckpoint) dump(filePath string) error {
|
|||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialization
|
||||
// 序列化
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
// dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// todoParts returns unfinished parts
|
||||
// 未完成的分片
|
||||
func (cp copyCheckpoint) todoParts() []copyPart {
|
||||
dps := []copyPart{}
|
||||
for i, ps := range cp.PartStat {
|
||||
|
@ -319,7 +315,7 @@ func (cp copyCheckpoint) todoParts() []copyPart {
|
|||
return dps
|
||||
}
|
||||
|
||||
// getCompletedBytes returns finished bytes count
|
||||
// 完成的字节数
|
||||
func (cp copyCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for i, part := range cp.Parts {
|
||||
|
@ -330,16 +326,22 @@ func (cp copyCheckpoint) getCompletedBytes() int64 {
|
|||
return completedBytes
|
||||
}
|
||||
|
||||
// prepare initializes the multipart upload
|
||||
func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
|
||||
// 初始化下载任务
|
||||
func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
|
||||
partSize int64, options []Option) error {
|
||||
// CP
|
||||
// cp
|
||||
cp.Magic = copyCpMagic
|
||||
cp.SrcBucketName = srcBucket.BucketName
|
||||
cp.SrcObjectKey = srcObjectKey
|
||||
cp.DestBucketName = destBucket.BucketName
|
||||
cp.DestObjectKey = destObjectKey
|
||||
|
||||
// object
|
||||
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -349,15 +351,18 @@ func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObject
|
|||
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
|
||||
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
|
||||
|
||||
// Parts
|
||||
cp.Parts = getCopyParts(objectSize, partSize)
|
||||
// parts
|
||||
cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cp.PartStat = make([]bool, len(cp.Parts))
|
||||
for i := range cp.PartStat {
|
||||
cp.PartStat[i] = false
|
||||
}
|
||||
cp.CopyParts = make([]UploadPart, len(cp.Parts))
|
||||
|
||||
// Init copy
|
||||
// init copy
|
||||
imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -367,10 +372,10 @@ func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObject
|
|||
return nil
|
||||
}
|
||||
|
||||
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
|
||||
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string) error {
|
||||
imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
|
||||
Key: cp.DestObjectKey, UploadID: cp.CopyID}
|
||||
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
|
||||
_, err := bucket.CompleteMultipartUpload(imur, parts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -378,42 +383,30 @@ func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePat
|
|||
return err
|
||||
}
|
||||
|
||||
// copyFileWithCp is concurrently copy with checkpoint
|
||||
// 并发带断点的下载
|
||||
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
|
||||
partSize int64, options []Option, cpFilePath string, routines int) error {
|
||||
descBucket, err := bucket.Client.Bucket(destBucketName)
|
||||
srcBucket, err := bucket.Client.Bucket(srcBucketName)
|
||||
listener := getProgressListener(options)
|
||||
|
||||
payerOptions := []Option{}
|
||||
payer := getPayer(options)
|
||||
if payer != "" {
|
||||
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
|
||||
}
|
||||
|
||||
// Load CP data
|
||||
// LOAD CP数据
|
||||
ccp := copyCheckpoint{}
|
||||
err = ccp.load(cpFilePath)
|
||||
if err != nil {
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Make sure the object is not updated.
|
||||
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load error or the CP data is invalid---reinitialize
|
||||
valid, err := ccp.isValid(meta)
|
||||
// LOAD出错或数据无效重新初始化下载
|
||||
valid, err := ccp.isValid(srcBucket, srcObjectKey)
|
||||
if err != nil || !valid {
|
||||
if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
|
||||
if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Unfinished parts
|
||||
// 未完成的分片
|
||||
parts := ccp.todoParts()
|
||||
imur := InitiateMultipartUploadResult{
|
||||
Bucket: destBucketName,
|
||||
|
@ -429,16 +422,16 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
|
|||
event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the worker coroutines
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
|
||||
// 启动工作协程
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go copyWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Start the scheduler
|
||||
// 并发下载分片
|
||||
go copyScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts completed.
|
||||
// 等待分片下载完成
|
||||
completed := 0
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
|
@ -464,5 +457,5 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
|
|||
event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
|
||||
publishProgress(listener, event)
|
||||
|
||||
return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, payerOptions)
|
||||
return ccp.complete(descBucket, ccp.CopyParts, cpFilePath)
|
||||
}
|
||||
|
|
|
@ -5,28 +5,26 @@ import (
|
|||
"encoding/xml"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// InitiateMultipartUpload initializes multipart upload
|
||||
//
|
||||
// objectKey object name
|
||||
// options the object constricts for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
|
||||
// ServerSideEncryption, Meta, check out the following link:
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
|
||||
// InitiateMultipartUpload 初始化分片上传任务。
|
||||
//
|
||||
// InitiateMultipartUploadResult the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile,UploadPartCopy.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// objectKey Object名称。
|
||||
// options 上传时可以指定Object的属性,可选属性有CacheControl、ContentDisposition、ContentEncoding、Expires、
|
||||
// ServerSideEncryption、Meta,具体含义请参考
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
|
||||
//
|
||||
// InitiateMultipartUploadResult 初始化后操作成功的返回值,用于后面的UploadPartFromFile、UploadPartCopy等操作。error为nil时有效。
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
|
||||
var imur InitiateMultipartUploadResult
|
||||
opts := addContentType(options, objectKey)
|
||||
params := map[string]interface{}{}
|
||||
params["uploads"] = nil
|
||||
resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
|
||||
resp, err := bucket.do("POST", objectKey, "uploads", "uploads", opts, nil, nil)
|
||||
if err != nil {
|
||||
return imur, err
|
||||
}
|
||||
|
@ -36,20 +34,23 @@ func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option
|
|||
return imur, err
|
||||
}
|
||||
|
||||
// UploadPart uploads parts
|
||||
//
|
||||
// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts.
|
||||
// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file.
|
||||
// And thus with the same part number and upload Id, another part upload will overwrite the data.
|
||||
// Except the last one, minimal part size is 100KB. There's no limit on the last part size.
|
||||
// UploadPart 上传分片。
|
||||
//
|
||||
// imur the returned value of InitiateMultipartUpload.
|
||||
// reader io.Reader the reader for the part's data.
|
||||
// size the part size.
|
||||
// partNumber the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error.
|
||||
// 初始化一个Multipart Upload之后,可以根据指定的Object名和Upload ID来分片(Part)上传数据。
|
||||
// 每一个上传的Part都有一个标识它的号码(part number,范围是1~10000)。对于同一个Upload ID,
|
||||
// 该号码不但唯一标识这一片数据,也标识了这片数据在整个文件内的相对位置。如果您用同一个part号码,上传了新的数据,
|
||||
// 那么OSS上已有的这个号码的Part数据将被覆盖。除了最后一片Part以外,其他的part最小为100KB;
|
||||
// 最后一片Part没有大小限制。
|
||||
//
|
||||
// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// imur InitiateMultipartUpload成功后的返回值。
|
||||
// reader io.Reader 需要分片上传的reader。
|
||||
// size 本次上传片Part的大小。
|
||||
// partNumber 本次上传片(Part)的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。
|
||||
//
|
||||
// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,即传入参数partNumber;
|
||||
// ETag及上传数据的MD5。error为nil时有效。
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
|
||||
partSize int64, partNumber int, options ...Option) (UploadPart, error) {
|
||||
|
@ -65,16 +66,18 @@ func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Re
|
|||
return result.Part, err
|
||||
}
|
||||
|
||||
// UploadPartFromFile uploads part from the file.
|
||||
//
|
||||
// imur the return value of a successful InitiateMultipartUpload.
|
||||
// filePath the local file path to upload.
|
||||
// startPosition the start position in the local file.
|
||||
// partSize the part size.
|
||||
// partNumber the part number (from 1 to 10,000)
|
||||
// UploadPartFromFile 上传分片。
|
||||
//
|
||||
// UploadPart the return value consists of PartNumber and ETag.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// imur InitiateMultipartUpload成功后的返回值。
|
||||
// filePath 需要分片上传的本地文件。
|
||||
// startPosition 本次上传文件片的起始位置。
|
||||
// partSize 本次上传文件片的大小。
|
||||
// partNumber 本次上传文件片的编号,范围是1~10000。
|
||||
//
|
||||
// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,传入参数partNumber;
|
||||
// ETag上传数据的MD5。error为nil时有效。
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
|
||||
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
|
||||
|
@ -98,20 +101,19 @@ func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, file
|
|||
return result.Part, err
|
||||
}
|
||||
|
||||
// DoUploadPart does the actual part upload.
|
||||
//
|
||||
// request part upload request
|
||||
// DoUploadPart 上传分片。
|
||||
//
|
||||
// UploadPartResult the result of uploading part.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// request 上传分片请求。
|
||||
//
|
||||
// UploadPartResult 上传分片请求返回值。
|
||||
// error 操作无错误为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
|
||||
listener := getProgressListener(options)
|
||||
options = append(options, ContentLength(request.PartSize))
|
||||
params := map[string]interface{}{}
|
||||
params["partNumber"] = strconv.Itoa(request.PartNumber)
|
||||
params["uploadId"] = request.InitResult.UploadID
|
||||
resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
|
||||
params := "partNumber=" + strconv.Itoa(request.PartNumber) + "&uploadId=" + request.InitResult.UploadID
|
||||
opts := []Option{ContentLength(request.PartSize)}
|
||||
resp, err := bucket.do("PUT", request.InitResult.Key, params, params, opts,
|
||||
&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
|
||||
if err != nil {
|
||||
return &UploadPartResult{}, err
|
||||
|
@ -133,32 +135,32 @@ func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option)
|
|||
return &UploadPartResult{part}, nil
|
||||
}
|
||||
|
||||
// UploadPartCopy uploads part copy
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload
|
||||
// copySrc source Object name
|
||||
// startPosition the part's start index in the source file
|
||||
// partSize the part size
|
||||
// partNumber the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error.
|
||||
// options the constraints of source object for the copy. The copy happens only when these contraints are met. Otherwise it returns error.
|
||||
// CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
|
||||
// UploadPartCopy 拷贝分片。
|
||||
//
|
||||
// UploadPart the return value consists of PartNumber and ETag.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// imur InitiateMultipartUpload成功后的返回值。
|
||||
// copySrc 源Object名称。
|
||||
// startPosition 本次拷贝片(Part)在源Object的起始位置。
|
||||
// partSize 本次拷贝片的大小。
|
||||
// partNumber 本次拷贝片的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。
|
||||
// options copy时源Object的限制条件,满足限制条件时copy,不满足时返回错误。可选条件有CopySourceIfMatch、
|
||||
// CopySourceIfNoneMatch、CopySourceIfModifiedSince CopySourceIfUnmodifiedSince,具体含义请参看
|
||||
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
|
||||
//
|
||||
// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片(Part)编号,即传入参数partNumber;
|
||||
// ETag及上传数据的MD5。error为nil时有效。
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
|
||||
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
|
||||
var out UploadPartCopyResult
|
||||
var part UploadPart
|
||||
|
||||
opts := []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
|
||||
opts := []Option{CopySource(srcBucketName, srcObjectKey),
|
||||
CopySourceRange(startPosition, partSize)}
|
||||
opts = append(opts, options...)
|
||||
params := map[string]interface{}{}
|
||||
params["partNumber"] = strconv.Itoa(partNumber)
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
|
||||
params := "partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + imur.UploadID
|
||||
resp, err := bucket.do("PUT", imur.Key, params, params, opts, nil, nil)
|
||||
if err != nil {
|
||||
return part, err
|
||||
}
|
||||
|
@ -174,16 +176,17 @@ func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucke
|
|||
return part, nil
|
||||
}
|
||||
|
||||
// CompleteMultipartUpload completes the multipart upload.
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload.
|
||||
// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy.
|
||||
// CompleteMultipartUpload 提交分片上传任务。
|
||||
//
|
||||
// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// imur InitiateMultipartUpload的返回值。
|
||||
// parts UploadPart/UploadPartFromFile/UploadPartCopy返回值组成的数组。
|
||||
//
|
||||
// CompleteMultipartUploadResponse 操作成功后的返回值。error为nil时有效。
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
|
||||
parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
|
||||
parts []UploadPart) (CompleteMultipartUploadResult, error) {
|
||||
var out CompleteMultipartUploadResult
|
||||
|
||||
sort.Sort(uploadParts(parts))
|
||||
|
@ -196,9 +199,8 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
|
|||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
|
||||
params := "uploadId=" + imur.UploadID
|
||||
resp, err := bucket.do("POST", imur.Key, params, params, nil, buffer, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -208,16 +210,16 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
|
|||
return out, err
|
||||
}
|
||||
|
||||
// AbortMultipartUpload aborts the multipart upload.
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload.
|
||||
// AbortMultipartUpload 取消分片上传任务。
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// imur InitiateMultipartUpload的返回值。
|
||||
//
|
||||
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
|
||||
params := map[string]interface{}{}
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error {
|
||||
params := "uploadId=" + imur.UploadID
|
||||
resp, err := bucket.do("DELETE", imur.Key, params, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -225,57 +227,46 @@ func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, op
|
|||
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// ListUploadedParts lists the uploaded parts.
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload.
|
||||
// ListUploadedParts 列出指定上传任务已经上传的分片。
|
||||
//
|
||||
// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// imur InitiateMultipartUpload的返回值。
|
||||
//
|
||||
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
|
||||
// ListUploadedPartsResponse 操作成功后的返回值,成员UploadedParts已经上传/拷贝的片。error为nil时该返回值有效。
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (ListUploadedPartsResult, error) {
|
||||
var out ListUploadedPartsResult
|
||||
options = append(options, EncodingType("url"))
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params, err := getRawParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("GET", imur.Key, params, nil, nil, nil)
|
||||
params := "uploadId=" + imur.UploadID
|
||||
resp, err := bucket.do("GET", imur.Key, params, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
err = decodeListUploadedPartsResult(&out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// ListMultipartUploads lists all ongoing multipart upload tasks
|
||||
//
|
||||
// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
|
||||
// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
|
||||
// ListMultipartUploads 列出所有未上传完整的multipart任务列表。
|
||||
//
|
||||
// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// options ListObject的筛选行为。Prefix返回object的前缀,KeyMarker返回object的起始位置,MaxUploads最大数目默认1000,
|
||||
// Delimiter用于对Object名字进行分组的字符,所有名字包含指定的前缀且第一次出现delimiter字符之间的object。
|
||||
//
|
||||
// ListMultipartUploadResponse 操作成功后的返回值,error为nil时该返回值有效。
|
||||
// error 操作成功error为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
|
||||
var out ListMultipartUploadResult
|
||||
|
||||
options = append(options, EncodingType("url"))
|
||||
params, err := getRawParams(options)
|
||||
params, err := handleParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
params["uploads"] = nil
|
||||
|
||||
resp, err := bucket.do("GET", "", params, options, nil, nil)
|
||||
resp, err := bucket.do("GET", "", "uploads&"+params, "uploads", nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
|
|
@ -1,19 +1,21 @@
|
|||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type optionType string
|
||||
|
||||
const (
|
||||
optionParam optionType = "HTTPParameter" // URL parameter
|
||||
optionHTTP optionType = "HTTPHeader" // HTTP header
|
||||
optionArg optionType = "FuncArgument" // Function argument
|
||||
optionParam optionType = "HTTPParameter" // URL参数
|
||||
optionHTTP optionType = "HTTPHeader" // HTTP头
|
||||
optionArg optionType = "FuncArgument" // 函数参数
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -22,7 +24,6 @@ const (
|
|||
checkpointConfig = "x-cp-config"
|
||||
initCRC64 = "init-crc64"
|
||||
progressListener = "x-progress-listener"
|
||||
storageClass = "storage-class"
|
||||
)
|
||||
|
||||
type (
|
||||
|
@ -31,7 +32,7 @@ type (
|
|||
Type optionType
|
||||
}
|
||||
|
||||
// Option HTTP option
|
||||
// Option http option
|
||||
Option func(map[string]optionValue) error
|
||||
)
|
||||
|
||||
|
@ -65,11 +66,6 @@ func ContentEncoding(value string) Option {
|
|||
return setHeader(HTTPHeaderContentEncoding, value)
|
||||
}
|
||||
|
||||
// ContentLanguage is an option to set Content-Language header
|
||||
func ContentLanguage(value string) Option {
|
||||
return setHeader(HTTPHeaderContentLanguage, value)
|
||||
}
|
||||
|
||||
// ContentMD5 is an option to set Content-MD5 header
|
||||
func ContentMD5(value string) Option {
|
||||
return setHeader(HTTPHeaderContentMD5, value)
|
||||
|
@ -90,11 +86,6 @@ func Range(start, end int64) Option {
|
|||
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
|
||||
}
|
||||
|
||||
// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048
|
||||
func NormalizedRange(nr string) Option {
|
||||
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
|
||||
}
|
||||
|
||||
// AcceptEncoding is an option to set Accept-Encoding header
|
||||
func AcceptEncoding(value string) Option {
|
||||
return setHeader(HTTPHeaderAcceptEncoding, value)
|
||||
|
@ -162,46 +153,16 @@ func ServerSideEncryption(value string) Option {
|
|||
return setHeader(HTTPHeaderOssServerSideEncryption, value)
|
||||
}
|
||||
|
||||
// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header
|
||||
func ServerSideEncryptionKeyID(value string) Option {
|
||||
return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
|
||||
}
|
||||
|
||||
// ObjectACL is an option to set X-Oss-Object-Acl header
|
||||
func ObjectACL(acl ACLType) Option {
|
||||
return setHeader(HTTPHeaderOssObjectACL, string(acl))
|
||||
}
|
||||
|
||||
// symlinkTarget is an option to set X-Oss-Symlink-Target
|
||||
func symlinkTarget(targetObjectKey string) Option {
|
||||
return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
|
||||
}
|
||||
|
||||
// Origin is an option to set Origin header
|
||||
func Origin(value string) Option {
|
||||
return setHeader(HTTPHeaderOrigin, value)
|
||||
}
|
||||
|
||||
// ObjectStorageClass is an option to set the storage class of object
|
||||
func ObjectStorageClass(storageClass StorageClassType) Option {
|
||||
return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
|
||||
}
|
||||
|
||||
// Callback is an option to set callback values
|
||||
func Callback(callback string) Option {
|
||||
return setHeader(HTTPHeaderOssCallback, callback)
|
||||
}
|
||||
|
||||
// CallbackVar is an option to set callback user defined values
|
||||
func CallbackVar(callbackVar string) Option {
|
||||
return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
|
||||
}
|
||||
|
||||
// RequestPayer is an option to set payer who pay for the request
|
||||
func RequestPayer(payerType PayerType) Option {
|
||||
return setHeader(HTTPHeaderOSSRequester, string(payerType))
|
||||
}
|
||||
|
||||
// Delimiter is an option to set delimiler parameter
|
||||
func Delimiter(value string) Option {
|
||||
return addParam("delimiter", value)
|
||||
|
@ -242,49 +203,28 @@ func UploadIDMarker(value string) Option {
|
|||
return addParam("upload-id-marker", value)
|
||||
}
|
||||
|
||||
// MaxParts is an option to set max-parts parameter
|
||||
func MaxParts(value int) Option {
|
||||
return addParam("max-parts", strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// PartNumberMarker is an option to set part-number-marker parameter
|
||||
func PartNumberMarker(value int) Option {
|
||||
return addParam("part-number-marker", strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// DeleteObjectsQuiet false:DeleteObjects in verbose mode; true:DeleteObjects in quite mode. Default is false.
|
||||
// DeleteObjectsQuiet DeleteObjects详细(verbose)模式或简单(quiet)模式,默认详细模式。
|
||||
func DeleteObjectsQuiet(isQuiet bool) Option {
|
||||
return addArg(deleteObjectsQuiet, isQuiet)
|
||||
}
|
||||
|
||||
// StorageClass bucket storage class
|
||||
func StorageClass(value StorageClassType) Option {
|
||||
return addArg(storageClass, value)
|
||||
}
|
||||
|
||||
// Checkpoint configuration
|
||||
// 断点续传配置,包括是否启用、cp文件
|
||||
type cpConfig struct {
|
||||
IsEnable bool
|
||||
FilePath string
|
||||
DirPath string
|
||||
}
|
||||
|
||||
// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
|
||||
// Checkpoint DownloadFile/UploadFile是否开启checkpoint及checkpoint文件路径
|
||||
func Checkpoint(isEnable bool, filePath string) Option {
|
||||
return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
|
||||
return addArg(checkpointConfig, &cpConfig{isEnable, filePath})
|
||||
}
|
||||
|
||||
// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
|
||||
func CheckpointDir(isEnable bool, dirPath string) Option {
|
||||
return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath})
|
||||
}
|
||||
|
||||
// Routines DownloadFile/UploadFile routine count
|
||||
// Routines DownloadFile/UploadFile并发数
|
||||
func Routines(n int) Option {
|
||||
return addArg(routineNum, n)
|
||||
}
|
||||
|
||||
// InitCRC Init AppendObject CRC
|
||||
// InitCRC AppendObject CRC的校验的初始值
|
||||
func InitCRC(initCRC uint64) Option {
|
||||
return addArg(initCRC64, initCRC)
|
||||
}
|
||||
|
@ -294,41 +234,6 @@ func Progress(listener ProgressListener) Option {
|
|||
return addArg(progressListener, listener)
|
||||
}
|
||||
|
||||
// ResponseContentType is an option to set response-content-type param
|
||||
func ResponseContentType(value string) Option {
|
||||
return addParam("response-content-type", value)
|
||||
}
|
||||
|
||||
// ResponseContentLanguage is an option to set response-content-language param
|
||||
func ResponseContentLanguage(value string) Option {
|
||||
return addParam("response-content-language", value)
|
||||
}
|
||||
|
||||
// ResponseExpires is an option to set response-expires param
|
||||
func ResponseExpires(value string) Option {
|
||||
return addParam("response-expires", value)
|
||||
}
|
||||
|
||||
// ResponseCacheControl is an option to set response-cache-control param
|
||||
func ResponseCacheControl(value string) Option {
|
||||
return addParam("response-cache-control", value)
|
||||
}
|
||||
|
||||
// ResponseContentDisposition is an option to set response-content-disposition param
|
||||
func ResponseContentDisposition(value string) Option {
|
||||
return addParam("response-content-disposition", value)
|
||||
}
|
||||
|
||||
// ResponseContentEncoding is an option to set response-content-encoding param
|
||||
func ResponseContentEncoding(value string) Option {
|
||||
return addParam("response-content-encoding", value)
|
||||
}
|
||||
|
||||
// Process is an option to set x-oss-process param
|
||||
func Process(value string) Option {
|
||||
return addParam("x-oss-process", value)
|
||||
}
|
||||
|
||||
func setHeader(key string, value interface{}) Option {
|
||||
return func(params map[string]optionValue) error {
|
||||
if value == nil {
|
||||
|
@ -377,27 +282,40 @@ func handleOptions(headers map[string]string, options []Option) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func getRawParams(options []Option) (map[string]interface{}, error) {
|
||||
// Option
|
||||
func handleParams(options []Option) (string, error) {
|
||||
// option
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
paramsm := map[string]interface{}{}
|
||||
// Serialize
|
||||
// sort
|
||||
var buf bytes.Buffer
|
||||
keys := make([]string, 0, len(params))
|
||||
for k, v := range params {
|
||||
if v.Type == optionParam {
|
||||
vs := params[k]
|
||||
paramsm[k] = vs.Value.(string)
|
||||
keys = append(keys, k)
|
||||
}
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
return paramsm, nil
|
||||
// serialize
|
||||
for _, k := range keys {
|
||||
vs := params[k]
|
||||
prefix := url.QueryEscape(k) + "="
|
||||
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteByte('&')
|
||||
}
|
||||
buf.WriteString(prefix)
|
||||
buf.WriteString(url.QueryEscape(vs.Value.(string)))
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
|
||||
|
|
|
@ -2,7 +2,7 @@ package oss
|
|||
|
||||
import "io"
|
||||
|
||||
// ProgressEventType defines transfer progress event type
|
||||
// ProgressEventType transfer progress event type
|
||||
type ProgressEventType int
|
||||
|
||||
const (
|
||||
|
@ -16,19 +16,19 @@ const (
|
|||
TransferFailedEvent
|
||||
)
|
||||
|
||||
// ProgressEvent defines progress event
|
||||
// ProgressEvent progress event
|
||||
type ProgressEvent struct {
|
||||
ConsumedBytes int64
|
||||
TotalBytes int64
|
||||
EventType ProgressEventType
|
||||
}
|
||||
|
||||
// ProgressListener listens progress change
|
||||
// ProgressListener listen progress change
|
||||
type ProgressListener interface {
|
||||
ProgressChanged(event *ProgressEvent)
|
||||
}
|
||||
|
||||
// -------------------- Private --------------------
|
||||
// -------------------- private --------------------
|
||||
|
||||
func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
|
||||
return &ProgressEvent{
|
||||
|
@ -62,7 +62,7 @@ type teeReader struct {
|
|||
// corresponding writes to w. There is no internal buffering -
|
||||
// the write must complete before the read completes.
|
||||
// Any error encountered while writing is reported as a read error.
|
||||
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
|
||||
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.Reader {
|
||||
return &teeReader{
|
||||
reader: reader,
|
||||
writer: writer,
|
||||
|
@ -76,7 +76,7 @@ func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener Pr
|
|||
func (t *teeReader) Read(p []byte) (n int, err error) {
|
||||
n, err = t.reader.Read(p)
|
||||
|
||||
// Read encountered error
|
||||
// read encountered error
|
||||
if err != nil && err != io.EOF {
|
||||
event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
|
||||
publishProgress(t.listener, event)
|
||||
|
@ -84,18 +84,18 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
|
|||
|
||||
if n > 0 {
|
||||
t.consumedBytes += int64(n)
|
||||
// CRC
|
||||
// crc
|
||||
if t.writer != nil {
|
||||
if n, err := t.writer.Write(p[:n]); err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
// Progress
|
||||
// progress
|
||||
if t.listener != nil {
|
||||
event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
|
||||
publishProgress(t.listener, event)
|
||||
}
|
||||
// Track
|
||||
// track
|
||||
if t.tracker != nil {
|
||||
t.tracker.completedBytes = t.consumedBytes
|
||||
}
|
||||
|
@ -103,10 +103,3 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
|
|||
|
||||
return
|
||||
}
|
||||
|
||||
func (t *teeReader) Close() error {
|
||||
if rc, ok := t.reader.(io.ReadCloser); ok {
|
||||
return rc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
// +build !go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func newTransport(conn *Conn, config *Config) *http.Transport {
|
||||
httpTimeOut := conn.config.HTTPTimeout
|
||||
httpMaxConns := conn.config.HTTPMaxConns
|
||||
// New Transport
|
||||
transport := &http.Transport{
|
||||
Dial: func(netw, addr string) (net.Conn, error) {
|
||||
conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
|
||||
},
|
||||
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
|
||||
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
|
||||
}
|
||||
return transport
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func newTransport(conn *Conn, config *Config) *http.Transport {
|
||||
httpTimeOut := conn.config.HTTPTimeout
|
||||
httpMaxConns := conn.config.HTTPMaxConns
|
||||
// New Transport
|
||||
transport := &http.Transport{
|
||||
Dial: func(netw, addr string) (net.Conn, error) {
|
||||
conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
|
||||
},
|
||||
MaxIdleConns: httpMaxConns.MaxIdleConns,
|
||||
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
|
||||
IdleConnTimeout: httpTimeOut.IdleConnTimeout,
|
||||
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
|
||||
}
|
||||
return transport
|
||||
}
|
|
@ -6,54 +6,53 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
// ListBucketsResult defines the result object from ListBuckets request
|
||||
// ListBucketsResult ListBuckets请求返回的结果
|
||||
type ListBucketsResult struct {
|
||||
XMLName xml.Name `xml:"ListAllMyBucketsResult"`
|
||||
Prefix string `xml:"Prefix"` // The prefix in this query
|
||||
Marker string `xml:"Marker"` // The marker filter
|
||||
MaxKeys int `xml:"MaxKeys"` // The max entry count to return. This information is returned when IsTruncated is true.
|
||||
IsTruncated bool `xml:"IsTruncated"` // Flag true means there's remaining buckets to return.
|
||||
NextMarker string `xml:"NextMarker"` // The marker filter for the next list call
|
||||
Owner Owner `xml:"Owner"` // The owner information
|
||||
Buckets []BucketProperties `xml:"Buckets>Bucket"` // The bucket list
|
||||
Prefix string `xml:"Prefix"` // 本次查询结果的前缀
|
||||
Marker string `xml:"Marker"` // 标明查询的起点,未全部返回时有此节点
|
||||
MaxKeys int `xml:"MaxKeys"` // 返回结果的最大数目,未全部返回时有此节点
|
||||
IsTruncated bool `xml:"IsTruncated"` // 所有的结果是否已经全部返回
|
||||
NextMarker string `xml:"NextMarker"` // 表示下一次查询的起点
|
||||
Owner Owner `xml:"Owner"` // 拥有者信息
|
||||
Buckets []BucketProperties `xml:"Buckets>Bucket"` // Bucket列表
|
||||
}
|
||||
|
||||
// BucketProperties defines bucket properties
|
||||
// BucketProperties Bucket信息
|
||||
type BucketProperties struct {
|
||||
XMLName xml.Name `xml:"Bucket"`
|
||||
Name string `xml:"Name"` // Bucket name
|
||||
Location string `xml:"Location"` // Bucket datacenter
|
||||
CreationDate time.Time `xml:"CreationDate"` // Bucket create time
|
||||
StorageClass string `xml:"StorageClass"` // Bucket storage class
|
||||
Name string `xml:"Name"` // Bucket名称
|
||||
Location string `xml:"Location"` // Bucket所在的数据中心
|
||||
CreationDate time.Time `xml:"CreationDate"` // Bucket创建时间
|
||||
}
|
||||
|
||||
// GetBucketACLResult defines GetBucketACL request's result
|
||||
// GetBucketACLResult GetBucketACL请求返回的结果
|
||||
type GetBucketACLResult struct {
|
||||
XMLName xml.Name `xml:"AccessControlPolicy"`
|
||||
ACL string `xml:"AccessControlList>Grant"` // Bucket ACL
|
||||
Owner Owner `xml:"Owner"` // Bucket owner
|
||||
ACL string `xml:"AccessControlList>Grant"` // Bucket权限
|
||||
Owner Owner `xml:"Owner"` // Bucket拥有者信息
|
||||
}
|
||||
|
||||
// LifecycleConfiguration is the Bucket Lifecycle configuration
|
||||
// LifecycleConfiguration Bucket的Lifecycle配置
|
||||
type LifecycleConfiguration struct {
|
||||
XMLName xml.Name `xml:"LifecycleConfiguration"`
|
||||
Rules []LifecycleRule `xml:"Rule"`
|
||||
}
|
||||
|
||||
// LifecycleRule defines Lifecycle rules
|
||||
// LifecycleRule Lifecycle规则
|
||||
type LifecycleRule struct {
|
||||
XMLName xml.Name `xml:"Rule"`
|
||||
ID string `xml:"ID"` // The rule ID
|
||||
Prefix string `xml:"Prefix"` // The object key prefix
|
||||
Status string `xml:"Status"` // The rule status (enabled or not)
|
||||
Expiration LifecycleExpiration `xml:"Expiration"` // The expiration property
|
||||
ID string `xml:"ID"` // 规则唯一的ID
|
||||
Prefix string `xml:"Prefix"` // 规则所适用Object的前缀
|
||||
Status string `xml:"Status"` // 规则是否生效
|
||||
Expiration LifecycleExpiration `xml:"Expiration"` // 规则的过期属性
|
||||
}
|
||||
|
||||
// LifecycleExpiration defines the rule's expiration property
|
||||
// LifecycleExpiration 规则的过期属性
|
||||
type LifecycleExpiration struct {
|
||||
XMLName xml.Name `xml:"Expiration"`
|
||||
Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time
|
||||
Date time.Time `xml:"Date,omitempty"` // Absolute expiration time: The expiration time in date.
|
||||
Days int `xml:"Days,omitempty"` // 最后修改时间过后多少天生效
|
||||
Date time.Time `xml:"Date,omitempty"` // 指定规则何时生效
|
||||
}
|
||||
|
||||
type lifecycleXML struct {
|
||||
|
@ -94,7 +93,7 @@ func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
|
|||
return rs
|
||||
}
|
||||
|
||||
// BuildLifecycleRuleByDays builds a lifecycle rule with specified expiration days
|
||||
// BuildLifecycleRuleByDays 指定过期天数构建Lifecycle规则
|
||||
func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
|
||||
var statusStr = "Enabled"
|
||||
if !status {
|
||||
|
@ -104,7 +103,7 @@ func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) Lifecycl
|
|||
Expiration: LifecycleExpiration{Days: days}}
|
||||
}
|
||||
|
||||
// BuildLifecycleRuleByDate builds a lifecycle rule with specified expiration time.
|
||||
// BuildLifecycleRuleByDate 指定过期时间构建Lifecycle规则
|
||||
func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
|
||||
var statusStr = "Enabled"
|
||||
if !status {
|
||||
|
@ -115,172 +114,171 @@ func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day i
|
|||
Expiration: LifecycleExpiration{Date: date}}
|
||||
}
|
||||
|
||||
// GetBucketLifecycleResult defines GetBucketLifecycle's result object
|
||||
// GetBucketLifecycleResult GetBucketLifecycle请求请求结果
|
||||
type GetBucketLifecycleResult LifecycleConfiguration
|
||||
|
||||
// RefererXML defines Referer configuration
|
||||
// RefererXML Referer配置
|
||||
type RefererXML struct {
|
||||
XMLName xml.Name `xml:"RefererConfiguration"`
|
||||
AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Allow empty referrer
|
||||
RefererList []string `xml:"RefererList>Referer"` // Referer whitelist
|
||||
AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // 是否允许referer字段为空的请求访问
|
||||
RefererList []string `xml:"RefererList>Referer"` // referer访问白名单
|
||||
}
|
||||
|
||||
// GetBucketRefererResult defines result object for GetBucketReferer request
|
||||
// GetBucketRefererResult GetBucketReferer请教返回结果
|
||||
type GetBucketRefererResult RefererXML
|
||||
|
||||
// LoggingXML defines logging configuration
|
||||
// LoggingXML Logging配置
|
||||
type LoggingXML struct {
|
||||
XMLName xml.Name `xml:"BucketLoggingStatus"`
|
||||
LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // The logging configuration information
|
||||
LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // 访问日志信息容器
|
||||
}
|
||||
|
||||
type loggingXMLEmpty struct {
|
||||
XMLName xml.Name `xml:"BucketLoggingStatus"`
|
||||
}
|
||||
|
||||
// LoggingEnabled defines the logging configuration information
|
||||
// LoggingEnabled 访问日志信息容器
|
||||
type LoggingEnabled struct {
|
||||
XMLName xml.Name `xml:"LoggingEnabled"`
|
||||
TargetBucket string `xml:"TargetBucket"` // The bucket name for storing the log files
|
||||
TargetPrefix string `xml:"TargetPrefix"` // The log file prefix
|
||||
TargetBucket string `xml:"TargetBucket"` //存放访问日志的Bucket
|
||||
TargetPrefix string `xml:"TargetPrefix"` //保存访问日志的文件前缀
|
||||
}
|
||||
|
||||
// GetBucketLoggingResult defines the result from GetBucketLogging request
|
||||
// GetBucketLoggingResult GetBucketLogging请求返回结果
|
||||
type GetBucketLoggingResult LoggingXML
|
||||
|
||||
// WebsiteXML defines Website configuration
|
||||
// WebsiteXML Website配置
|
||||
type WebsiteXML struct {
|
||||
XMLName xml.Name `xml:"WebsiteConfiguration"`
|
||||
IndexDocument IndexDocument `xml:"IndexDocument"` // The index page
|
||||
ErrorDocument ErrorDocument `xml:"ErrorDocument"` // The error page
|
||||
IndexDocument IndexDocument `xml:"IndexDocument"` // 目录URL时添加的索引文件
|
||||
ErrorDocument ErrorDocument `xml:"ErrorDocument"` // 404错误时使用的文件
|
||||
}
|
||||
|
||||
// IndexDocument defines the index page info
|
||||
// IndexDocument 目录URL时添加的索引文件
|
||||
type IndexDocument struct {
|
||||
XMLName xml.Name `xml:"IndexDocument"`
|
||||
Suffix string `xml:"Suffix"` // The file name for the index page
|
||||
Suffix string `xml:"Suffix"` // 目录URL时添加的索引文件名
|
||||
}
|
||||
|
||||
// ErrorDocument defines the 404 error page info
|
||||
// ErrorDocument 404错误时使用的文件
|
||||
type ErrorDocument struct {
|
||||
XMLName xml.Name `xml:"ErrorDocument"`
|
||||
Key string `xml:"Key"` // 404 error file name
|
||||
Key string `xml:"Key"` // 404错误时使用的文件名
|
||||
}
|
||||
|
||||
// GetBucketWebsiteResult defines the result from GetBucketWebsite request.
|
||||
// GetBucketWebsiteResult GetBucketWebsite请求返回结果
|
||||
type GetBucketWebsiteResult WebsiteXML
|
||||
|
||||
// CORSXML defines CORS configuration
|
||||
// CORSXML CORS配置
|
||||
type CORSXML struct {
|
||||
XMLName xml.Name `xml:"CORSConfiguration"`
|
||||
CORSRules []CORSRule `xml:"CORSRule"` // CORS rules
|
||||
CORSRules []CORSRule `xml:"CORSRule"` // CORS规则列表
|
||||
}
|
||||
|
||||
// CORSRule defines CORS rules
|
||||
// CORSRule CORS规则
|
||||
type CORSRule struct {
|
||||
XMLName xml.Name `xml:"CORSRule"`
|
||||
AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins. By default it's wildcard '*'
|
||||
AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods
|
||||
AllowedHeader []string `xml:"AllowedHeader"` // Allowed headers
|
||||
ExposeHeader []string `xml:"ExposeHeader"` // Allowed response headers
|
||||
MaxAgeSeconds int `xml:"MaxAgeSeconds"` // Max cache ages in seconds
|
||||
AllowedOrigin []string `xml:"AllowedOrigin"` // 允许的来源,默认通配符"*"
|
||||
AllowedMethod []string `xml:"AllowedMethod"` // 允许的方法
|
||||
AllowedHeader []string `xml:"AllowedHeader"` // 允许的请求头
|
||||
ExposeHeader []string `xml:"ExposeHeader"` // 允许的响应头
|
||||
MaxAgeSeconds int `xml:"MaxAgeSeconds"` // 最大的缓存时间
|
||||
}
|
||||
|
||||
// GetBucketCORSResult defines the result from GetBucketCORS request.
|
||||
// GetBucketCORSResult GetBucketCORS请求返回的结果
|
||||
type GetBucketCORSResult CORSXML
|
||||
|
||||
// GetBucketInfoResult defines the result from GetBucketInfo request.
|
||||
// GetBucketInfoResult GetBucketInfo请求返回结果
|
||||
type GetBucketInfoResult struct {
|
||||
XMLName xml.Name `xml:"BucketInfo"`
|
||||
BucketInfo BucketInfo `xml:"Bucket"`
|
||||
XMLName xml.Name `xml:"BucketInfo"`
|
||||
BucketInfo BucketInfo `xml:"Bucket"`
|
||||
}
|
||||
|
||||
// BucketInfo defines Bucket information
|
||||
// BucketInfo Bucket信息
|
||||
type BucketInfo struct {
|
||||
XMLName xml.Name `xml:"Bucket"`
|
||||
Name string `xml:"Name"` // Bucket name
|
||||
Location string `xml:"Location"` // Bucket datacenter
|
||||
CreationDate time.Time `xml:"CreationDate"` // Bucket creation time
|
||||
ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket external endpoint
|
||||
IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket internal endpoint
|
||||
ACL string `xml:"AccessControlList>Grant"` // Bucket ACL
|
||||
Owner Owner `xml:"Owner"` // Bucket owner
|
||||
StorageClass string `xml:"StorageClass"` // Bucket storage class
|
||||
Name string `xml:"Name"` // Bucket名称
|
||||
Location string `xml:"Location"` // Bucket所在的数据中心
|
||||
CreationDate time.Time `xml:"CreationDate"` // Bucket创建时间
|
||||
ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket访问的外网域名
|
||||
IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket访问的内网域名
|
||||
ACL string `xml:"AccessControlList>Grant"` // Bucket权限
|
||||
Owner Owner `xml:"Owner"` // Bucket拥有者信息
|
||||
}
|
||||
|
||||
// ListObjectsResult defines the result from ListObjects request
|
||||
// ListObjectsResult ListObjects请求返回结果
|
||||
type ListObjectsResult struct {
|
||||
XMLName xml.Name `xml:"ListBucketResult"`
|
||||
Prefix string `xml:"Prefix"` // The object prefix
|
||||
Marker string `xml:"Marker"` // The marker filter.
|
||||
MaxKeys int `xml:"MaxKeys"` // Max keys to return
|
||||
Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name
|
||||
IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false)
|
||||
NextMarker string `xml:"NextMarker"` // The start point of the next query
|
||||
Objects []ObjectProperties `xml:"Contents"` // Object list
|
||||
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter
|
||||
Prefix string `xml:"Prefix"` // 本次查询结果的开始前缀
|
||||
Marker string `xml:"Marker"` // 这次查询的起点
|
||||
MaxKeys int `xml:"MaxKeys"` // 请求返回结果的最大数目
|
||||
Delimiter string `xml:"Delimiter"` // 对Object名字进行分组的字符
|
||||
IsTruncated bool `xml:"IsTruncated"` // 是否所有的结果都已经返回
|
||||
NextMarker string `xml:"NextMarker"` // 下一次查询的起点
|
||||
Objects []ObjectProperties `xml:"Contents"` // Object类别
|
||||
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // 以delimiter结尾并有共同前缀的Object的集合
|
||||
}
|
||||
|
||||
// ObjectProperties defines Objecct properties
|
||||
// ObjectProperties Objecct属性
|
||||
type ObjectProperties struct {
|
||||
XMLName xml.Name `xml:"Contents"`
|
||||
Key string `xml:"Key"` // Object key
|
||||
Type string `xml:"Type"` // Object type
|
||||
Size int64 `xml:"Size"` // Object size
|
||||
ETag string `xml:"ETag"` // Object ETag
|
||||
Owner Owner `xml:"Owner"` // Object owner information
|
||||
LastModified time.Time `xml:"LastModified"` // Object last modified time
|
||||
StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive)
|
||||
Key string `xml:"Key"` // Object的Key
|
||||
Type string `xml:"Type"` // Object Type
|
||||
Size int64 `xml:"Size"` // Object的长度字节数
|
||||
ETag string `xml:"ETag"` // 标示Object的内容
|
||||
Owner Owner `xml:"Owner"` // 保存Object拥有者信息的容器
|
||||
LastModified time.Time `xml:"LastModified"` // Object最后修改时间
|
||||
StorageClass string `xml:"StorageClass"` // Object的存储类型,目前只能是Standard
|
||||
}
|
||||
|
||||
// Owner defines Bucket/Object's owner
|
||||
// Owner Bucket/Object的owner
|
||||
type Owner struct {
|
||||
XMLName xml.Name `xml:"Owner"`
|
||||
ID string `xml:"ID"` // Owner ID
|
||||
DisplayName string `xml:"DisplayName"` // Owner's display name
|
||||
ID string `xml:"ID"` // 用户ID
|
||||
DisplayName string `xml:"DisplayName"` // Owner名字
|
||||
}
|
||||
|
||||
// CopyObjectResult defines result object of CopyObject
|
||||
// CopyObjectResult CopyObject请求返回的结果
|
||||
type CopyObjectResult struct {
|
||||
XMLName xml.Name `xml:"CopyObjectResult"`
|
||||
LastModified time.Time `xml:"LastModified"` // New object's last modified time.
|
||||
ETag string `xml:"ETag"` // New object's ETag
|
||||
LastModified time.Time `xml:"LastModified"` // 新Object最后更新时间
|
||||
ETag string `xml:"ETag"` // 新Object的ETag值
|
||||
}
|
||||
|
||||
// GetObjectACLResult defines result of GetObjectACL request
|
||||
// GetObjectACLResult GetObjectACL请求返回的结果
|
||||
type GetObjectACLResult GetBucketACLResult
|
||||
|
||||
type deleteXML struct {
|
||||
XMLName xml.Name `xml:"Delete"`
|
||||
Objects []DeleteObject `xml:"Object"` // Objects to delete
|
||||
Quiet bool `xml:"Quiet"` // Flag of quiet mode.
|
||||
Objects []DeleteObject `xml:"Object"` // 删除的所有Object
|
||||
Quiet bool `xml:"Quiet"` // 安静响应模式
|
||||
}
|
||||
|
||||
// DeleteObject defines the struct for deleting object
|
||||
// DeleteObject 删除的Object
|
||||
type DeleteObject struct {
|
||||
XMLName xml.Name `xml:"Object"`
|
||||
Key string `xml:"Key"` // Object name
|
||||
Key string `xml:"Key"` // Object名称
|
||||
}
|
||||
|
||||
// DeleteObjectsResult defines result of DeleteObjects request
|
||||
// DeleteObjectsResult DeleteObjects请求返回结果
|
||||
type DeleteObjectsResult struct {
|
||||
XMLName xml.Name `xml:"DeleteResult"`
|
||||
DeletedObjects []string `xml:"Deleted>Key"` // Deleted object list
|
||||
DeletedObjects []string `xml:"Deleted>Key"` // 删除的Object列表
|
||||
}
|
||||
|
||||
// InitiateMultipartUploadResult defines result of InitiateMultipartUpload request
|
||||
// InitiateMultipartUploadResult InitiateMultipartUpload请求返回结果
|
||||
type InitiateMultipartUploadResult struct {
|
||||
XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
|
||||
Bucket string `xml:"Bucket"` // Bucket name
|
||||
Key string `xml:"Key"` // Object name to upload
|
||||
UploadID string `xml:"UploadId"` // Generated UploadId
|
||||
Bucket string `xml:"Bucket"` // Bucket名称
|
||||
Key string `xml:"Key"` // 上传Object名称
|
||||
UploadID string `xml:"UploadId"` // 生成的UploadId
|
||||
}
|
||||
|
||||
// UploadPart defines the upload/copy part
|
||||
// UploadPart 上传/拷贝的分片
|
||||
type UploadPart struct {
|
||||
XMLName xml.Name `xml:"Part"`
|
||||
PartNumber int `xml:"PartNumber"` // Part number
|
||||
ETag string `xml:"ETag"` // ETag value of the part's data
|
||||
PartNumber int `xml:"PartNumber"` // Part编号
|
||||
ETag string `xml:"ETag"` // ETag缓存码
|
||||
}
|
||||
|
||||
type uploadParts []UploadPart
|
||||
|
@ -297,10 +295,10 @@ func (slice uploadParts) Swap(i, j int) {
|
|||
slice[i], slice[j] = slice[j], slice[i]
|
||||
}
|
||||
|
||||
// UploadPartCopyResult defines result object of multipart copy request.
|
||||
// UploadPartCopyResult 拷贝分片请求返回的结果
|
||||
type UploadPartCopyResult struct {
|
||||
XMLName xml.Name `xml:"CopyPartResult"`
|
||||
LastModified time.Time `xml:"LastModified"` // Last modified time
|
||||
LastModified time.Time `xml:"LastModified"` // 最后修改时间
|
||||
ETag string `xml:"ETag"` // ETag
|
||||
}
|
||||
|
||||
|
@ -309,69 +307,61 @@ type completeMultipartUploadXML struct {
|
|||
Part []UploadPart `xml:"Part"`
|
||||
}
|
||||
|
||||
// CompleteMultipartUploadResult defines result object of CompleteMultipartUploadRequest
|
||||
// CompleteMultipartUploadResult 提交分片上传任务返回结果
|
||||
type CompleteMultipartUploadResult struct {
|
||||
XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
|
||||
Location string `xml:"Location"` // Object URL
|
||||
Bucket string `xml:"Bucket"` // Bucket name
|
||||
ETag string `xml:"ETag"` // Object ETag
|
||||
Key string `xml:"Key"` // Object name
|
||||
Location string `xml:"Location"` // Object的URL
|
||||
Bucket string `xml:"Bucket"` // Bucket名称
|
||||
ETag string `xml:"ETag"` // Object的ETag
|
||||
Key string `xml:"Key"` // Object的名字
|
||||
}
|
||||
|
||||
// ListUploadedPartsResult defines result object of ListUploadedParts
|
||||
// ListUploadedPartsResult ListUploadedParts请求返回结果
|
||||
type ListUploadedPartsResult struct {
|
||||
XMLName xml.Name `xml:"ListPartsResult"`
|
||||
Bucket string `xml:"Bucket"` // Bucket name
|
||||
Key string `xml:"Key"` // Object name
|
||||
UploadID string `xml:"UploadId"` // Upload ID
|
||||
NextPartNumberMarker string `xml:"NextPartNumberMarker"` // Next part number
|
||||
MaxParts int `xml:"MaxParts"` // Max parts count
|
||||
IsTruncated bool `xml:"IsTruncated"` // Flag indicates all entries returned.false: all entries returned.
|
||||
UploadedParts []UploadedPart `xml:"Part"` // Uploaded parts
|
||||
Bucket string `xml:"Bucket"` // Bucket名称
|
||||
Key string `xml:"Key"` // Object名称
|
||||
UploadID string `xml:"UploadId"` // 上传Id
|
||||
NextPartNumberMarker string `xml:"NextPartNumberMarker"` // 下一个Part的位置
|
||||
MaxParts int `xml:"MaxParts"` // 最大Part个数
|
||||
IsTruncated bool `xml:"IsTruncated"` // 是否完全上传完成
|
||||
UploadedParts []UploadedPart `xml:"Part"` // 已完成的Part
|
||||
}
|
||||
|
||||
// UploadedPart defines uploaded part
|
||||
// UploadedPart 该任务已经上传的分片
|
||||
type UploadedPart struct {
|
||||
XMLName xml.Name `xml:"Part"`
|
||||
PartNumber int `xml:"PartNumber"` // Part number
|
||||
LastModified time.Time `xml:"LastModified"` // Last modified time
|
||||
ETag string `xml:"ETag"` // ETag cache
|
||||
Size int `xml:"Size"` // Part size
|
||||
PartNumber int `xml:"PartNumber"` // Part编号
|
||||
LastModified time.Time `xml:"LastModified"` // 最后一次修改时间
|
||||
ETag string `xml:"ETag"` // ETag缓存码
|
||||
Size int `xml:"Size"` // Part大小
|
||||
}
|
||||
|
||||
// ListMultipartUploadResult defines result object of ListMultipartUpload
|
||||
// ListMultipartUploadResult ListMultipartUpload请求返回结果
|
||||
type ListMultipartUploadResult struct {
|
||||
XMLName xml.Name `xml:"ListMultipartUploadsResult"`
|
||||
Bucket string `xml:"Bucket"` // Bucket name
|
||||
Delimiter string `xml:"Delimiter"` // Delimiter for grouping object.
|
||||
Prefix string `xml:"Prefix"` // Object prefix
|
||||
KeyMarker string `xml:"KeyMarker"` // Object key marker
|
||||
UploadIDMarker string `xml:"UploadIdMarker"` // UploadId marker
|
||||
NextKeyMarker string `xml:"NextKeyMarker"` // Next key marker, if not all entries returned.
|
||||
NextUploadIDMarker string `xml:"NextUploadIdMarker"` // Next uploadId marker, if not all entries returned.
|
||||
MaxUploads int `xml:"MaxUploads"` // Max uploads to return
|
||||
IsTruncated bool `xml:"IsTruncated"` // Flag indicates all entries are returned.
|
||||
Uploads []UncompletedUpload `xml:"Upload"` // Ongoing uploads (not completed, not aborted)
|
||||
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // Common prefixes list.
|
||||
Bucket string `xml:"Bucket"` // Bucket名称
|
||||
Delimiter string `xml:"Delimiter"` // 分组分割符
|
||||
Prefix string `xml:"Prefix"` // 筛选前缀
|
||||
KeyMarker string `xml:"KeyMarker"` // 起始Object位置
|
||||
UploadIDMarker string `xml:"UploadIdMarker"` // 起始UploadId位置
|
||||
NextKeyMarker string `xml:"NextKeyMarker"` // 如果没有全部返回,标明接下去的KeyMarker位置
|
||||
NextUploadIDMarker string `xml:"NextUploadIdMarker"` // 如果没有全部返回,标明接下去的UploadId位置
|
||||
MaxUploads int `xml:"MaxUploads"` // 返回最大Upload数目
|
||||
IsTruncated bool `xml:"IsTruncated"` // 是否完全返回
|
||||
Uploads []UncompletedUpload `xml:"Upload"` // 未完成上传的MultipartUpload
|
||||
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // 所有名字包含指定的前缀且第一次出现delimiter字符之间的object作为一组的分组结果
|
||||
}
|
||||
|
||||
// UncompletedUpload structure wraps an uncompleted upload task
|
||||
// UncompletedUpload 未完成的Upload任务
|
||||
type UncompletedUpload struct {
|
||||
XMLName xml.Name `xml:"Upload"`
|
||||
Key string `xml:"Key"` // Object name
|
||||
UploadID string `xml:"UploadId"` // The UploadId
|
||||
Initiated time.Time `xml:"Initiated"` // Initialization time in the format such as 2012-02-23T04:18:23.000Z
|
||||
Key string `xml:"Key"` // Object名称
|
||||
UploadID string `xml:"UploadId"` // 对应UploadId
|
||||
Initiated time.Time `xml:"Initiated"` // 初始化时间,格式2012-02-23T04:18:23.000Z
|
||||
}
|
||||
|
||||
// ProcessObjectResult defines result object of ProcessObject
|
||||
type ProcessObjectResult struct {
|
||||
Bucket string `json:"bucket"`
|
||||
FileSize int `json:"fileSize"`
|
||||
Object string `json:"object"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// decodeDeleteObjectsResult decodes deleting objects result in URL encoding
|
||||
// 解析URL编码
|
||||
func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
|
||||
var err error
|
||||
for i := 0; i < len(result.DeletedObjects); i++ {
|
||||
|
@ -383,7 +373,7 @@ func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// decodeListObjectsResult decodes list objects result in URL encoding
|
||||
// 解析URL编码
|
||||
func decodeListObjectsResult(result *ListObjectsResult) error {
|
||||
var err error
|
||||
result.Prefix, err = url.QueryUnescape(result.Prefix)
|
||||
|
@ -417,17 +407,7 @@ func decodeListObjectsResult(result *ListObjectsResult) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// decodeListUploadedPartsResult decodes
|
||||
func decodeListUploadedPartsResult(result *ListUploadedPartsResult) error {
|
||||
var err error
|
||||
result.Key, err = url.QueryUnescape(result.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// decodeListMultipartUploadResult decodes list multipart upload result in URL encoding
|
||||
// 解析URL编码
|
||||
func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
|
||||
var err error
|
||||
result.Prefix, err = url.QueryUnescape(result.Prefix)
|
||||
|
@ -460,9 +440,3 @@ func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createBucketConfiguration defines the configuration for creating a bucket.
|
||||
type createBucketConfiguration struct {
|
||||
XMLName xml.Name `xml:"CreateBucketConfiguration"`
|
||||
StorageClass StorageClassType `xml:"StorageClass,omitempty"`
|
||||
}
|
||||
|
|
|
@ -3,79 +3,61 @@ package oss
|
|||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UploadFile is multipart file upload.
|
||||
//
|
||||
// objectKey the object name.
|
||||
// filePath the local file path to upload.
|
||||
// partSize the part size in byte.
|
||||
// options the options for uploading object.
|
||||
// UploadFile 分片上传文件
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
// objectKey object名称。
|
||||
// filePath 本地文件。需要上传的文件。
|
||||
// partSize 本次上传文件片的大小,字节数。比如100 * 1024为每片100KB。
|
||||
// options 上传Object时可以指定Object的属性。详见InitiateMultipartUpload。
|
||||
//
|
||||
// error 操作成功为nil,非nil为错误信息。
|
||||
//
|
||||
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
|
||||
if partSize < MinPartSize || partSize > MaxPartSize {
|
||||
return errors.New("oss: part size invalid range (100KB, 5GB]")
|
||||
return errors.New("oss: part size invalid range (1024KB, 5GB]")
|
||||
}
|
||||
|
||||
cpConf, err := getCpConfig(options, filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
|
||||
if cpFilePath != "" {
|
||||
return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
|
||||
}
|
||||
if cpConf.IsEnable {
|
||||
return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
|
||||
}
|
||||
|
||||
return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
|
||||
}
|
||||
|
||||
func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
|
||||
absPath, _ := filepath.Abs(srcFile)
|
||||
cpFileName := getCpFileName(absPath, dest)
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
// ----- 并发无断点的上传 -----
|
||||
|
||||
// ----- concurrent upload without checkpoint -----
|
||||
|
||||
// getCpConfig gets checkpoint configuration
|
||||
func getCpConfig(options []Option) *cpConfig {
|
||||
// 获取Checkpoint配置
|
||||
func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
|
||||
cpc := &cpConfig{}
|
||||
cpcOpt, err := findOption(options, checkpointConfig, nil)
|
||||
if err != nil || cpcOpt == nil {
|
||||
return nil
|
||||
return cpc, err
|
||||
}
|
||||
|
||||
return cpcOpt.(*cpConfig)
|
||||
cpc = cpcOpt.(*cpConfig)
|
||||
if cpc.IsEnable && cpc.FilePath == "" {
|
||||
cpc.FilePath = filePath + CheckpointFileSuffix
|
||||
}
|
||||
|
||||
return cpc, nil
|
||||
}
|
||||
|
||||
// getCpFileName return the name of the checkpoint file
|
||||
func getCpFileName(src, dest string) string {
|
||||
md5Ctx := md5.New()
|
||||
md5Ctx.Write([]byte(src))
|
||||
srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
|
||||
|
||||
md5Ctx.Reset()
|
||||
md5Ctx.Write([]byte(dest))
|
||||
destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
|
||||
|
||||
return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
|
||||
}
|
||||
|
||||
// getRoutines gets the routine count. by default it's 1.
|
||||
// 获取并发数,默认并发数1
|
||||
func getRoutines(options []Option) int {
|
||||
rtnOpt, err := findOption(options, routineNum, nil)
|
||||
if err != nil || rtnOpt == nil {
|
||||
|
@ -92,17 +74,7 @@ func getRoutines(options []Option) int {
|
|||
return rs
|
||||
}
|
||||
|
||||
// getPayer return the payer of the request
|
||||
func getPayer(options []Option) string {
|
||||
payerOpt, err := findOption(options, HTTPHeaderOSSRequester, nil)
|
||||
if err != nil || payerOpt == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return payerOpt.(string)
|
||||
}
|
||||
|
||||
// getProgressListener gets the progress callback
|
||||
// 获取进度回调
|
||||
func getProgressListener(options []Option) ProgressListener {
|
||||
isSet, listener, _ := isOptionSet(options, progressListener)
|
||||
if !isSet {
|
||||
|
@ -111,7 +83,7 @@ func getProgressListener(options []Option) ProgressListener {
|
|||
return listener.(ProgressListener)
|
||||
}
|
||||
|
||||
// uploadPartHook is for testing usage
|
||||
// 测试使用
|
||||
type uploadPartHook func(id int, chunk FileChunk) error
|
||||
|
||||
var uploadPartHooker uploadPartHook = defaultUploadPart
|
||||
|
@ -120,23 +92,22 @@ func defaultUploadPart(id int, chunk FileChunk) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// workerArg defines worker argument structure
|
||||
// 工作协程参数
|
||||
type workerArg struct {
|
||||
bucket *Bucket
|
||||
filePath string
|
||||
imur InitiateMultipartUploadResult
|
||||
options []Option
|
||||
hook uploadPartHook
|
||||
}
|
||||
|
||||
// worker is the worker coroutine function
|
||||
// 工作协程
|
||||
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
|
||||
for chunk := range jobs {
|
||||
if err := arg.hook(id, chunk); err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, arg.options...)
|
||||
part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number)
|
||||
if err != nil {
|
||||
failed <- err
|
||||
break
|
||||
|
@ -150,7 +121,7 @@ func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadP
|
|||
}
|
||||
}
|
||||
|
||||
// scheduler function
|
||||
// 调度协程
|
||||
func scheduler(jobs chan FileChunk, chunks []FileChunk) {
|
||||
for _, chunk := range chunks {
|
||||
jobs <- chunk
|
||||
|
@ -166,7 +137,7 @@ func getTotalBytes(chunks []FileChunk) int64 {
|
|||
return tb
|
||||
}
|
||||
|
||||
// uploadFile is a concurrent upload, without checkpoint
|
||||
// 并发上传,不带断点续传功能
|
||||
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
|
||||
listener := getProgressListener(options)
|
||||
|
||||
|
@ -175,13 +146,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
|
|||
return err
|
||||
}
|
||||
|
||||
payerOptions := []Option{}
|
||||
payer := getPayer(options)
|
||||
if payer != "" {
|
||||
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
|
||||
}
|
||||
|
||||
// Initialize the multipart upload
|
||||
// 初始化上传任务
|
||||
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -197,16 +162,16 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
|
|||
event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the worker coroutine
|
||||
arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
|
||||
// 启动工作协程
|
||||
arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go worker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Schedule the jobs
|
||||
// 并发上传分片
|
||||
go scheduler(jobs, chunks)
|
||||
|
||||
// Waiting for the upload finished
|
||||
// 等待分配分片上传完成
|
||||
completed := 0
|
||||
parts := make([]UploadPart, len(chunks))
|
||||
for completed < len(chunks) {
|
||||
|
@ -221,7 +186,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
|
|||
close(die)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
|
||||
publishProgress(listener, event)
|
||||
bucket.AbortMultipartUpload(imur, payerOptions...)
|
||||
bucket.AbortMultipartUpload(imur)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -233,43 +198,43 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
|
|||
event = newProgressEvent(TransferStartedEvent, completedBytes, totalBytes)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Complete the multpart upload
|
||||
_, err = bucket.CompleteMultipartUpload(imur, parts, payerOptions...)
|
||||
// 提交任务
|
||||
_, err = bucket.CompleteMultipartUpload(imur, parts)
|
||||
if err != nil {
|
||||
bucket.AbortMultipartUpload(imur, payerOptions...)
|
||||
bucket.AbortMultipartUpload(imur)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----- concurrent upload with checkpoint -----
|
||||
// ----- 并发带断点的上传 -----
|
||||
const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
|
||||
|
||||
type uploadCheckpoint struct {
|
||||
Magic string // Magic
|
||||
MD5 string // Checkpoint file content's MD5
|
||||
FilePath string // Local file path
|
||||
FileStat cpStat // File state
|
||||
ObjectKey string // Key
|
||||
UploadID string // Upload ID
|
||||
Parts []cpPart // All parts of the local file
|
||||
Magic string // magic
|
||||
MD5 string // cp内容的MD5
|
||||
FilePath string // 本地文件
|
||||
FileStat cpStat // 文件状态
|
||||
ObjectKey string // key
|
||||
UploadID string // upload id
|
||||
Parts []cpPart // 本地文件的全部分片
|
||||
}
|
||||
|
||||
type cpStat struct {
|
||||
Size int64 // File size
|
||||
LastModified time.Time // File's last modified time
|
||||
MD5 string // Local file's MD5
|
||||
Size int64 // 文件大小
|
||||
LastModified time.Time // 本地文件最后修改时间
|
||||
MD5 string // 本地文件MD5
|
||||
}
|
||||
|
||||
type cpPart struct {
|
||||
Chunk FileChunk // File chunk
|
||||
Part UploadPart // Uploaded part
|
||||
IsCompleted bool // Upload complete flag
|
||||
Chunk FileChunk // 分片
|
||||
Part UploadPart // 上传完成的分片
|
||||
IsCompleted bool // upload是否完成
|
||||
}
|
||||
|
||||
// isValid checks if the uploaded data is valid---it's valid when the file is not updated and the checkpoint data is valid.
|
||||
// CP数据是否有效,CP有效且文件没有更新时有效
|
||||
func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
|
||||
// Compare the CP's magic number and MD5.
|
||||
// 比较CP的Magic及MD5
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
|
@ -280,7 +245,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
|
|||
return false, nil
|
||||
}
|
||||
|
||||
// Make sure if the local file is updated.
|
||||
// 确认本地文件是否更新
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
@ -297,7 +262,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
|
|||
return false, err
|
||||
}
|
||||
|
||||
// Compare the file size, file's last modified time and file's MD5
|
||||
// 比较文件大小/文件最后更新时间/文件MD5
|
||||
if cp.FileStat.Size != st.Size() ||
|
||||
cp.FileStat.LastModified != st.ModTime() ||
|
||||
cp.FileStat.MD5 != md {
|
||||
|
@ -307,7 +272,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
|
|||
return true, nil
|
||||
}
|
||||
|
||||
// load loads from the file
|
||||
// 从文件中load
|
||||
func (cp *uploadCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
|
@ -318,11 +283,11 @@ func (cp *uploadCheckpoint) load(filePath string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// dump dumps to the local file
|
||||
// dump到文件
|
||||
func (cp *uploadCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
// 计算MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
|
@ -332,23 +297,23 @@ func (cp *uploadCheckpoint) dump(filePath string) error {
|
|||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialization
|
||||
// 序列化
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
// dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// updatePart updates the part status
|
||||
// 更新分片状态
|
||||
func (cp *uploadCheckpoint) updatePart(part UploadPart) {
|
||||
cp.Parts[part.PartNumber-1].Part = part
|
||||
cp.Parts[part.PartNumber-1].IsCompleted = true
|
||||
}
|
||||
|
||||
// todoParts returns unfinished parts
|
||||
// 未完成的分片
|
||||
func (cp *uploadCheckpoint) todoParts() []FileChunk {
|
||||
fcs := []FileChunk{}
|
||||
for _, part := range cp.Parts {
|
||||
|
@ -359,7 +324,7 @@ func (cp *uploadCheckpoint) todoParts() []FileChunk {
|
|||
return fcs
|
||||
}
|
||||
|
||||
// allParts returns all parts
|
||||
// 所有的分片
|
||||
func (cp *uploadCheckpoint) allParts() []UploadPart {
|
||||
ps := []UploadPart{}
|
||||
for _, part := range cp.Parts {
|
||||
|
@ -368,7 +333,7 @@ func (cp *uploadCheckpoint) allParts() []UploadPart {
|
|||
return ps
|
||||
}
|
||||
|
||||
// getCompletedBytes returns completed bytes count
|
||||
// 完成的字节数
|
||||
func (cp *uploadCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for _, part := range cp.Parts {
|
||||
|
@ -379,19 +344,19 @@ func (cp *uploadCheckpoint) getCompletedBytes() int64 {
|
|||
return completedBytes
|
||||
}
|
||||
|
||||
// calcFileMD5 calculates the MD5 for the specified local file
|
||||
// 计算文件文件MD5
|
||||
func calcFileMD5(filePath string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// prepare initializes the multipart upload
|
||||
// 初始化分片上传
|
||||
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
|
||||
// CP
|
||||
// cp
|
||||
cp.Magic = uploadCpMagic
|
||||
cp.FilePath = filePath
|
||||
cp.ObjectKey = objectKey
|
||||
|
||||
// Local file
|
||||
// localfile
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -410,7 +375,7 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
|
|||
}
|
||||
cp.FileStat.MD5 = md
|
||||
|
||||
// Chunks
|
||||
// chunks
|
||||
parts, err := SplitFileByPartSize(filePath, partSize)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -422,7 +387,7 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
|
|||
cp.Parts[i].IsCompleted = false
|
||||
}
|
||||
|
||||
// Init load
|
||||
// init load
|
||||
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -432,11 +397,11 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
|
|||
return nil
|
||||
}
|
||||
|
||||
// complete completes the multipart upload and deletes the local CP files
|
||||
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
|
||||
// 提交分片上传,删除CP文件
|
||||
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
|
||||
imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
|
||||
Key: cp.ObjectKey, UploadID: cp.UploadID}
|
||||
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
|
||||
_, err := bucket.CompleteMultipartUpload(imur, parts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -444,24 +409,18 @@ func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePa
|
|||
return err
|
||||
}
|
||||
|
||||
// uploadFileWithCp handles concurrent upload with checkpoint
|
||||
// 并发带断点的上传
|
||||
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
|
||||
listener := getProgressListener(options)
|
||||
|
||||
payerOptions := []Option{}
|
||||
payer := getPayer(options)
|
||||
if payer != "" {
|
||||
payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
|
||||
}
|
||||
|
||||
// Load CP data
|
||||
// LOAD CP数据
|
||||
ucp := uploadCheckpoint{}
|
||||
err := ucp.load(cpFilePath)
|
||||
if err != nil {
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Load error or the CP data is invalid.
|
||||
// LOAD出错或数据无效重新初始化上传
|
||||
valid, err := ucp.isValid(filePath)
|
||||
if err != nil || !valid {
|
||||
if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
|
||||
|
@ -485,16 +444,16 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
|
|||
event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the workers
|
||||
arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
|
||||
// 启动工作协程
|
||||
arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go worker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Schedule jobs
|
||||
// 并发上传分片
|
||||
go scheduler(jobs, chunks)
|
||||
|
||||
// Waiting for the job finished
|
||||
// 等待分配分片上传完成
|
||||
completed := 0
|
||||
for completed < len(chunks) {
|
||||
select {
|
||||
|
@ -520,7 +479,7 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
|
|||
event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Complete the multipart upload
|
||||
err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, payerOptions)
|
||||
// 提交分片上传
|
||||
err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -9,27 +9,25 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// userAgent gets user agent
|
||||
// It has the SDK version information, OS information and GO version
|
||||
func userAgent() string {
|
||||
// Get User Agent
|
||||
// Go sdk相关信息,包括sdk版本,操作系统类型,GO版本
|
||||
var userAgent = func() string {
|
||||
sys := getSysInfo()
|
||||
return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
|
||||
sys.release, sys.machine, runtime.Version())
|
||||
}
|
||||
}()
|
||||
|
||||
type sysInfo struct {
|
||||
name string // OS name such as windows/Linux
|
||||
release string // OS version 2.6.32-220.23.2.ali1089.el5.x86_64 etc
|
||||
machine string // CPU type amd64/x86_64
|
||||
name string // 操作系统名称windows/Linux
|
||||
release string // 操作系统版本 2.6.32-220.23.2.ali1089.el5.x86_64等
|
||||
machine string // 机器类型amd64/x86_64
|
||||
}
|
||||
|
||||
// getSysInfo gets system info
|
||||
// gets the OS information and CPU type
|
||||
// Get system info
|
||||
// 获取操作系统信息、机器类型
|
||||
func getSysInfo() sysInfo {
|
||||
name := runtime.GOOS
|
||||
release := "-"
|
||||
|
@ -46,106 +44,8 @@ func getSysInfo() sysInfo {
|
|||
return sysInfo{name: name, release: release, machine: machine}
|
||||
}
|
||||
|
||||
// unpackedRange
|
||||
type unpackedRange struct {
|
||||
hasStart bool // Flag indicates if the start point is specified
|
||||
hasEnd bool // Flag indicates if the end point is specified
|
||||
start int64 // Start point
|
||||
end int64 // End point
|
||||
}
|
||||
|
||||
// invalidRangeError returns invalid range error
|
||||
func invalidRangeError(r string) error {
|
||||
return fmt.Errorf("InvalidRange %s", r)
|
||||
}
|
||||
|
||||
// parseRange parse various styles of range such as bytes=M-N
|
||||
func parseRange(normalizedRange string) (*unpackedRange, error) {
|
||||
var err error
|
||||
hasStart := false
|
||||
hasEnd := false
|
||||
var start int64
|
||||
var end int64
|
||||
|
||||
// Bytes==M-N or ranges=M-N
|
||||
nrSlice := strings.Split(normalizedRange, "=")
|
||||
if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
|
||||
return nil, invalidRangeError(normalizedRange)
|
||||
}
|
||||
|
||||
// Bytes=M-N,X-Y
|
||||
rSlice := strings.Split(nrSlice[1], ",")
|
||||
rStr := rSlice[0]
|
||||
|
||||
if strings.HasSuffix(rStr, "-") { // M-
|
||||
startStr := rStr[:len(rStr)-1]
|
||||
start, err = strconv.ParseInt(startStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, invalidRangeError(normalizedRange)
|
||||
}
|
||||
hasStart = true
|
||||
} else if strings.HasPrefix(rStr, "-") { // -N
|
||||
len := rStr[1:]
|
||||
end, err = strconv.ParseInt(len, 10, 64)
|
||||
if err != nil {
|
||||
return nil, invalidRangeError(normalizedRange)
|
||||
}
|
||||
if end == 0 { // -0
|
||||
return nil, invalidRangeError(normalizedRange)
|
||||
}
|
||||
hasEnd = true
|
||||
} else { // M-N
|
||||
valSlice := strings.Split(rStr, "-")
|
||||
if len(valSlice) != 2 {
|
||||
return nil, invalidRangeError(normalizedRange)
|
||||
}
|
||||
start, err = strconv.ParseInt(valSlice[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, invalidRangeError(normalizedRange)
|
||||
}
|
||||
hasStart = true
|
||||
end, err = strconv.ParseInt(valSlice[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, invalidRangeError(normalizedRange)
|
||||
}
|
||||
hasEnd = true
|
||||
}
|
||||
|
||||
return &unpackedRange{hasStart, hasEnd, start, end}, nil
|
||||
}
|
||||
|
||||
// adjustRange returns adjusted range, adjust the range according to the length of the file
|
||||
func adjustRange(ur *unpackedRange, size int64) (start, end int64) {
|
||||
if ur == nil {
|
||||
return 0, size
|
||||
}
|
||||
|
||||
if ur.hasStart && ur.hasEnd {
|
||||
start = ur.start
|
||||
end = ur.end + 1
|
||||
if ur.start < 0 || ur.start >= size || ur.end > size || ur.start > ur.end {
|
||||
start = 0
|
||||
end = size
|
||||
}
|
||||
} else if ur.hasStart {
|
||||
start = ur.start
|
||||
end = size
|
||||
if ur.start < 0 || ur.start >= size {
|
||||
start = 0
|
||||
}
|
||||
} else if ur.hasEnd {
|
||||
start = size - ur.end
|
||||
end = size
|
||||
if ur.end < 0 || ur.end > size {
|
||||
start = 0
|
||||
end = size
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC.
|
||||
// gets the current time in Unix time, in seconds.
|
||||
// 获取当前时间,从UTC开始的秒数。
|
||||
func GetNowSec() int64 {
|
||||
return time.Now().Unix()
|
||||
}
|
||||
|
@ -154,25 +54,25 @@ func GetNowSec() int64 {
|
|||
// since January 1, 1970 UTC. The result is undefined if the Unix time
|
||||
// in nanoseconds cannot be represented by an int64. Note that this
|
||||
// means the result of calling UnixNano on the zero Time is undefined.
|
||||
// gets the current time in Unix time, in nanoseconds.
|
||||
// 获取当前时间,从UTC开始的纳秒。
|
||||
func GetNowNanoSec() int64 {
|
||||
return time.Now().UnixNano()
|
||||
}
|
||||
|
||||
// GetNowGMT gets the current time in GMT format.
|
||||
// GetNowGMT 获取当前时间,格式形如"Mon, 02 Jan 2006 15:04:05 GMT",HTTP中使用的时间格式
|
||||
func GetNowGMT() string {
|
||||
return time.Now().UTC().Format(http.TimeFormat)
|
||||
}
|
||||
|
||||
// FileChunk is the file chunk definition
|
||||
// FileChunk 文件片定义
|
||||
type FileChunk struct {
|
||||
Number int // Chunk number
|
||||
Offset int64 // Chunk offset
|
||||
Size int64 // Chunk size.
|
||||
Number int // 块序号
|
||||
Offset int64 // 块在文件中的偏移量
|
||||
Size int64 // 块大小
|
||||
}
|
||||
|
||||
// SplitFileByPartNum splits big file into parts by the num of parts.
|
||||
// Split the file with specified parts count, returns the split result when error is nil.
|
||||
// SplitFileByPartNum Split big file to part by the num of part
|
||||
// 按指定的块数分割文件。返回值FileChunk为分割结果,error为nil时有效。
|
||||
func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
|
||||
if chunkNum <= 0 || chunkNum > 10000 {
|
||||
return nil, errors.New("chunkNum invalid")
|
||||
|
@ -210,8 +110,8 @@ func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
|
|||
return chunks, nil
|
||||
}
|
||||
|
||||
// SplitFileByPartSize splits big file into parts by the size of parts.
|
||||
// Splits the file by the part size. Returns the FileChunk when error is nil.
|
||||
// SplitFileByPartSize Split big file to part by the size of part
|
||||
// 按块大小分割文件。返回值FileChunk为分割结果,error为nil时有效。
|
||||
func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
|
||||
if chunkSize <= 0 {
|
||||
return nil, errors.New("chunkSize invalid")
|
||||
|
@ -229,7 +129,7 @@ func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error)
|
|||
}
|
||||
var chunkN = stat.Size() / chunkSize
|
||||
if chunkN >= 10000 {
|
||||
return nil, errors.New("Too many parts, please increase part size")
|
||||
return nil, errors.New("Too many parts, please increase part size.")
|
||||
}
|
||||
|
||||
var chunks []FileChunk
|
||||
|
@ -251,7 +151,7 @@ func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error)
|
|||
return chunks, nil
|
||||
}
|
||||
|
||||
// GetPartEnd calculates the end position
|
||||
// GetPartEnd 计算结束位置
|
||||
func GetPartEnd(begin int64, total int64, per int64) int64 {
|
||||
if begin+per > total {
|
||||
return total - 1
|
||||
|
@ -259,7 +159,7 @@ func GetPartEnd(begin int64, total int64, per int64) int64 {
|
|||
return begin + per - 1
|
||||
}
|
||||
|
||||
// crcTable returns the table constructed from the specified polynomial
|
||||
// crcTable returns the Table constructed from the specified polynomial
|
||||
var crcTable = func() *crc64.Table {
|
||||
return crc64.MakeTable(crc64.ECMA)
|
||||
}
|
||||
|
|
|
@ -2,8 +2,8 @@ language: go
|
|||
|
||||
go:
|
||||
- 1.6
|
||||
- 1.9
|
||||
- '1.10'
|
||||
- 1.7
|
||||
- 1.8
|
||||
|
||||
install:
|
||||
- go get github.com/mattn/goveralls
|
||||
|
|
|
@ -7,17 +7,9 @@ XPath
|
|||
|
||||
XPath is Go package provides selecting nodes from XML, HTML or other documents using XPath expression.
|
||||
|
||||
Implementation
|
||||
===
|
||||
[XQuery](https://github.com/antchfx/xquery) : lets you extract data from HTML/XML documents using XPath package.
|
||||
|
||||
- [htmlquery](https://github.com/antchfx/htmlquery) - an XPath query package for HTML document
|
||||
|
||||
- [xmlquery](https://github.com/antchfx/xmlquery) - an XPath query package for XML document.
|
||||
|
||||
- [jsonquery](https://github.com/antchfx/jsonquery) - an XPath query package for JSON document
|
||||
|
||||
Supported Features
|
||||
===
|
||||
### Features
|
||||
|
||||
#### The basic XPath patterns.
|
||||
|
||||
|
@ -53,10 +45,7 @@ Supported Features
|
|||
|
||||
- `//b` : Returns elements in the entire document matching b.
|
||||
|
||||
- `a|b` : All nodes matching a or b, union operation(not boolean or).
|
||||
|
||||
- `(a, b, c)` : Evaluates each of its operands and concatenates the resulting sequences, in order, into a single result sequence
|
||||
|
||||
- `a|b` : All nodes matching a or b.
|
||||
|
||||
#### Node Axes
|
||||
|
||||
|
@ -108,60 +97,23 @@ Supported Features
|
|||
* a div b Divide
|
||||
* a mod b Floating point mod, like Java.
|
||||
|
||||
- `a or b` : Boolean `or` operation.
|
||||
|
||||
- `a and b` : Boolean `and` operation.
|
||||
|
||||
- `(expr)` : Parenthesized expressions.
|
||||
|
||||
- `fun(arg1, ..., argn)` : Function calls:
|
||||
- `fun(arg1, ..., argn)` : Function calls.
|
||||
|
||||
| Function | Supported |
|
||||
| --- | --- |
|
||||
`boolean()`| ✓ |
|
||||
`ceiling()`| ✓ |
|
||||
`choose()`| ✗ |
|
||||
`concat()`| ✓ |
|
||||
`contains()`| ✓ |
|
||||
`count()`| ✓ |
|
||||
`current()`| ✗ |
|
||||
`document()`| ✗ |
|
||||
`element-available()`| ✗ |
|
||||
`ends-with()`| ✓ |
|
||||
`false()`| ✓ |
|
||||
`floor()`| ✓ |
|
||||
`format-number()`| ✗ |
|
||||
`function-available()`| ✗ |
|
||||
`generate-id()`| ✗ |
|
||||
`id()`| ✗ |
|
||||
`key()`| ✗ |
|
||||
`lang()`| ✗ |
|
||||
`last()`| ✓ |
|
||||
`local-name()`| ✓ |
|
||||
`name()`| ✓ |
|
||||
`namespace-uri()`| ✓ |
|
||||
`normalize-space()`| ✓ |
|
||||
`not()`| ✓ |
|
||||
`number()`| ✓ |
|
||||
`position()`| ✓ |
|
||||
`round()`| ✓ |
|
||||
`starts-with()`| ✓ |
|
||||
`string()`| ✓ |
|
||||
`string-length()`| ✓ |
|
||||
`substring()`| ✓ |
|
||||
`substring-after()`| ✓ |
|
||||
`substring-before()`| ✓ |
|
||||
`sum()`| ✓ |
|
||||
`system-property()`| ✗ |
|
||||
`translate()`| ✓ |
|
||||
`true()`| ✓ |
|
||||
`unparsed-entity-url()` | ✗ |
|
||||
* position()
|
||||
* last()
|
||||
* count( node-set )
|
||||
* name()
|
||||
* starts-with( string, string )
|
||||
* normalize-space( string )
|
||||
* substring( string , start [, length] )
|
||||
* not( expression )
|
||||
* string-length( [string] )
|
||||
* contains( string, string )
|
||||
* sum( node-set )
|
||||
* concat( string1 , string2 [, stringn]* )
|
||||
|
||||
Changelogs
|
||||
===
|
||||
- `a or b` : Boolean or.
|
||||
|
||||
2019-01-29
|
||||
- improvement `normalize-space` function. [#32](https://github.com/antchfx/xpath/issues/32)
|
||||
|
||||
2018-12-07
|
||||
- supports XPath 2.0 Sequence expressions. [#30](https://github.com/antchfx/xpath/pull/30) by [@minherz](https://github.com/minherz).
|
||||
- `a and b` : Boolean and.
|
|
@ -23,12 +23,9 @@ type builder struct {
|
|||
func axisPredicate(root *axisNode) func(NodeNavigator) bool {
|
||||
// get current axix node type.
|
||||
typ := ElementNode
|
||||
switch root.AxeType {
|
||||
case "attribute":
|
||||
if root.AxeType == "attribute" {
|
||||
typ = AttributeNode
|
||||
case "self", "parent":
|
||||
typ = allNode
|
||||
default:
|
||||
} else {
|
||||
switch root.Prop {
|
||||
case "comment":
|
||||
typ = CommentNode
|
||||
|
@ -37,17 +34,12 @@ func axisPredicate(root *axisNode) func(NodeNavigator) bool {
|
|||
// case "processing-instruction":
|
||||
// typ = ProcessingInstructionNode
|
||||
case "node":
|
||||
typ = allNode
|
||||
typ = ElementNode
|
||||
}
|
||||
}
|
||||
nametest := root.LocalName != "" || root.Prefix != ""
|
||||
predicate := func(n NodeNavigator) bool {
|
||||
if typ == n.NodeType() || typ == allNode || typ == TextNode {
|
||||
if nametest {
|
||||
if root.LocalName == n.LocalName() && root.Prefix == n.Prefix() {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
if typ == n.NodeType() || typ == TextNode {
|
||||
if root.LocalName == "" || (root.LocalName == n.LocalName() && root.Prefix == n.Prefix()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
@ -69,16 +61,18 @@ func (b *builder) processAxisNode(root *axisNode) (query, error) {
|
|||
if root.Input == nil {
|
||||
qyInput = &contextQuery{}
|
||||
} else {
|
||||
if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
|
||||
if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
|
||||
var qyGrandInput query
|
||||
if input.Input != nil {
|
||||
qyGrandInput, _ = b.processNode(input.Input)
|
||||
} else {
|
||||
qyGrandInput = &contextQuery{}
|
||||
if b.flag&filterFlag == 0 {
|
||||
if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
|
||||
if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
|
||||
var qyGrandInput query
|
||||
if input.Input != nil {
|
||||
qyGrandInput, _ = b.processNode(input.Input)
|
||||
} else {
|
||||
qyGrandInput = &contextQuery{}
|
||||
}
|
||||
qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: predicate, Self: true}
|
||||
return qyOutput, nil
|
||||
}
|
||||
qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: predicate, Self: true}
|
||||
return qyOutput, nil
|
||||
}
|
||||
}
|
||||
qyInput, err = b.processNode(root.Input)
|
||||
|
@ -163,16 +157,6 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
|
|||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: startwithFunc(arg1, arg2)}
|
||||
case "ends-with":
|
||||
arg1, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
arg2, err := b.processNode(root.Args[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: endwithFunc(arg1, arg2)}
|
||||
case "contains":
|
||||
arg1, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
|
@ -205,25 +189,6 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
|
|||
}
|
||||
}
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: substringFunc(arg1, arg2, arg3)}
|
||||
case "substring-before", "substring-after":
|
||||
//substring-xxxx( haystack, needle )
|
||||
if len(root.Args) != 2 {
|
||||
return nil, errors.New("xpath: substring-before function must have two parameters")
|
||||
}
|
||||
var (
|
||||
arg1, arg2 query
|
||||
err error
|
||||
)
|
||||
if arg1, err = b.processNode(root.Args[0]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if arg2, err = b.processNode(root.Args[1]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{
|
||||
Input: b.firstInput,
|
||||
Func: substringIndFunc(arg1, arg2, root.FuncName == "substring-after"),
|
||||
}
|
||||
case "string-length":
|
||||
// string-length( [string] )
|
||||
if len(root.Args) < 1 {
|
||||
|
@ -243,25 +208,6 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
|
|||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc}
|
||||
case "translate":
|
||||
//translate( string , string, string )
|
||||
if len(root.Args) != 3 {
|
||||
return nil, errors.New("xpath: translate function must have three parameters")
|
||||
}
|
||||
var (
|
||||
arg1, arg2, arg3 query
|
||||
err error
|
||||
)
|
||||
if arg1, err = b.processNode(root.Args[0]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if arg2, err = b.processNode(root.Args[1]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if arg3, err = b.processNode(root.Args[2]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: translateFunc(arg1, arg2, arg3)}
|
||||
case "not":
|
||||
if len(root.Args) == 0 {
|
||||
return nil, errors.New("xpath: not function must have at least one parameter")
|
||||
|
@ -271,62 +217,12 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
|
|||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: argQuery, Func: notFunc}
|
||||
case "name", "local-name", "namespace-uri":
|
||||
inp := b.firstInput
|
||||
if len(root.Args) > 1 {
|
||||
return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
|
||||
}
|
||||
if len(root.Args) == 1 {
|
||||
argQuery, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inp = argQuery
|
||||
}
|
||||
f := &functionQuery{Input: inp}
|
||||
switch root.FuncName {
|
||||
case "name":
|
||||
f.Func = nameFunc
|
||||
case "local-name":
|
||||
f.Func = localNameFunc
|
||||
case "namespace-uri":
|
||||
f.Func = namespaceFunc
|
||||
}
|
||||
qyOutput = f
|
||||
case "true", "false":
|
||||
val := root.FuncName == "true"
|
||||
qyOutput = &functionQuery{
|
||||
Input: b.firstInput,
|
||||
Func: func(_ query, _ iterator) interface{} {
|
||||
return val
|
||||
},
|
||||
}
|
||||
case "name":
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: nameFunc}
|
||||
case "last":
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc}
|
||||
case "position":
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc}
|
||||
case "boolean", "number", "string":
|
||||
inp := b.firstInput
|
||||
if len(root.Args) > 1 {
|
||||
return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
|
||||
}
|
||||
if len(root.Args) == 1 {
|
||||
argQuery, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inp = argQuery
|
||||
}
|
||||
f := &functionQuery{Input: inp}
|
||||
switch root.FuncName {
|
||||
case "boolean":
|
||||
f.Func = booleanFunc
|
||||
case "string":
|
||||
f.Func = stringFunc
|
||||
case "number":
|
||||
f.Func = numberFunc
|
||||
}
|
||||
qyOutput = f
|
||||
case "count":
|
||||
//if b.firstInput == nil {
|
||||
// return nil, errors.New("xpath: expression must evaluate to node-set")
|
||||
|
@ -348,24 +244,6 @@ func (b *builder) processFunctionNode(root *functionNode) (query, error) {
|
|||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: argQuery, Func: sumFunc}
|
||||
case "ceiling", "floor", "round":
|
||||
if len(root.Args) == 0 {
|
||||
return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets")
|
||||
}
|
||||
argQuery, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := &functionQuery{Input: argQuery}
|
||||
switch root.FuncName {
|
||||
case "ceiling":
|
||||
f.Func = ceilingFunc
|
||||
case "floor":
|
||||
f.Func = floorFunc
|
||||
case "round":
|
||||
f.Func = roundFunc
|
||||
}
|
||||
qyOutput = f
|
||||
case "concat":
|
||||
if len(root.Args) < 2 {
|
||||
return nil, fmt.Errorf("xpath: concat() must have at least two arguments")
|
||||
|
@ -426,14 +304,12 @@ func (b *builder) processOperatorNode(root *operatorNode) (query, error) {
|
|||
exprFunc = neFunc
|
||||
}
|
||||
qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc}
|
||||
case "or", "and":
|
||||
case "or", "and", "|":
|
||||
isOr := false
|
||||
if root.Op == "or" {
|
||||
if root.Op == "or" || root.Op == "|" {
|
||||
isOr = true
|
||||
}
|
||||
qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr}
|
||||
case "|":
|
||||
qyOutput = &unionQuery{Left: left, Right: right}
|
||||
}
|
||||
return qyOutput, nil
|
||||
}
|
||||
|
|
|
@ -2,9 +2,6 @@ package xpath
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
@ -83,146 +80,16 @@ func sumFunc(q query, t iterator) interface{} {
|
|||
case float64:
|
||||
sum = typ
|
||||
case string:
|
||||
v, err := strconv.ParseFloat(typ, 64)
|
||||
if err != nil {
|
||||
panic(errors.New("sum() function argument type must be a node-set or number"))
|
||||
if v, err := strconv.ParseFloat(typ, 64); err != nil {
|
||||
sum = v
|
||||
}
|
||||
sum = v
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func asNumber(t iterator, o interface{}) float64 {
|
||||
switch typ := o.(type) {
|
||||
case query:
|
||||
node := typ.Select(t)
|
||||
if node == nil {
|
||||
return float64(0)
|
||||
}
|
||||
if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
|
||||
return v
|
||||
}
|
||||
case float64:
|
||||
return typ
|
||||
case string:
|
||||
v, err := strconv.ParseFloat(typ, 64)
|
||||
if err != nil {
|
||||
panic(errors.New("ceiling() function argument type must be a node-set or number"))
|
||||
}
|
||||
return v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// ceilingFunc is a XPath Node Set functions ceiling(node-set).
|
||||
func ceilingFunc(q query, t iterator) interface{} {
|
||||
val := asNumber(t, q.Evaluate(t))
|
||||
return math.Ceil(val)
|
||||
}
|
||||
|
||||
// floorFunc is a XPath Node Set functions floor(node-set).
|
||||
func floorFunc(q query, t iterator) interface{} {
|
||||
val := asNumber(t, q.Evaluate(t))
|
||||
return math.Floor(val)
|
||||
}
|
||||
|
||||
// roundFunc is a XPath Node Set functions round(node-set).
|
||||
func roundFunc(q query, t iterator) interface{} {
|
||||
val := asNumber(t, q.Evaluate(t))
|
||||
//return math.Round(val)
|
||||
return round(val)
|
||||
}
|
||||
|
||||
// nameFunc is a XPath functions name([node-set]).
|
||||
func nameFunc(q query, t iterator) interface{} {
|
||||
v := q.Select(t)
|
||||
if v == nil {
|
||||
return ""
|
||||
}
|
||||
ns := v.Prefix()
|
||||
if ns == "" {
|
||||
return v.LocalName()
|
||||
}
|
||||
return ns + ":" + v.LocalName()
|
||||
}
|
||||
|
||||
// localNameFunc is a XPath functions local-name([node-set]).
|
||||
func localNameFunc(q query, t iterator) interface{} {
|
||||
v := q.Select(t)
|
||||
if v == nil {
|
||||
return ""
|
||||
}
|
||||
return v.LocalName()
|
||||
}
|
||||
|
||||
// namespaceFunc is a XPath functions namespace-uri([node-set]).
|
||||
func namespaceFunc(q query, t iterator) interface{} {
|
||||
v := q.Select(t)
|
||||
if v == nil {
|
||||
return ""
|
||||
}
|
||||
return v.Prefix()
|
||||
}
|
||||
|
||||
func asBool(t iterator, v interface{}) bool {
|
||||
switch v := v.(type) {
|
||||
case nil:
|
||||
return false
|
||||
case *NodeIterator:
|
||||
return v.MoveNext()
|
||||
case bool:
|
||||
return bool(v)
|
||||
case float64:
|
||||
return v != 0
|
||||
case string:
|
||||
return v != ""
|
||||
case query:
|
||||
return v.Select(t) != nil
|
||||
default:
|
||||
panic(fmt.Errorf("unexpected type: %T", v))
|
||||
}
|
||||
}
|
||||
|
||||
func asString(t iterator, v interface{}) string {
|
||||
switch v := v.(type) {
|
||||
case nil:
|
||||
return ""
|
||||
case bool:
|
||||
if v {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
case float64:
|
||||
return strconv.FormatFloat(v, 'g', -1, 64)
|
||||
case string:
|
||||
return v
|
||||
case query:
|
||||
node := v.Select(t)
|
||||
if node == nil {
|
||||
return ""
|
||||
}
|
||||
return node.Value()
|
||||
default:
|
||||
panic(fmt.Errorf("unexpected type: %T", v))
|
||||
}
|
||||
}
|
||||
|
||||
// booleanFunc is a XPath functions boolean([node-set]).
|
||||
func booleanFunc(q query, t iterator) interface{} {
|
||||
v := q.Evaluate(t)
|
||||
return asBool(t, v)
|
||||
}
|
||||
|
||||
// numberFunc is a XPath functions number([node-set]).
|
||||
func numberFunc(q query, t iterator) interface{} {
|
||||
v := q.Evaluate(t)
|
||||
return asNumber(t, v)
|
||||
}
|
||||
|
||||
// stringFunc is a XPath functions string([node-set]).
|
||||
func stringFunc(q query, t iterator) interface{} {
|
||||
v := q.Evaluate(t)
|
||||
return asString(t, v)
|
||||
return t.Current().LocalName()
|
||||
}
|
||||
|
||||
// startwithFunc is a XPath functions starts-with(string, string).
|
||||
|
@ -252,33 +119,6 @@ func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
|
|||
}
|
||||
}
|
||||
|
||||
// endwithFunc is a XPath functions ends-with(string, string).
|
||||
func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
|
||||
return func(q query, t iterator) interface{} {
|
||||
var (
|
||||
m, n string
|
||||
ok bool
|
||||
)
|
||||
switch typ := arg1.Evaluate(t).(type) {
|
||||
case string:
|
||||
m = typ
|
||||
case query:
|
||||
node := typ.Select(t)
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
m = node.Value()
|
||||
default:
|
||||
panic(errors.New("ends-with() function argument type must be string"))
|
||||
}
|
||||
n, ok = arg2.Evaluate(t).(string)
|
||||
if !ok {
|
||||
panic(errors.New("ends-with() function argument type must be string"))
|
||||
}
|
||||
return strings.HasSuffix(m, n)
|
||||
}
|
||||
}
|
||||
|
||||
// containsFunc is a XPath functions contains(string or @attr, string).
|
||||
func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
|
||||
return func(q query, t iterator) interface{} {
|
||||
|
@ -309,11 +149,6 @@ func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
|
|||
}
|
||||
}
|
||||
|
||||
var (
|
||||
regnewline = regexp.MustCompile(`[\r\n\t]`)
|
||||
regseqspace = regexp.MustCompile(`\s{2,}`)
|
||||
)
|
||||
|
||||
// normalizespaceFunc is XPath functions normalize-space(string?)
|
||||
func normalizespaceFunc(q query, t iterator) interface{} {
|
||||
var m string
|
||||
|
@ -323,14 +158,11 @@ func normalizespaceFunc(q query, t iterator) interface{} {
|
|||
case query:
|
||||
node := typ.Select(t)
|
||||
if node == nil {
|
||||
return ""
|
||||
return false
|
||||
}
|
||||
m = node.Value()
|
||||
}
|
||||
m = strings.TrimSpace(m)
|
||||
m = regnewline.ReplaceAllString(m, " ")
|
||||
m = regseqspace.ReplaceAllString(m, " ")
|
||||
return m
|
||||
return strings.TrimSpace(m)
|
||||
}
|
||||
|
||||
// substringFunc is XPath functions substring function returns a part of a given string.
|
||||
|
@ -343,7 +175,7 @@ func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
|
|||
case query:
|
||||
node := typ.Select(t)
|
||||
if node == nil {
|
||||
return ""
|
||||
return false
|
||||
}
|
||||
m = node.Value()
|
||||
}
|
||||
|
@ -353,10 +185,7 @@ func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
|
|||
|
||||
if start, ok = arg2.Evaluate(t).(float64); !ok {
|
||||
panic(errors.New("substring() function first argument type must be int"))
|
||||
} else if start < 1 {
|
||||
panic(errors.New("substring() function first argument type must be >= 1"))
|
||||
}
|
||||
start--
|
||||
if arg3 != nil {
|
||||
if length, ok = arg3.Evaluate(t).(float64); !ok {
|
||||
panic(errors.New("substring() function second argument type must be int"))
|
||||
|
@ -372,46 +201,6 @@ func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
|
|||
}
|
||||
}
|
||||
|
||||
// substringIndFunc is XPath functions substring-before/substring-after function returns a part of a given string.
|
||||
func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} {
|
||||
return func(q query, t iterator) interface{} {
|
||||
var str string
|
||||
switch v := arg1.Evaluate(t).(type) {
|
||||
case string:
|
||||
str = v
|
||||
case query:
|
||||
node := v.Select(t)
|
||||
if node == nil {
|
||||
return ""
|
||||
}
|
||||
str = node.Value()
|
||||
}
|
||||
var word string
|
||||
switch v := arg2.Evaluate(t).(type) {
|
||||
case string:
|
||||
word = v
|
||||
case query:
|
||||
node := v.Select(t)
|
||||
if node == nil {
|
||||
return ""
|
||||
}
|
||||
word = node.Value()
|
||||
}
|
||||
if word == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
i := strings.Index(str, word)
|
||||
if i < 0 {
|
||||
return ""
|
||||
}
|
||||
if after {
|
||||
return str[i+len(word):]
|
||||
}
|
||||
return str[:i]
|
||||
}
|
||||
}
|
||||
|
||||
// stringLengthFunc is XPATH string-length( [string] ) function that returns a number
|
||||
// equal to the number of characters in a given string.
|
||||
func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
|
||||
|
@ -430,25 +219,6 @@ func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
|
|||
}
|
||||
}
|
||||
|
||||
// translateFunc is XPath functions translate() function returns a replaced string.
|
||||
func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
|
||||
return func(q query, t iterator) interface{} {
|
||||
str := asString(t, arg1.Evaluate(t))
|
||||
src := asString(t, arg2.Evaluate(t))
|
||||
dst := asString(t, arg3.Evaluate(t))
|
||||
|
||||
var replace []string
|
||||
for i, s := range src {
|
||||
d := ""
|
||||
if i < len(dst) {
|
||||
d = string(dst[i])
|
||||
}
|
||||
replace = append(replace, string(s), d)
|
||||
}
|
||||
return strings.NewReplacer(replace...).Replace(str)
|
||||
}
|
||||
}
|
||||
|
||||
// notFunc is XPATH functions not(expression) function operation.
|
||||
func notFunc(q query, t iterator) interface{} {
|
||||
switch v := q.Evaluate(t).(type) {
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
// +build go1.10
|
||||
|
||||
package xpath
|
||||
|
||||
import "math"
|
||||
|
||||
func round(f float64) int {
|
||||
return int(math.Round(f))
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
// +build !go1.10
|
||||
|
||||
package xpath
|
||||
|
||||
import "math"
|
||||
|
||||
// math.Round() is supported by Go 1.10+,
|
||||
// This method just compatible for version <1.10.
|
||||
// https://github.com/golang/go/issues/20100
|
||||
func round(f float64) int {
|
||||
if math.Abs(f) < 0.5 {
|
||||
return 0
|
||||
}
|
||||
return int(f + math.Copysign(0.5, f))
|
||||
}
|
|
@ -42,7 +42,7 @@ const (
|
|||
itemString // Quoted string constant
|
||||
itemNumber // Number constant
|
||||
itemAxe // Axe (like child::)
|
||||
itemEOF // END
|
||||
itemEof // END
|
||||
)
|
||||
|
||||
// A node is an XPath node in the parse tree.
|
||||
|
@ -389,7 +389,7 @@ Loop:
|
|||
}
|
||||
|
||||
// Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep
|
||||
func (p *parser) parseStep(n node) (opnd node) {
|
||||
func (p *parser) parseStep(n node) node {
|
||||
axeTyp := "child" // default axes value.
|
||||
if p.r.typ == itemDot || p.r.typ == itemDotDot {
|
||||
if p.r.typ == itemDot {
|
||||
|
@ -398,45 +398,23 @@ func (p *parser) parseStep(n node) (opnd node) {
|
|||
axeTyp = "parent"
|
||||
}
|
||||
p.next()
|
||||
opnd = newAxisNode(axeTyp, "", "", "", n)
|
||||
if p.r.typ != itemLBracket {
|
||||
return opnd
|
||||
}
|
||||
} else {
|
||||
switch p.r.typ {
|
||||
case itemAt:
|
||||
p.next()
|
||||
axeTyp = "attribute"
|
||||
case itemAxe:
|
||||
axeTyp = p.r.name
|
||||
p.next()
|
||||
case itemLParens:
|
||||
return p.parseSequence(n)
|
||||
}
|
||||
opnd = p.parseNodeTest(n, axeTyp)
|
||||
return newAxisNode(axeTyp, "", "", "", n)
|
||||
}
|
||||
switch p.r.typ {
|
||||
case itemAt:
|
||||
p.next()
|
||||
axeTyp = "attribute"
|
||||
case itemAxe:
|
||||
axeTyp = p.r.name
|
||||
p.next()
|
||||
}
|
||||
opnd := p.parseNodeTest(n, axeTyp)
|
||||
for p.r.typ == itemLBracket {
|
||||
opnd = newFilterNode(opnd, p.parsePredicate(opnd))
|
||||
}
|
||||
return opnd
|
||||
}
|
||||
|
||||
// Expr ::= '(' Step ("," Step)* ')'
|
||||
func (p *parser) parseSequence(n node) (opnd node) {
|
||||
p.skipItem(itemLParens)
|
||||
opnd = p.parseStep(n)
|
||||
for {
|
||||
if p.r.typ != itemComma {
|
||||
break
|
||||
}
|
||||
p.next()
|
||||
opnd2 := p.parseStep(n)
|
||||
opnd = newOperatorNode("|", opnd, opnd2)
|
||||
}
|
||||
p.skipItem(itemRParens)
|
||||
return opnd
|
||||
}
|
||||
|
||||
// NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')'
|
||||
func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) {
|
||||
switch p.r.typ {
|
||||
|
@ -650,7 +628,7 @@ func (s *scanner) nextChar() bool {
|
|||
return false
|
||||
}
|
||||
s.curr = rune(s.text[s.pos])
|
||||
s.pos++
|
||||
s.pos += 1
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -658,7 +636,7 @@ func (s *scanner) nextItem() bool {
|
|||
s.skipSpace()
|
||||
switch s.curr {
|
||||
case 0:
|
||||
s.typ = itemEOF
|
||||
s.typ = itemEof
|
||||
return false
|
||||
case ',', '@', '(', ')', '|', '*', '[', ']', '+', '-', '=', '#', '$':
|
||||
s.typ = asItemType(s.curr)
|
||||
|
|
|
@ -71,7 +71,7 @@ func (a *ancestorQuery) Select(t iterator) NodeNavigator {
|
|||
}
|
||||
for node.MoveToParent() {
|
||||
if !a.Predicate(node) {
|
||||
continue
|
||||
break
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
@ -707,79 +707,16 @@ func (b *booleanQuery) Select(t iterator) NodeNavigator {
|
|||
|
||||
func (b *booleanQuery) Evaluate(t iterator) interface{} {
|
||||
m := b.Left.Evaluate(t)
|
||||
left := asBool(t, m)
|
||||
if b.IsOr && left {
|
||||
return true
|
||||
} else if !b.IsOr && !left {
|
||||
return false
|
||||
if m.(bool) == b.IsOr {
|
||||
return m
|
||||
}
|
||||
m = b.Right.Evaluate(t)
|
||||
return asBool(t, m)
|
||||
return b.Right.Evaluate(t)
|
||||
}
|
||||
|
||||
func (b *booleanQuery) Clone() query {
|
||||
return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()}
|
||||
}
|
||||
|
||||
type unionQuery struct {
|
||||
Left, Right query
|
||||
iterator func() NodeNavigator
|
||||
}
|
||||
|
||||
func (u *unionQuery) Select(t iterator) NodeNavigator {
|
||||
if u.iterator == nil {
|
||||
var list []NodeNavigator
|
||||
var i int
|
||||
root := t.Current().Copy()
|
||||
for {
|
||||
node := u.Left.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
node = node.Copy()
|
||||
list = append(list, node)
|
||||
}
|
||||
t.Current().MoveTo(root)
|
||||
for {
|
||||
node := u.Right.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
node = node.Copy()
|
||||
var exists bool
|
||||
for _, x := range list {
|
||||
if reflect.DeepEqual(x, node) {
|
||||
exists = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
list = append(list, node)
|
||||
}
|
||||
}
|
||||
u.iterator = func() NodeNavigator {
|
||||
if i >= len(list) {
|
||||
return nil
|
||||
}
|
||||
node := list[i]
|
||||
i++
|
||||
return node
|
||||
}
|
||||
}
|
||||
return u.iterator()
|
||||
}
|
||||
|
||||
func (u *unionQuery) Evaluate(t iterator) interface{} {
|
||||
u.iterator = nil
|
||||
u.Left.Evaluate(t)
|
||||
u.Right.Evaluate(t)
|
||||
return u
|
||||
}
|
||||
|
||||
func (u *unionQuery) Clone() query {
|
||||
return &unionQuery{Left: u.Left.Clone(), Right: u.Right.Clone()}
|
||||
}
|
||||
|
||||
func getNodePosition(q query) int {
|
||||
type Position interface {
|
||||
position() int
|
||||
|
|
|
@ -22,9 +22,6 @@ const (
|
|||
|
||||
// CommentNode is a comment node, such as <!-- my comment -->
|
||||
CommentNode
|
||||
|
||||
// allNode is any types of node, used by xpath package only to predicate match.
|
||||
allNode
|
||||
)
|
||||
|
||||
// NodeNavigator provides cursor model for navigating XML data.
|
||||
|
|
|
@ -66,15 +66,10 @@ func (n *Node) InnerText() string {
|
|||
|
||||
func outputXML(buf *bytes.Buffer, n *Node) {
|
||||
if n.Type == TextNode || n.Type == CommentNode {
|
||||
xml.EscapeText(buf, []byte(strings.TrimSpace(n.Data)))
|
||||
buf.WriteString(strings.TrimSpace(n.Data))
|
||||
return
|
||||
}
|
||||
if n.Type == DeclarationNode {
|
||||
buf.WriteString("<?" + n.Data)
|
||||
} else {
|
||||
buf.WriteString("<" + n.Data)
|
||||
}
|
||||
|
||||
buf.WriteString("<" + n.Data)
|
||||
for _, attr := range n.Attr {
|
||||
if attr.Name.Space != "" {
|
||||
buf.WriteString(fmt.Sprintf(` %s:%s="%s"`, attr.Name.Space, attr.Name.Local, attr.Value))
|
||||
|
@ -82,17 +77,11 @@ func outputXML(buf *bytes.Buffer, n *Node) {
|
|||
buf.WriteString(fmt.Sprintf(` %s="%s"`, attr.Name.Local, attr.Value))
|
||||
}
|
||||
}
|
||||
if n.Type == DeclarationNode {
|
||||
buf.WriteString("?>")
|
||||
} else {
|
||||
buf.WriteString(">")
|
||||
}
|
||||
buf.WriteString(">")
|
||||
for child := n.FirstChild; child != nil; child = child.NextSibling {
|
||||
outputXML(buf, child)
|
||||
}
|
||||
if n.Type != DeclarationNode {
|
||||
buf.WriteString(fmt.Sprintf("</%s>", n.Data))
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf("</%s>", n.Data))
|
||||
}
|
||||
|
||||
// OutputXML returns the text that including tags name.
|
||||
|
@ -139,9 +128,6 @@ func addChild(parent, n *Node) {
|
|||
}
|
||||
|
||||
func addSibling(sibling, n *Node) {
|
||||
for t := sibling.NextSibling; t != nil; t = t.NextSibling {
|
||||
sibling = t
|
||||
}
|
||||
n.Parent = sibling.Parent
|
||||
sibling.NextSibling = n
|
||||
n.PrevSibling = sibling
|
||||
|
@ -259,3 +245,8 @@ quit:
|
|||
func Parse(r io.Reader) (*Node, error) {
|
||||
return parse(r)
|
||||
}
|
||||
|
||||
// ParseXML returns the parse tree for the XML from the given Reader.Deprecated.
|
||||
func ParseXML(r io.Reader) (*Node, error) {
|
||||
return parse(r)
|
||||
}
|
||||
|
|
|
@ -1,11 +0,0 @@
|
|||
module github.com/dylanmei/winrmtest
|
||||
|
||||
require (
|
||||
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e // indirect
|
||||
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/satori/go.uuid v1.2.0
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd // indirect
|
||||
golang.org/x/text v0.3.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
)
|
|
@ -1,17 +0,0 @@
|
|||
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e h1:ptBAamGVd6CfRsUtyHD+goy2JGhv1QC32v3gqM8mYAM=
|
||||
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
|
||||
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0 h1:JaCC8jz0zdMLk2m+qCCVLLLM/PL93p84w4pK3aJWj60=
|
||||
github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
@ -1 +0,0 @@
|
|||
module github.com/google/uuid
|
|
@ -48,7 +48,6 @@ func setNodeInterface(name string) bool {
|
|||
// does not specify a specific interface generate a random Node ID
|
||||
// (section 4.1.6)
|
||||
if name == "" {
|
||||
ifname = "random"
|
||||
randomBits(nodeID[:])
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2018 Google Inc. All rights reserved.
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
|
@ -35,43 +35,20 @@ const (
|
|||
|
||||
var rander = rand.Reader // random function
|
||||
|
||||
// Parse decodes s into a UUID or returns an error. Both the standard UUID
|
||||
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
|
||||
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
|
||||
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
|
||||
// Parse decodes s into a UUID or returns an error. Both the UUID form of
|
||||
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
|
||||
func Parse(s string) (UUID, error) {
|
||||
var uuid UUID
|
||||
switch len(s) {
|
||||
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36:
|
||||
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9:
|
||||
if len(s) != 36 {
|
||||
if len(s) != 36+9 {
|
||||
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
|
||||
}
|
||||
if strings.ToLower(s[:9]) != "urn:uuid:" {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
|
||||
}
|
||||
s = s[9:]
|
||||
|
||||
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
||||
case 36 + 2:
|
||||
s = s[1:]
|
||||
|
||||
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
case 32:
|
||||
var ok bool
|
||||
for i := range uuid {
|
||||
uuid[i], ok = xtob(s[i*2], s[i*2+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
|
@ -93,29 +70,15 @@ func Parse(s string) (UUID, error) {
|
|||
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
|
||||
func ParseBytes(b []byte) (UUID, error) {
|
||||
var uuid UUID
|
||||
switch len(b) {
|
||||
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if len(b) != 36 {
|
||||
if len(b) != 36+9 {
|
||||
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
|
||||
}
|
||||
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
|
||||
}
|
||||
b = b[9:]
|
||||
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
||||
b = b[1:]
|
||||
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
var ok bool
|
||||
for i := 0; i < 32; i += 2 {
|
||||
uuid[i/2], ok = xtob(b[i], b[i+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
|
@ -134,16 +97,6 @@ func ParseBytes(b []byte) (UUID, error) {
|
|||
return uuid, nil
|
||||
}
|
||||
|
||||
// MustParse is like Parse but panics if the string cannot be parsed.
|
||||
// It simplifies safe initialization of global variables holding compiled UUIDs.
|
||||
func MustParse(s string) UUID {
|
||||
uuid, err := Parse(s)
|
||||
if err != nil {
|
||||
panic(`uuid: Parse(` + s + `): ` + err.Error())
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
|
||||
// does not have a length of 16. The bytes are copied from the slice.
|
||||
func FromBytes(b []byte) (uuid UUID, err error) {
|
||||
|
@ -177,7 +130,7 @@ func (uuid UUID) URN() string {
|
|||
}
|
||||
|
||||
func encodeHex(dst []byte, uuid UUID) {
|
||||
hex.Encode(dst, uuid[:4])
|
||||
hex.Encode(dst[:], uuid[:4])
|
||||
dst[8] = '-'
|
||||
hex.Encode(dst[9:13], uuid[4:6])
|
||||
dst[13] = '-'
|
||||
|
|
|
@ -545,11 +545,7 @@ func v3auth(cloud *Cloud, opts *ClientOpts) (*gophercloud.AuthOptions, error) {
|
|||
scope := new(gophercloud.AuthScope)
|
||||
|
||||
// Application credentials don't support scope
|
||||
if isApplicationCredential(cloud.AuthInfo) {
|
||||
// If Domain* is set, but UserDomain* or ProjectDomain* aren't,
|
||||
// then use Domain* as the default setting.
|
||||
cloud = setDomainIfNeeded(cloud)
|
||||
} else {
|
||||
if !isApplicationCredential(cloud.AuthInfo) {
|
||||
if !isProjectScoped(cloud.AuthInfo) {
|
||||
if cloud.AuthInfo.DomainID != "" {
|
||||
scope.DomainID = cloud.AuthInfo.DomainID
|
||||
|
|
|
@ -4,118 +4,118 @@ package clientconfig
|
|||
// The format of the clouds-public.yml is documented at
|
||||
// https://docs.openstack.org/python-openstackclient/latest/configuration/
|
||||
type PublicClouds struct {
|
||||
Clouds map[string]Cloud `yaml:"public-clouds" json:"public-clouds"`
|
||||
Clouds map[string]Cloud `yaml:"public-clouds"`
|
||||
}
|
||||
|
||||
// Clouds represents a collection of Cloud entries in a clouds.yaml file.
|
||||
// The format of clouds.yaml is documented at
|
||||
// https://docs.openstack.org/os-client-config/latest/user/configuration.html.
|
||||
type Clouds struct {
|
||||
Clouds map[string]Cloud `yaml:"clouds" json:"clouds"`
|
||||
Clouds map[string]Cloud `yaml:"clouds"`
|
||||
}
|
||||
|
||||
// Cloud represents an entry in a clouds.yaml/public-clouds.yaml/secure.yaml file.
|
||||
type Cloud struct {
|
||||
Cloud string `yaml:"cloud" json:"cloud"`
|
||||
Profile string `yaml:"profile" json:"profile"`
|
||||
AuthInfo *AuthInfo `yaml:"auth" json:"auth"`
|
||||
AuthType AuthType `yaml:"auth_type" json:"auth_type"`
|
||||
RegionName string `yaml:"region_name" json:"region_name"`
|
||||
Regions []interface{} `yaml:"regions" json:"regions"`
|
||||
Cloud string `yaml:"cloud"`
|
||||
Profile string `yaml:"profile"`
|
||||
AuthInfo *AuthInfo `yaml:"auth"`
|
||||
AuthType AuthType `yaml:"auth_type"`
|
||||
RegionName string `yaml:"region_name"`
|
||||
Regions []interface{} `yaml:"regions"`
|
||||
|
||||
// API Version overrides.
|
||||
IdentityAPIVersion string `yaml:"identity_api_version" json:"identity_api_version"`
|
||||
VolumeAPIVersion string `yaml:"volume_api_version" json:"volume_api_version"`
|
||||
IdentityAPIVersion string `yaml:"identity_api_version"`
|
||||
VolumeAPIVersion string `yaml:"volume_api_version"`
|
||||
|
||||
// Verify whether or not SSL API requests should be verified.
|
||||
Verify *bool `yaml:"verify" json:"verify"`
|
||||
Verify *bool `yaml:"verify"`
|
||||
|
||||
// CACertFile a path to a CA Cert bundle that can be used as part of
|
||||
// verifying SSL API requests.
|
||||
CACertFile string `yaml:"cacert" json:"cacert"`
|
||||
CACertFile string `yaml:"cacert"`
|
||||
|
||||
// ClientCertFile a path to a client certificate to use as part of the SSL
|
||||
// transaction.
|
||||
ClientCertFile string `yaml:"cert" json:"cert"`
|
||||
ClientCertFile string `yaml:"cert"`
|
||||
|
||||
// ClientKeyFile a path to a client key to use as part of the SSL
|
||||
// transaction.
|
||||
ClientKeyFile string `yaml:"key" json:"key"`
|
||||
ClientKeyFile string `yaml:"key"`
|
||||
}
|
||||
|
||||
// AuthInfo represents the auth section of a cloud entry or
|
||||
// auth options entered explicitly in ClientOpts.
|
||||
type AuthInfo struct {
|
||||
// AuthURL is the keystone/identity endpoint URL.
|
||||
AuthURL string `yaml:"auth_url" json:"auth_url"`
|
||||
AuthURL string `yaml:"auth_url"`
|
||||
|
||||
// Token is a pre-generated authentication token.
|
||||
Token string `yaml:"token" json:"token"`
|
||||
Token string `yaml:"token"`
|
||||
|
||||
// Username is the username of the user.
|
||||
Username string `yaml:"username" json:"username"`
|
||||
Username string `yaml:"username"`
|
||||
|
||||
// UserID is the unique ID of a user.
|
||||
UserID string `yaml:"user_id" json:"user_id"`
|
||||
UserID string `yaml:"user_id"`
|
||||
|
||||
// Password is the password of the user.
|
||||
Password string `yaml:"password" json:"password"`
|
||||
Password string `yaml:"password"`
|
||||
|
||||
// Application Credential ID to login with.
|
||||
ApplicationCredentialID string `yaml:"application_credential_id" json:"application_credential_id"`
|
||||
ApplicationCredentialID string `yaml:"application_credential_id"`
|
||||
|
||||
// Application Credential name to login with.
|
||||
ApplicationCredentialName string `yaml:"application_credential_name" json:"application_credential_name"`
|
||||
ApplicationCredentialName string `yaml:"application_credential_name"`
|
||||
|
||||
// Application Credential secret to login with.
|
||||
ApplicationCredentialSecret string `yaml:"application_credential_secret" json:"application_credential_secret"`
|
||||
ApplicationCredentialSecret string `yaml:"application_credential_secret"`
|
||||
|
||||
// ProjectName is the common/human-readable name of a project.
|
||||
// Users can be scoped to a project.
|
||||
// ProjectName on its own is not enough to ensure a unique scope. It must
|
||||
// also be combined with either a ProjectDomainName or ProjectDomainID.
|
||||
// ProjectName cannot be combined with ProjectID in a scope.
|
||||
ProjectName string `yaml:"project_name" json:"project_name"`
|
||||
ProjectName string `yaml:"project_name"`
|
||||
|
||||
// ProjectID is the unique ID of a project.
|
||||
// It can be used to scope a user to a specific project.
|
||||
ProjectID string `yaml:"project_id" json:"project_id"`
|
||||
ProjectID string `yaml:"project_id"`
|
||||
|
||||
// UserDomainName is the name of the domain where a user resides.
|
||||
// It is used to identify the source domain of a user.
|
||||
UserDomainName string `yaml:"user_domain_name" json:"user_domain_name"`
|
||||
UserDomainName string `yaml:"user_domain_name"`
|
||||
|
||||
// UserDomainID is the unique ID of the domain where a user resides.
|
||||
// It is used to identify the source domain of a user.
|
||||
UserDomainID string `yaml:"user_domain_id" json:"user_domain_id"`
|
||||
UserDomainID string `yaml:"user_domain_id"`
|
||||
|
||||
// ProjectDomainName is the name of the domain where a project resides.
|
||||
// It is used to identify the source domain of a project.
|
||||
// ProjectDomainName can be used in addition to a ProjectName when scoping
|
||||
// a user to a specific project.
|
||||
ProjectDomainName string `yaml:"project_domain_name" json:"project_domain_name"`
|
||||
ProjectDomainName string `yaml:"project_domain_name"`
|
||||
|
||||
// ProjectDomainID is the name of the domain where a project resides.
|
||||
// It is used to identify the source domain of a project.
|
||||
// ProjectDomainID can be used in addition to a ProjectName when scoping
|
||||
// a user to a specific project.
|
||||
ProjectDomainID string `yaml:"project_domain_id" json:"project_domain_id"`
|
||||
ProjectDomainID string `yaml:"project_domain_id"`
|
||||
|
||||
// DomainName is the name of a domain which can be used to identify the
|
||||
// source domain of either a user or a project.
|
||||
// If UserDomainName and ProjectDomainName are not specified, then DomainName
|
||||
// is used as a default choice.
|
||||
// It can also be used be used to specify a domain-only scope.
|
||||
DomainName string `yaml:"domain_name" json:"domain_name"`
|
||||
DomainName string `yaml:"domain_name"`
|
||||
|
||||
// DomainID is the unique ID of a domain which can be used to identify the
|
||||
// source domain of eitehr a user or a project.
|
||||
// If UserDomainID and ProjectDomainID are not specified, then DomainID is
|
||||
// used as a default choice.
|
||||
// It can also be used be used to specify a domain-only scope.
|
||||
DomainID string `yaml:"domain_id" json:"domain_id"`
|
||||
DomainID string `yaml:"domain_id"`
|
||||
|
||||
// DefaultDomain is the domain ID to fall back on if no other domain has
|
||||
// been specified and a domain is required for scope.
|
||||
DefaultDomain string `yaml:"default_domain" json:"default_domain"`
|
||||
DefaultDomain string `yaml:"default_domain"`
|
||||
}
|
||||
|
|
|
@ -22,4 +22,4 @@ _testmain.go
|
|||
*.exe
|
||||
|
||||
.idea/
|
||||
*.iml
|
||||
*.iml
|
|
@ -3,11 +3,11 @@ sudo: false
|
|||
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.7.x
|
||||
- go: 1.8.x
|
||||
- go: 1.9.x
|
||||
- go: 1.10.x
|
||||
- go: 1.11.x
|
||||
- go: 1.4
|
||||
- go: 1.5
|
||||
- go: 1.6
|
||||
- go: 1.7
|
||||
- go: 1.8
|
||||
- go: tip
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
|
|
@ -4,6 +4,5 @@
|
|||
# Please keep the list sorted.
|
||||
|
||||
Gary Burd <gary@beagledreams.com>
|
||||
Google LLC (https://opensource.google.com/)
|
||||
Joachim Bauch <mail@joachim-bauch.de>
|
||||
|
||||
|
|
|
@ -51,7 +51,7 @@ subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn
|
|||
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
|
||||
</table>
|
||||
|
||||
Notes:
|
||||
Notes:
|
||||
|
||||
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
|
||||
2. The application can get the type of a received data message by implementing
|
||||
|
|
|
@ -5,15 +5,15 @@
|
|||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -53,10 +53,6 @@ type Dialer struct {
|
|||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialContext specifies the dial function for creating TCP connections. If
|
||||
// NetDialContext is nil, net.DialContext is used.
|
||||
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
|
@ -75,17 +71,6 @@ type Dialer struct {
|
|||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
|
||||
|
@ -101,13 +86,52 @@ type Dialer struct {
|
|||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// Dial creates a new client connection by calling DialContext with a background context.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
return d.DialContext(context.Background(), urlStr, requestHeader)
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
// parseURL parses the URL.
|
||||
//
|
||||
// This function is a replacement for the standard library url.Parse function.
|
||||
// In Go 1.4 and earlier, url.Parse loses information from the path.
|
||||
func parseURL(s string) (*url.URL, error) {
|
||||
// From the RFC:
|
||||
//
|
||||
// ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
|
||||
// wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
|
||||
var u url.URL
|
||||
switch {
|
||||
case strings.HasPrefix(s, "ws://"):
|
||||
u.Scheme = "ws"
|
||||
s = s[len("ws://"):]
|
||||
case strings.HasPrefix(s, "wss://"):
|
||||
u.Scheme = "wss"
|
||||
s = s[len("wss://"):]
|
||||
default:
|
||||
return nil, errMalformedURL
|
||||
}
|
||||
|
||||
if i := strings.Index(s, "?"); i >= 0 {
|
||||
u.RawQuery = s[i+1:]
|
||||
s = s[:i]
|
||||
}
|
||||
|
||||
if i := strings.Index(s, "/"); i >= 0 {
|
||||
u.Opaque = s[i:]
|
||||
s = s[:i]
|
||||
} else {
|
||||
u.Opaque = "/"
|
||||
}
|
||||
|
||||
u.Host = s
|
||||
|
||||
if strings.Contains(u.Host, "@") {
|
||||
// Don't bother parsing user information because user information is
|
||||
// not allowed in websocket URIs.
|
||||
return nil, errMalformedURL
|
||||
}
|
||||
|
||||
return &u, nil
|
||||
}
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
|
@ -126,29 +150,26 @@ func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
|||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
// DefaultDialer is a dialer with all fields set to the default zero values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
|
||||
// nilDialer is dialer to use when receiver is nil.
|
||||
var nilDialer = *DefaultDialer
|
||||
|
||||
// DialContext creates a new client connection. Use requestHeader to specify the
|
||||
// Dial creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// The context will be used in the request and in the Dialer
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
|
||||
if d == nil {
|
||||
d = &nilDialer
|
||||
d = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
|
@ -156,7 +177,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(urlStr)
|
||||
u, err := parseURL(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -184,7 +205,6 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
|
|||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
|
@ -217,83 +237,45 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
|
|||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
|
||||
}
|
||||
|
||||
if d.HandshakeTimeout != 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Get network dial function.
|
||||
var netDial func(network, add string) (net.Conn, error)
|
||||
|
||||
if d.NetDialContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDial != nil {
|
||||
netDial = d.NetDial
|
||||
} else {
|
||||
netDialer := &net.Dialer{}
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return netDialer.DialContext(ctx, network, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to set the connection deadline.
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
forwardDial := netDial
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
c, err := forwardDial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to connect through a proxy.
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err := d.Proxy(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
netDial = dialer.Dial
|
||||
}
|
||||
req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
trace := httptrace.ContextClientTrace(ctx)
|
||||
if trace != nil && trace.GetConn != nil {
|
||||
trace.GetConn(hostPort)
|
||||
|
||||
var proxyURL *url.URL
|
||||
// Check wether the proxy method has been configured
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err = d.Proxy(req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if trace != nil && trace.GotConn != nil {
|
||||
trace.GotConn(httptrace.GotConnInfo{
|
||||
Conn: netConn,
|
||||
})
|
||||
var targetHostPort string
|
||||
if proxyURL != nil {
|
||||
targetHostPort, _ = hostPortNoPort(proxyURL)
|
||||
} else {
|
||||
targetHostPort = hostPort
|
||||
}
|
||||
|
||||
var deadline time.Time
|
||||
if d.HandshakeTimeout != 0 {
|
||||
deadline = time.Now().Add(d.HandshakeTimeout)
|
||||
}
|
||||
|
||||
netDial := d.NetDial
|
||||
if netDial == nil {
|
||||
netDialer := &net.Dialer{Deadline: deadline}
|
||||
netDial = netDialer.Dial
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", targetHostPort)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -304,6 +286,42 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
|
|||
}
|
||||
}()
|
||||
|
||||
if err := netConn.SetDeadline(deadline); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if proxyURL != nil {
|
||||
connectHeader := make(http.Header)
|
||||
if user := proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
connectReq := &http.Request{
|
||||
Method: "CONNECT",
|
||||
URL: &url.URL{Opaque: hostPort},
|
||||
Host: hostPort,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
connectReq.Write(netConn)
|
||||
|
||||
// Read response.
|
||||
// Okay to use and discard buffered reader here, because
|
||||
// TLS server will not speak until spoken to.
|
||||
br := bufio.NewReader(netConn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, nil, errors.New(f[1])
|
||||
}
|
||||
}
|
||||
|
||||
if u.Scheme == "https" {
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
|
@ -311,31 +329,22 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
|
|||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
|
||||
var err error
|
||||
if trace != nil {
|
||||
err = doHandshakeWithTrace(trace, tlsConn, cfg)
|
||||
} else {
|
||||
err = doHandshake(tlsConn, cfg)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)
|
||||
|
||||
if err := req.Write(netConn); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if trace != nil && trace.GotFirstResponseByte != nil {
|
||||
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
|
||||
trace.GotFirstResponseByte()
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(conn.br, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
@ -381,15 +390,3 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
|
|||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
||||
|
||||
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ const (
|
|||
// is UTF-8 encoded text.
|
||||
PingMessage = 9
|
||||
|
||||
// PongMessage denotes a pong control message. The optional message payload
|
||||
// PongMessage denotes a ping control message. The optional message payload
|
||||
// is UTF-8 encoded text.
|
||||
PongMessage = 10
|
||||
)
|
||||
|
@ -100,8 +100,9 @@ func (e *netError) Error() string { return e.msg }
|
|||
func (e *netError) Temporary() bool { return e.temporary }
|
||||
func (e *netError) Timeout() bool { return e.timeout }
|
||||
|
||||
// CloseError represents a close message.
|
||||
// CloseError represents close frame.
|
||||
type CloseError struct {
|
||||
|
||||
// Code is defined in RFC 6455, section 11.7.
|
||||
Code int
|
||||
|
||||
|
@ -223,20 +224,6 @@ func isValidReceivedCloseCode(code int) bool {
|
|||
return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
|
||||
}
|
||||
|
||||
// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
|
||||
// interface. The type of the value stored in a pool is not specified.
|
||||
type BufferPool interface {
|
||||
// Get gets a value from the pool or returns nil if the pool is empty.
|
||||
Get() interface{}
|
||||
// Put adds a value to the pool.
|
||||
Put(interface{})
|
||||
}
|
||||
|
||||
// writePoolData is the type added to the write buffer pool. This wrapper is
|
||||
// used to prevent applications from peeking at and depending on the values
|
||||
// added to the pool.
|
||||
type writePoolData struct{ buf []byte }
|
||||
|
||||
// The Conn type represents a WebSocket connection.
|
||||
type Conn struct {
|
||||
conn net.Conn
|
||||
|
@ -246,8 +233,6 @@ type Conn struct {
|
|||
// Write fields
|
||||
mu chan bool // used as mutex to protect write to conn
|
||||
writeBuf []byte // frame is constructed in this buffer.
|
||||
writePool BufferPool
|
||||
writeBufSize int
|
||||
writeDeadline time.Time
|
||||
writer io.WriteCloser // the current writer returned to the application
|
||||
isWriting bool // for best-effort concurrent write detection
|
||||
|
@ -279,29 +264,64 @@ type Conn struct {
|
|||
newDecompressionReader func(io.Reader) io.ReadCloser
|
||||
}
|
||||
|
||||
func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
|
||||
func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
|
||||
return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil)
|
||||
}
|
||||
|
||||
type writeHook struct {
|
||||
p []byte
|
||||
}
|
||||
|
||||
func (wh *writeHook) Write(p []byte) (int, error) {
|
||||
wh.p = p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn {
|
||||
mu := make(chan bool, 1)
|
||||
mu <- true
|
||||
|
||||
var br *bufio.Reader
|
||||
if readBufferSize == 0 && brw != nil && brw.Reader != nil {
|
||||
// Reuse the supplied bufio.Reader if the buffer has a useful size.
|
||||
// This code assumes that peek on a reader returns
|
||||
// bufio.Reader.buf[:0].
|
||||
brw.Reader.Reset(conn)
|
||||
if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 {
|
||||
br = brw.Reader
|
||||
}
|
||||
}
|
||||
if br == nil {
|
||||
if readBufferSize == 0 {
|
||||
readBufferSize = defaultReadBufferSize
|
||||
} else if readBufferSize < maxControlFramePayloadSize {
|
||||
// must be large enough for control frame
|
||||
}
|
||||
if readBufferSize < maxControlFramePayloadSize {
|
||||
readBufferSize = maxControlFramePayloadSize
|
||||
}
|
||||
br = bufio.NewReaderSize(conn, readBufferSize)
|
||||
}
|
||||
|
||||
if writeBufferSize <= 0 {
|
||||
writeBufferSize = defaultWriteBufferSize
|
||||
}
|
||||
writeBufferSize += maxFrameHeaderSize
|
||||
|
||||
if writeBuf == nil && writeBufferPool == nil {
|
||||
writeBuf = make([]byte, writeBufferSize)
|
||||
var writeBuf []byte
|
||||
if writeBufferSize == 0 && brw != nil && brw.Writer != nil {
|
||||
// Use the bufio.Writer's buffer if the buffer has a useful size. This
|
||||
// code assumes that bufio.Writer.buf[:1] is passed to the
|
||||
// bufio.Writer's underlying writer.
|
||||
var wh writeHook
|
||||
brw.Writer.Reset(&wh)
|
||||
brw.Writer.WriteByte(0)
|
||||
brw.Flush()
|
||||
if cap(wh.p) >= maxFrameHeaderSize+256 {
|
||||
writeBuf = wh.p[:cap(wh.p)]
|
||||
}
|
||||
}
|
||||
|
||||
if writeBuf == nil {
|
||||
if writeBufferSize == 0 {
|
||||
writeBufferSize = defaultWriteBufferSize
|
||||
}
|
||||
writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize)
|
||||
}
|
||||
|
||||
mu := make(chan bool, 1)
|
||||
mu <- true
|
||||
c := &Conn{
|
||||
isServer: isServer,
|
||||
br: br,
|
||||
|
@ -309,8 +329,6 @@ func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int,
|
|||
mu: mu,
|
||||
readFinal: true,
|
||||
writeBuf: writeBuf,
|
||||
writePool: writeBufferPool,
|
||||
writeBufSize: writeBufferSize,
|
||||
enableWriteCompression: true,
|
||||
compressionLevel: defaultCompressionLevel,
|
||||
}
|
||||
|
@ -325,8 +343,7 @@ func (c *Conn) Subprotocol() string {
|
|||
return c.subprotocol
|
||||
}
|
||||
|
||||
// Close closes the underlying network connection without sending or waiting
|
||||
// for a close message.
|
||||
// Close closes the underlying network connection without sending or waiting for a close frame.
|
||||
func (c *Conn) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
@ -353,16 +370,7 @@ func (c *Conn) writeFatal(err error) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (c *Conn) read(n int) ([]byte, error) {
|
||||
p, err := c.br.Peek(n)
|
||||
if err == io.EOF {
|
||||
err = errUnexpectedEOF
|
||||
}
|
||||
c.br.Discard(len(p))
|
||||
return p, err
|
||||
}
|
||||
|
||||
func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
|
||||
func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
|
||||
<-c.mu
|
||||
defer func() { c.mu <- true }()
|
||||
|
||||
|
@ -374,14 +382,15 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error
|
|||
}
|
||||
|
||||
c.conn.SetWriteDeadline(deadline)
|
||||
if len(buf1) == 0 {
|
||||
_, err = c.conn.Write(buf0)
|
||||
} else {
|
||||
err = c.writeBufs(buf0, buf1)
|
||||
}
|
||||
if err != nil {
|
||||
return c.writeFatal(err)
|
||||
for _, buf := range bufs {
|
||||
if len(buf) > 0 {
|
||||
_, err := c.conn.Write(buf)
|
||||
if err != nil {
|
||||
return c.writeFatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if frameType == CloseMessage {
|
||||
c.writeFatal(ErrCloseSent)
|
||||
}
|
||||
|
@ -467,19 +476,7 @@ func (c *Conn) prepWrite(messageType int) error {
|
|||
c.writeErrMu.Lock()
|
||||
err := c.writeErr
|
||||
c.writeErrMu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.writeBuf == nil {
|
||||
wpd, ok := c.writePool.Get().(writePoolData)
|
||||
if ok {
|
||||
c.writeBuf = wpd.buf
|
||||
} else {
|
||||
c.writeBuf = make([]byte, c.writeBufSize)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
// NextWriter returns a writer for the next message to send. The writer's Close
|
||||
|
@ -487,9 +484,6 @@ func (c *Conn) prepWrite(messageType int) error {
|
|||
//
|
||||
// There can be at most one open writer on a connection. NextWriter closes the
|
||||
// previous writer if the application has not already done so.
|
||||
//
|
||||
// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
|
||||
// PongMessage) are supported.
|
||||
func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
|
||||
if err := c.prepWrite(messageType); err != nil {
|
||||
return nil, err
|
||||
|
@ -605,10 +599,6 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error {
|
|||
|
||||
if final {
|
||||
c.writer = nil
|
||||
if c.writePool != nil {
|
||||
c.writePool.Put(writePoolData{buf: c.writeBuf})
|
||||
c.writeBuf = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -774,6 +764,7 @@ func (c *Conn) SetWriteDeadline(t time.Time) error {
|
|||
// Read methods
|
||||
|
||||
func (c *Conn) advanceFrame() (int, error) {
|
||||
|
||||
// 1. Skip remainder of previous frame.
|
||||
|
||||
if c.readRemaining > 0 {
|
||||
|
@ -1042,7 +1033,7 @@ func (c *Conn) SetReadDeadline(t time.Time) error {
|
|||
}
|
||||
|
||||
// SetReadLimit sets the maximum size for a message read from the peer. If a
|
||||
// message exceeds the limit, the connection sends a close message to the peer
|
||||
// message exceeds the limit, the connection sends a close frame to the peer
|
||||
// and returns ErrReadLimit to the application.
|
||||
func (c *Conn) SetReadLimit(limit int64) {
|
||||
c.readLimit = limit
|
||||
|
@ -1055,22 +1046,24 @@ func (c *Conn) CloseHandler() func(code int, text string) error {
|
|||
|
||||
// SetCloseHandler sets the handler for close messages received from the peer.
|
||||
// The code argument to h is the received close code or CloseNoStatusReceived
|
||||
// if the close message is empty. The default close handler sends a close
|
||||
// message back to the peer.
|
||||
// if the close message is empty. The default close handler sends a close frame
|
||||
// back to the peer.
|
||||
//
|
||||
// The handler function is called from the NextReader, ReadMessage and message
|
||||
// reader Read methods. The application must read the connection to process
|
||||
// close messages as described in the section on Control Messages above.
|
||||
// The application must read the connection to process close messages as
|
||||
// described in the section on Control Frames above.
|
||||
//
|
||||
// The connection read methods return a CloseError when a close message is
|
||||
// The connection read methods return a CloseError when a close frame is
|
||||
// received. Most applications should handle close messages as part of their
|
||||
// normal error handling. Applications should only set a close handler when the
|
||||
// application must perform some action before sending a close message back to
|
||||
// application must perform some action before sending a close frame back to
|
||||
// the peer.
|
||||
func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
|
||||
if h == nil {
|
||||
h = func(code int, text string) error {
|
||||
message := FormatCloseMessage(code, "")
|
||||
message := []byte{}
|
||||
if code != CloseNoStatusReceived {
|
||||
message = FormatCloseMessage(code, "")
|
||||
}
|
||||
c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
|
||||
return nil
|
||||
}
|
||||
|
@ -1084,12 +1077,11 @@ func (c *Conn) PingHandler() func(appData string) error {
|
|||
}
|
||||
|
||||
// SetPingHandler sets the handler for ping messages received from the peer.
|
||||
// The appData argument to h is the PING message application data. The default
|
||||
// The appData argument to h is the PING frame application data. The default
|
||||
// ping handler sends a pong to the peer.
|
||||
//
|
||||
// The handler function is called from the NextReader, ReadMessage and message
|
||||
// reader Read methods. The application must read the connection to process
|
||||
// ping messages as described in the section on Control Messages above.
|
||||
// The application must read the connection to process ping messages as
|
||||
// described in the section on Control Frames above.
|
||||
func (c *Conn) SetPingHandler(h func(appData string) error) {
|
||||
if h == nil {
|
||||
h = func(message string) error {
|
||||
|
@ -1111,12 +1103,11 @@ func (c *Conn) PongHandler() func(appData string) error {
|
|||
}
|
||||
|
||||
// SetPongHandler sets the handler for pong messages received from the peer.
|
||||
// The appData argument to h is the PONG message application data. The default
|
||||
// The appData argument to h is the PONG frame application data. The default
|
||||
// pong handler does nothing.
|
||||
//
|
||||
// The handler function is called from the NextReader, ReadMessage and message
|
||||
// reader Read methods. The application must read the connection to process
|
||||
// pong messages as described in the section on Control Messages above.
|
||||
// The application must read the connection to process ping messages as
|
||||
// described in the section on Control Frames above.
|
||||
func (c *Conn) SetPongHandler(h func(appData string) error) {
|
||||
if h == nil {
|
||||
h = func(string) error { return nil }
|
||||
|
@ -1150,14 +1141,7 @@ func (c *Conn) SetCompressionLevel(level int) error {
|
|||
}
|
||||
|
||||
// FormatCloseMessage formats closeCode and text as a WebSocket close message.
|
||||
// An empty message is returned for code CloseNoStatusReceived.
|
||||
func FormatCloseMessage(closeCode int, text string) []byte {
|
||||
if closeCode == CloseNoStatusReceived {
|
||||
// Return empty message because it's illegal to send
|
||||
// CloseNoStatusReceived. Return non-nil value in case application
|
||||
// checks for nil.
|
||||
return []byte{}
|
||||
}
|
||||
buf := make([]byte, 2+len(text))
|
||||
binary.BigEndian.PutUint16(buf, uint16(closeCode))
|
||||
copy(buf[2:], text)
|
||||
|
|
|
@ -2,14 +2,17 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.8
|
||||
// +build go1.5
|
||||
|
||||
package websocket
|
||||
|
||||
import "net"
|
||||
import "io"
|
||||
|
||||
func (c *Conn) writeBufs(bufs ...[]byte) error {
|
||||
b := net.Buffers(bufs)
|
||||
_, err := b.WriteTo(c.conn)
|
||||
return err
|
||||
func (c *Conn) read(n int) ([]byte, error) {
|
||||
p, err := c.br.Peek(n)
|
||||
if err == io.EOF {
|
||||
err = errUnexpectedEOF
|
||||
}
|
||||
c.br.Discard(len(p))
|
||||
return p, err
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.5
|
||||
|
||||
package websocket
|
||||
|
||||
import "io"
|
||||
|
||||
func (c *Conn) read(n int) ([]byte, error) {
|
||||
p, err := c.br.Peek(n)
|
||||
if err == io.EOF {
|
||||
err = errUnexpectedEOF
|
||||
}
|
||||
if len(p) > 0 {
|
||||
// advance over the bytes just read
|
||||
io.ReadFull(c.br, p)
|
||||
}
|
||||
return p, err
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
func (c *Conn) writeBufs(bufs ...[]byte) error {
|
||||
for _, buf := range bufs {
|
||||
if len(buf) > 0 {
|
||||
if _, err := c.conn.Write(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -6,8 +6,9 @@
|
|||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application calls
|
||||
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
|
||||
// The Conn type represents a WebSocket connection. A server application uses
|
||||
// the Upgrade function from an Upgrader object with a HTTP request handler
|
||||
// to get a pointer to a Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
|
@ -30,12 +31,10 @@
|
|||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// if err := conn.WriteMessage(messageType, p); err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// if err = conn.WriteMessage(messageType, p); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
|
@ -86,26 +85,20 @@
|
|||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by calling the handler function
|
||||
// set with the SetCloseHandler method and by returning a *CloseError from the
|
||||
// NextReader, ReadMessage or the message Read method. The default close
|
||||
// handler sends a close message to the peer.
|
||||
// Connections handle received close messages by sending a close message to the
|
||||
// peer and returning a *CloseError from the the NextReader, ReadMessage or the
|
||||
// message Read method.
|
||||
//
|
||||
// Connections handle received ping messages by calling the handler function
|
||||
// set with the SetPingHandler method. The default ping handler sends a pong
|
||||
// message to the peer.
|
||||
// Connections handle received ping and pong messages by invoking callback
|
||||
// functions set with SetPingHandler and SetPongHandler methods. The callback
|
||||
// functions are called from the NextReader, ReadMessage and the message Read
|
||||
// methods.
|
||||
//
|
||||
// Connections handle received pong messages by calling the handler function
|
||||
// set with the SetPongHandler method. The default pong handler does nothing.
|
||||
// If an application sends ping messages, then the application should set a
|
||||
// pong handler to receive the corresponding pong.
|
||||
// The default ping handler sends a pong to the peer. The application's reading
|
||||
// goroutine can block for a short time while the handler writes the pong data
|
||||
// to the connection.
|
||||
//
|
||||
// The control message handler functions are called from the NextReader,
|
||||
// ReadMessage and message reader Read methods. The default close and ping
|
||||
// handlers can block these methods for a short time when the handler writes to
|
||||
// the connection.
|
||||
//
|
||||
// The application must read the connection to process close, ping and pong
|
||||
// The application must read the connection to process ping, pong and close
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
|
@ -144,12 +137,19 @@
|
|||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and the Origin host is
|
||||
// not equal to the Host request header.
|
||||
// the handshake if the Origin request header is present and not equal to the
|
||||
// Host request header.
|
||||
//
|
||||
// The deprecated package-level Upgrade function does not perform origin
|
||||
// checking. The application is responsible for checking the Origin header
|
||||
// before calling the Upgrade function.
|
||||
// An application can allow connections from any origin by specifying a
|
||||
// function that always returns true:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// CheckOrigin: func(r *http.Request) bool { return true },
|
||||
// }
|
||||
//
|
||||
// The deprecated Upgrade function does not enforce an origin policy. It's the
|
||||
// application's responsibility to check the Origin header before calling
|
||||
// Upgrade.
|
||||
//
|
||||
// Compression EXPERIMENTAL
|
||||
//
|
||||
|
|
|
@ -9,14 +9,12 @@ import (
|
|||
"io"
|
||||
)
|
||||
|
||||
// WriteJSON writes the JSON encoding of v as a message.
|
||||
//
|
||||
// Deprecated: Use c.WriteJSON instead.
|
||||
// WriteJSON is deprecated, use c.WriteJSON instead.
|
||||
func WriteJSON(c *Conn, v interface{}) error {
|
||||
return c.WriteJSON(v)
|
||||
}
|
||||
|
||||
// WriteJSON writes the JSON encoding of v as a message.
|
||||
// WriteJSON writes the JSON encoding of v to the connection.
|
||||
//
|
||||
// See the documentation for encoding/json Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
|
@ -33,10 +31,7 @@ func (c *Conn) WriteJSON(v interface{}) error {
|
|||
return err2
|
||||
}
|
||||
|
||||
// ReadJSON reads the next JSON-encoded message from the connection and stores
|
||||
// it in the value pointed to by v.
|
||||
//
|
||||
// Deprecated: Use c.ReadJSON instead.
|
||||
// ReadJSON is deprecated, use c.ReadJSON instead.
|
||||
func ReadJSON(c *Conn, v interface{}) error {
|
||||
return c.ReadJSON(v)
|
||||
}
|
||||
|
|
|
@ -11,6 +11,7 @@ import "unsafe"
|
|||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
|
||||
func maskBytes(key [4]byte, pos int, b []byte) int {
|
||||
|
||||
// Mask one byte at a time for small buffers.
|
||||
if len(b) < 2*wordSize {
|
||||
for i := range b {
|
||||
|
|
|
@ -19,6 +19,7 @@ import (
|
|||
type PreparedMessage struct {
|
||||
messageType int
|
||||
data []byte
|
||||
err error
|
||||
mu sync.Mutex
|
||||
frames map[prepareKey]*preparedFrame
|
||||
}
|
||||
|
|
|
@ -1,77 +0,0 @@
|
|||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type netDialerFunc func(network, addr string) (net.Conn, error)
|
||||
|
||||
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
|
||||
return fn(network, addr)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
|
||||
return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil
|
||||
})
|
||||
}
|
||||
|
||||
type httpProxyDialer struct {
|
||||
proxyURL *url.URL
|
||||
fowardDial func(network, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
|
||||
hostPort, _ := hostPortNoPort(hpd.proxyURL)
|
||||
conn, err := hpd.fowardDial(network, hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connectHeader := make(http.Header)
|
||||
if user := hpd.proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
|
||||
connectReq := &http.Request{
|
||||
Method: "CONNECT",
|
||||
URL: &url.URL{Opaque: addr},
|
||||
Host: addr,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
if err := connectReq.Write(conn); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read response. It's OK to use and discard buffered reader here becaue
|
||||
// the remote server does not speak until spoken to.
|
||||
br := bufio.NewReader(conn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
conn.Close()
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, errors.New(f[1])
|
||||
}
|
||||
return conn, nil
|
||||
}
|
|
@ -7,7 +7,7 @@ package websocket
|
|||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
@ -33,23 +33,10 @@ type Upgrader struct {
|
|||
// or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is not nil, then the Upgrade method negotiates a
|
||||
// preference. If this field is set, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client. If there's no match, then no protocol is
|
||||
// negotiated (the Sec-Websocket-Protocol header is not included in the
|
||||
// handshake response).
|
||||
// requested by the client.
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
|
@ -57,12 +44,8 @@ type Upgrader struct {
|
|||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, then a safe default is used: return false if the
|
||||
// Origin request header is present and the origin host is not equal to
|
||||
// request Host header.
|
||||
//
|
||||
// A CheckOrigin function should carefully validate the request origin to
|
||||
// prevent cross-site request forgery.
|
||||
// CheckOrigin is nil, the host in the Origin header must not be set or
|
||||
// must match the host of the request.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specify if the server should attempt to negotiate per
|
||||
|
@ -93,7 +76,7 @@ func checkSameOrigin(r *http.Request) bool {
|
|||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return equalASCIIFold(u.Host, r.Host)
|
||||
return u.Host == r.Host
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
|
@ -116,44 +99,42 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header
|
|||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-WebSocket-Protocol).
|
||||
// application negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
const badHandshake = "websocket: the client is not using the websocket protocol: "
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank")
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
@ -170,12 +151,17 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
|
|||
}
|
||||
}
|
||||
|
||||
var (
|
||||
netConn net.Conn
|
||||
err error
|
||||
)
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var brw *bufio.ReadWriter
|
||||
netConn, brw, err := h.Hijack()
|
||||
netConn, brw, err = h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
@ -185,21 +171,7 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
|
|||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
var br *bufio.Reader
|
||||
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
|
||||
// Reuse hijacked buffered reader as connection reader.
|
||||
br = brw.Reader
|
||||
}
|
||||
|
||||
buf := bufioWriterBuffer(netConn, brw.Writer)
|
||||
|
||||
var writeBuf []byte
|
||||
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
|
||||
// Reuse hijacked write buffer as connection buffer.
|
||||
writeBuf = buf
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
|
||||
c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
if compress {
|
||||
|
@ -207,23 +179,17 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
|
|||
c.newDecompressionReader = decompressNoContextTakeover
|
||||
}
|
||||
|
||||
// Use larger of hijacked buffer and connection write buffer for header.
|
||||
p := buf
|
||||
if len(c.writeBuf) > len(p) {
|
||||
p = c.writeBuf
|
||||
}
|
||||
p = p[:0]
|
||||
|
||||
p := c.writeBuf[:0]
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-WebSocket-Protocol: "...)
|
||||
p = append(p, "Sec-Websocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
if compress {
|
||||
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
|
@ -264,14 +230,13 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
|
|||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// Deprecated: Use websocket.Upgrader instead.
|
||||
// This function is deprecated, use websocket.Upgrader instead.
|
||||
//
|
||||
// Upgrade does not perform origin checking. The application is responsible for
|
||||
// checking the Origin header before calling Upgrade. An example implementation
|
||||
// of the same origin policy check is:
|
||||
// The application is responsible for checking the request origin before
|
||||
// calling Upgrade. An example implementation of the same origin policy is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", http.StatusForbidden)
|
||||
// http.Error(w, "Origin not allowed", 403)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
|
@ -324,40 +289,3 @@ func IsWebSocketUpgrade(r *http.Request) bool {
|
|||
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
|
||||
tokenListContainsValue(r.Header, "Upgrade", "websocket")
|
||||
}
|
||||
|
||||
// bufioReaderSize size returns the size of a bufio.Reader.
|
||||
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
|
||||
// This code assumes that peek on a reset reader returns
|
||||
// bufio.Reader.buf[:0].
|
||||
// TODO: Use bufio.Reader.Size() after Go 1.10
|
||||
br.Reset(originalReader)
|
||||
if p, err := br.Peek(0); err == nil {
|
||||
return cap(p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// writeHook is an io.Writer that records the last slice passed to it vio
|
||||
// io.Writer.Write.
|
||||
type writeHook struct {
|
||||
p []byte
|
||||
}
|
||||
|
||||
func (wh *writeHook) Write(p []byte) (int, error) {
|
||||
wh.p = p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
|
||||
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
|
||||
// This code assumes that bufio.Writer.buf[:1] is passed to the
|
||||
// bufio.Writer's underlying writer.
|
||||
var wh writeHook
|
||||
bw.Reset(&wh)
|
||||
bw.WriteByte(0)
|
||||
bw.Flush()
|
||||
|
||||
bw.Reset(originalWriter)
|
||||
|
||||
return wh.p[:cap(wh.p)]
|
||||
}
|
||||
|
|
|
@ -1,19 +0,0 @@
|
|||
// +build go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http/httptrace"
|
||||
)
|
||||
|
||||
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if trace.TLSHandshakeStart != nil {
|
||||
trace.TLSHandshakeStart()
|
||||
}
|
||||
err := doHandshake(tlsConn, cfg)
|
||||
if trace.TLSHandshakeDone != nil {
|
||||
trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
|
||||
}
|
||||
return err
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http/httptrace"
|
||||
)
|
||||
|
||||
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
return doHandshake(tlsConn, cfg)
|
||||
}
|
|
@ -11,7 +11,6 @@ import (
|
|||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
@ -112,14 +111,14 @@ func nextTokenOrQuoted(s string) (value string, rest string) {
|
|||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
j += 1
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
j += 1
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
|
@ -128,31 +127,8 @@ func nextTokenOrQuoted(s string) (value string, rest string) {
|
|||
return "", ""
|
||||
}
|
||||
|
||||
// equalASCIIFold returns true if s is equal to t with ASCII case folding.
|
||||
func equalASCIIFold(s, t string) bool {
|
||||
for s != "" && t != "" {
|
||||
sr, size := utf8.DecodeRuneInString(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRuneInString(t)
|
||||
t = t[size:]
|
||||
if sr == tr {
|
||||
continue
|
||||
}
|
||||
if 'A' <= sr && sr <= 'Z' {
|
||||
sr = sr + 'a' - 'A'
|
||||
}
|
||||
if 'A' <= tr && tr <= 'Z' {
|
||||
tr = tr + 'a' - 'A'
|
||||
}
|
||||
if sr != tr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s == t
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains a token equal to value with ASCII case folding.
|
||||
// name contains token.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
|
@ -166,7 +142,7 @@ headers:
|
|||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if equalASCIIFold(t, value) {
|
||||
if strings.EqualFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
|
@ -178,8 +154,9 @@ headers:
|
|||
return false
|
||||
}
|
||||
|
||||
// parseExtensions parses WebSocket extensions from a header.
|
||||
// parseExtensiosn parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
|
|
|
@ -1,473 +0,0 @@
|
|||
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
|
||||
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
//
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type proxy_direct struct{}
|
||||
|
||||
// Direct is a direct proxy: one that makes network connections directly.
|
||||
var proxy_Direct = proxy_direct{}
|
||||
|
||||
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, addr)
|
||||
}
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the host name
|
||||
// requested matches one of a number of exceptions.
|
||||
type proxy_PerHost struct {
|
||||
def, bypass proxy_Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
|
||||
return &proxy_PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone ".example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *proxy_PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
p.AddHost(host)
|
||||
}
|
||||
}
|
||||
|
||||
// AddIP specifies an IP address that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match an IP.
|
||||
func (p *proxy_PerHost) AddIP(ip net.IP) {
|
||||
p.bypassIPs = append(p.bypassIPs, ip)
|
||||
}
|
||||
|
||||
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match.
|
||||
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
|
||||
p.bypassNetworks = append(p.bypassNetworks, net)
|
||||
}
|
||||
|
||||
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
|
||||
// "example.com" matches "example.com" and all of its subdomains.
|
||||
func (p *proxy_PerHost) AddZone(zone string) {
|
||||
if strings.HasSuffix(zone, ".") {
|
||||
zone = zone[:len(zone)-1]
|
||||
}
|
||||
if !strings.HasPrefix(zone, ".") {
|
||||
zone = "." + zone
|
||||
}
|
||||
p.bypassZones = append(p.bypassZones, zone)
|
||||
}
|
||||
|
||||
// AddHost specifies a host name that will use the bypass proxy.
|
||||
func (p *proxy_PerHost) AddHost(host string) {
|
||||
if strings.HasSuffix(host, ".") {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
p.bypassHosts = append(p.bypassHosts, host)
|
||||
}
|
||||
|
||||
// A Dialer is a means to establish a connection.
|
||||
type proxy_Dialer interface {
|
||||
// Dial connects to the given address via the proxy.
|
||||
Dial(network, addr string) (c net.Conn, err error)
|
||||
}
|
||||
|
||||
// Auth contains authentication parameters that specific Dialers may require.
|
||||
type proxy_Auth struct {
|
||||
User, Password string
|
||||
}
|
||||
|
||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||
// the environment.
|
||||
func proxy_FromEnvironment() proxy_Dialer {
|
||||
allProxy := proxy_allProxyEnv.Get()
|
||||
if len(allProxy) == 0 {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
proxyURL, err := url.Parse(allProxy)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
noProxy := proxy_noProxyEnv.Get()
|
||||
if len(noProxy) == 0 {
|
||||
return proxy
|
||||
}
|
||||
|
||||
perHost := proxy_NewPerHost(proxy, proxy_Direct)
|
||||
perHost.AddFromString(noProxy)
|
||||
return perHost
|
||||
}
|
||||
|
||||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
|
||||
// from a URL with such a scheme.
|
||||
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
|
||||
|
||||
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
|
||||
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
|
||||
// by FromURL.
|
||||
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
|
||||
if proxy_proxySchemes == nil {
|
||||
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
|
||||
}
|
||||
proxy_proxySchemes[scheme] = f
|
||||
}
|
||||
|
||||
// FromURL returns a Dialer given a URL specification and an underlying
|
||||
// Dialer for it to make network requests.
|
||||
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
var auth *proxy_Auth
|
||||
if u.User != nil {
|
||||
auth = new(proxy_Auth)
|
||||
auth.User = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
auth.Password = p
|
||||
}
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "socks5":
|
||||
return proxy_SOCKS5("tcp", u.Host, auth, forward)
|
||||
}
|
||||
|
||||
// If the scheme doesn't match any of the built-in schemes, see if it
|
||||
// was registered by another package.
|
||||
if proxy_proxySchemes != nil {
|
||||
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
|
||||
return f(u, forward)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
proxy_allProxyEnv = &proxy_envOnce{
|
||||
names: []string{"ALL_PROXY", "all_proxy"},
|
||||
}
|
||||
proxy_noProxyEnv = &proxy_envOnce{
|
||||
names: []string{"NO_PROXY", "no_proxy"},
|
||||
}
|
||||
)
|
||||
|
||||
// envOnce looks up an environment variable (optionally by multiple
|
||||
// names) once. It mitigates expensive lookups on some platforms
|
||||
// (e.g. Windows).
|
||||
// (Borrowed from net/http/transport.go)
|
||||
type proxy_envOnce struct {
|
||||
names []string
|
||||
once sync.Once
|
||||
val string
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) Get() string {
|
||||
e.once.Do(e.init)
|
||||
return e.val
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) init() {
|
||||
for _, n := range e.names {
|
||||
e.val = os.Getenv(n)
|
||||
if e.val != "" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
|
||||
// with an optional username and password. See RFC 1928 and RFC 1929.
|
||||
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
s := &proxy_socks5{
|
||||
network: network,
|
||||
addr: addr,
|
||||
forward: forward,
|
||||
}
|
||||
if auth != nil {
|
||||
s.user = auth.User
|
||||
s.password = auth.Password
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type proxy_socks5 struct {
|
||||
user, password string
|
||||
network, addr string
|
||||
forward proxy_Dialer
|
||||
}
|
||||
|
||||
const proxy_socks5Version = 5
|
||||
|
||||
const (
|
||||
proxy_socks5AuthNone = 0
|
||||
proxy_socks5AuthPassword = 2
|
||||
)
|
||||
|
||||
const proxy_socks5Connect = 1
|
||||
|
||||
const (
|
||||
proxy_socks5IP4 = 1
|
||||
proxy_socks5Domain = 3
|
||||
proxy_socks5IP6 = 4
|
||||
)
|
||||
|
||||
var proxy_socks5Errors = []string{
|
||||
"",
|
||||
"general failure",
|
||||
"connection forbidden",
|
||||
"network unreachable",
|
||||
"host unreachable",
|
||||
"connection refused",
|
||||
"TTL expired",
|
||||
"command not supported",
|
||||
"address type not supported",
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
|
||||
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp6", "tcp4":
|
||||
default:
|
||||
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
|
||||
}
|
||||
|
||||
conn, err := s.forward.Dial(s.network, s.addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.connect(conn, addr); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// connect takes an existing connection to a socks5 proxy server,
|
||||
// and commands the server to extend that connection to target,
|
||||
// which must be a canonical address with a host and port.
|
||||
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
|
||||
host, portStr, err := net.SplitHostPort(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to parse port number: " + portStr)
|
||||
}
|
||||
if port < 1 || port > 0xffff {
|
||||
return errors.New("proxy: port number out of range: " + portStr)
|
||||
}
|
||||
|
||||
// the size here is just an estimate
|
||||
buf := make([]byte, 0, 6+len(host))
|
||||
|
||||
buf = append(buf, proxy_socks5Version)
|
||||
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
|
||||
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
|
||||
} else {
|
||||
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
|
||||
}
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
if buf[0] != 5 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
if buf[1] == 0xff {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||
}
|
||||
|
||||
// See RFC 1929
|
||||
if buf[1] == proxy_socks5AuthPassword {
|
||||
buf = buf[:0]
|
||||
buf = append(buf, 1 /* password protocol version */)
|
||||
buf = append(buf, uint8(len(s.user)))
|
||||
buf = append(buf, s.user...)
|
||||
buf = append(buf, uint8(len(s.password)))
|
||||
buf = append(buf, s.password...)
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if buf[1] != 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:0]
|
||||
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
buf = append(buf, proxy_socks5IP4)
|
||||
ip = ip4
|
||||
} else {
|
||||
buf = append(buf, proxy_socks5IP6)
|
||||
}
|
||||
buf = append(buf, ip...)
|
||||
} else {
|
||||
if len(host) > 255 {
|
||||
return errors.New("proxy: destination host name too long: " + host)
|
||||
}
|
||||
buf = append(buf, proxy_socks5Domain)
|
||||
buf = append(buf, byte(len(host)))
|
||||
buf = append(buf, host...)
|
||||
}
|
||||
buf = append(buf, byte(port>>8), byte(port))
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
failure := "unknown error"
|
||||
if int(buf[1]) < len(proxy_socks5Errors) {
|
||||
failure = proxy_socks5Errors[buf[1]]
|
||||
}
|
||||
|
||||
if len(failure) > 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||
}
|
||||
|
||||
bytesToDiscard := 0
|
||||
switch buf[3] {
|
||||
case proxy_socks5IP4:
|
||||
bytesToDiscard = net.IPv4len
|
||||
case proxy_socks5IP6:
|
||||
bytesToDiscard = net.IPv6len
|
||||
case proxy_socks5Domain:
|
||||
_, err := io.ReadFull(conn, buf[:1])
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
bytesToDiscard = int(buf[0])
|
||||
default:
|
||||
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||
}
|
||||
|
||||
if cap(buf) < bytesToDiscard {
|
||||
buf = make([]byte, bytesToDiscard)
|
||||
} else {
|
||||
buf = buf[:bytesToDiscard]
|
||||
}
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
// Also need to discard the port number
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,7 +1,11 @@
|
|||
// checkpoint is a package for checking version information and alerts
|
||||
// for a HashiCorp product.
|
||||
package checkpoint
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
@ -19,9 +23,112 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
uuid "github.com/hashicorp/go-uuid"
|
||||
)
|
||||
|
||||
var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB}
|
||||
var magicBytes [4]byte = [4]byte{0x35, 0x77, 0x69, 0xFB}
|
||||
|
||||
// ReportParams are the parameters for configuring a telemetry report.
|
||||
type ReportParams struct {
|
||||
// Signature is some random signature that should be stored and used
|
||||
// as a cookie-like value. This ensures that alerts aren't repeated.
|
||||
// If the signature is changed, repeat alerts may be sent down. The
|
||||
// signature should NOT be anything identifiable to a user (such as
|
||||
// a MAC address). It should be random.
|
||||
//
|
||||
// If SignatureFile is given, then the signature will be read from this
|
||||
// file. If the file doesn't exist, then a random signature will
|
||||
// automatically be generated and stored here. SignatureFile will be
|
||||
// ignored if Signature is given.
|
||||
Signature string `json:"signature"`
|
||||
SignatureFile string `json:"-"`
|
||||
|
||||
StartTime time.Time `json:"start_time"`
|
||||
EndTime time.Time `json:"end_time"`
|
||||
Arch string `json:"arch"`
|
||||
OS string `json:"os"`
|
||||
Payload interface{} `json:"payload,omitempty"`
|
||||
Product string `json:"product"`
|
||||
RunID string `json:"run_id"`
|
||||
SchemaVersion string `json:"schema_version"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
func (i *ReportParams) signature() string {
|
||||
signature := i.Signature
|
||||
if i.Signature == "" && i.SignatureFile != "" {
|
||||
var err error
|
||||
signature, err = checkSignature(i.SignatureFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
return signature
|
||||
}
|
||||
|
||||
// Report sends telemetry information to checkpoint
|
||||
func Report(ctx context.Context, r *ReportParams) error {
|
||||
if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
req, err := ReportRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := cleanhttp.DefaultClient()
|
||||
resp, err := client.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != 201 {
|
||||
return fmt.Errorf("Unknown status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReportRequest creates a request object for making a report
|
||||
func ReportRequest(r *ReportParams) (*http.Request, error) {
|
||||
// Populate some fields automatically if we can
|
||||
if r.RunID == "" {
|
||||
uuid, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.RunID = uuid
|
||||
}
|
||||
if r.Arch == "" {
|
||||
r.Arch = runtime.GOARCH
|
||||
}
|
||||
if r.OS == "" {
|
||||
r.OS = runtime.GOOS
|
||||
}
|
||||
if r.Signature == "" {
|
||||
r.Signature = r.signature()
|
||||
}
|
||||
|
||||
b, err := json.Marshal(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: "https",
|
||||
Host: "checkpoint-api.hashicorp.com",
|
||||
Path: fmt.Sprintf("/v1/telemetry/%s", r.Product),
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Add("Accept", "application/json")
|
||||
req.Header.Add("User-Agent", "HashiCorp/go-checkpoint")
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// CheckParams are the parameters for configuring a check request.
|
||||
type CheckParams struct {
|
||||
|
@ -70,14 +177,14 @@ type CheckParams struct {
|
|||
|
||||
// CheckResponse is the response for a check request.
|
||||
type CheckResponse struct {
|
||||
Product string `json:"product"`
|
||||
CurrentVersion string `json:"current_version"`
|
||||
CurrentReleaseDate int `json:"current_release_date"`
|
||||
CurrentDownloadURL string `json:"current_download_url"`
|
||||
CurrentChangelogURL string `json:"current_changelog_url"`
|
||||
ProjectWebsite string `json:"project_website"`
|
||||
Outdated bool `json:"outdated"`
|
||||
Alerts []*CheckAlert `json:"alerts"`
|
||||
Product string
|
||||
CurrentVersion string `json:"current_version"`
|
||||
CurrentReleaseDate int `json:"current_release_date"`
|
||||
CurrentDownloadURL string `json:"current_download_url"`
|
||||
CurrentChangelogURL string `json:"current_changelog_url"`
|
||||
ProjectWebsite string `json:"project_website"`
|
||||
Outdated bool `json:"outdated"`
|
||||
Alerts []*CheckAlert
|
||||
}
|
||||
|
||||
// CheckAlert is a single alert message from a check request.
|
||||
|
@ -85,11 +192,11 @@ type CheckResponse struct {
|
|||
// These never have to be manually constructed, and are typically populated
|
||||
// into a CheckResponse as a result of the Check request.
|
||||
type CheckAlert struct {
|
||||
ID int `json:"id"`
|
||||
Date int `json:"date"`
|
||||
Message string `json:"message"`
|
||||
URL string `json:"url"`
|
||||
Level string `json:"level"`
|
||||
ID int
|
||||
Date int
|
||||
Message string
|
||||
URL string
|
||||
Level string
|
||||
}
|
||||
|
||||
// Check checks for alerts and new version information.
|
||||
|
@ -98,7 +205,7 @@ func Check(p *CheckParams) (*CheckResponse, error) {
|
|||
return &CheckResponse{}, nil
|
||||
}
|
||||
|
||||
// Set a default timeout of 3 sec for the check request (in milliseconds)
|
||||
// set a default timeout of 3 sec for the check request (in milliseconds)
|
||||
timeout := 3000
|
||||
if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
|
||||
timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT"))
|
||||
|
@ -146,8 +253,8 @@ func Check(p *CheckParams) (*CheckResponse, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
|
||||
req.Header.Add("Accept", "application/json")
|
||||
req.Header.Add("User-Agent", "HashiCorp/go-checkpoint")
|
||||
|
||||
client := cleanhttp.DefaultClient()
|
||||
|
||||
|
@ -159,8 +266,6 @@ func Check(p *CheckParams) (*CheckResponse, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode)
|
||||
}
|
||||
|
@ -285,11 +390,14 @@ func checkCache(current string, path string, d time.Duration) (io.ReadCloser, er
|
|||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func checkResult(r io.Reader) (*CheckResponse, error) {
|
||||
var result CheckResponse
|
||||
if err := json.NewDecoder(r).Decode(&result); err != nil {
|
||||
dec := json.NewDecoder(r)
|
||||
if err := dec.Decode(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
|
@ -318,7 +426,7 @@ func checkSignature(path string) (string, error) {
|
|||
var b [16]byte
|
||||
n := 0
|
||||
for n < 16 {
|
||||
n2, err := crand.Read(b[n:])
|
||||
n2, err := rand.Read(b[n:])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -348,7 +456,7 @@ func writeCacheHeader(f io.Writer, v string) error {
|
|||
}
|
||||
|
||||
// Write out our current version length
|
||||
length := uint32(len(v))
|
||||
var length uint32 = uint32(len(v))
|
||||
if err := binary.Write(f, binary.LittleEndian, length); err != nil {
|
||||
return err
|
||||
}
|
|
@ -1,6 +0,0 @@
|
|||
module github.com/hashicorp/go-checkpoint
|
||||
|
||||
require (
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0
|
||||
github.com/hashicorp/go-uuid v1.0.0
|
||||
)
|
|
@ -1,4 +0,0 @@
|
|||
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
|
@ -1,118 +0,0 @@
|
|||
package checkpoint
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
uuid "github.com/hashicorp/go-uuid"
|
||||
)
|
||||
|
||||
// ReportParams are the parameters for configuring a telemetry report.
|
||||
type ReportParams struct {
|
||||
// Signature is some random signature that should be stored and used
|
||||
// as a cookie-like value. This ensures that alerts aren't repeated.
|
||||
// If the signature is changed, repeat alerts may be sent down. The
|
||||
// signature should NOT be anything identifiable to a user (such as
|
||||
// a MAC address). It should be random.
|
||||
//
|
||||
// If SignatureFile is given, then the signature will be read from this
|
||||
// file. If the file doesn't exist, then a random signature will
|
||||
// automatically be generated and stored here. SignatureFile will be
|
||||
// ignored if Signature is given.
|
||||
Signature string `json:"signature"`
|
||||
SignatureFile string `json:"-"`
|
||||
|
||||
StartTime time.Time `json:"start_time"`
|
||||
EndTime time.Time `json:"end_time"`
|
||||
Arch string `json:"arch"`
|
||||
OS string `json:"os"`
|
||||
Payload interface{} `json:"payload,omitempty"`
|
||||
Product string `json:"product"`
|
||||
RunID string `json:"run_id"`
|
||||
SchemaVersion string `json:"schema_version"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
func (i *ReportParams) signature() string {
|
||||
signature := i.Signature
|
||||
if i.Signature == "" && i.SignatureFile != "" {
|
||||
var err error
|
||||
signature, err = checkSignature(i.SignatureFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
return signature
|
||||
}
|
||||
|
||||
// Report sends telemetry information to checkpoint
|
||||
func Report(ctx context.Context, r *ReportParams) error {
|
||||
if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
req, err := ReportRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := cleanhttp.DefaultClient()
|
||||
resp, err := client.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != 201 {
|
||||
return fmt.Errorf("Unknown status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReportRequest creates a request object for making a report
|
||||
func ReportRequest(r *ReportParams) (*http.Request, error) {
|
||||
// Populate some fields automatically if we can
|
||||
if r.RunID == "" {
|
||||
uuid, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.RunID = uuid
|
||||
}
|
||||
if r.Arch == "" {
|
||||
r.Arch = runtime.GOARCH
|
||||
}
|
||||
if r.OS == "" {
|
||||
r.OS = runtime.GOOS
|
||||
}
|
||||
if r.Signature == "" {
|
||||
r.Signature = r.signature()
|
||||
}
|
||||
|
||||
b, err := json.Marshal(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: "https",
|
||||
Host: "checkpoint-api.hashicorp.com",
|
||||
Path: fmt.Sprintf("/v1/telemetry/%s", r.Product),
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
|
||||
|
||||
return req, nil
|
||||
}
|
|
@ -1,90 +0,0 @@
|
|||
package checkpoint
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
)
|
||||
|
||||
// VersionsParams are the parameters for a versions request.
type VersionsParams struct {
	// Service is used to lookup the correct service.
	Service string

	// Product is used to filter the version constraints.
	Product string

	// Force, if true, will force the check even if CHECKPOINT_DISABLE
	// is set. Within HashiCorp products, this is ONLY USED when the user
	// specifically requests it. This is never automatically done without
	// the user's consent.
	Force bool
}
|
||||
|
||||
// VersionsResponse is the response for a versions request.
type VersionsResponse struct {
	Service   string   `json:"service"`
	Product   string   `json:"product"`
	Minimum   string   `json:"minimum"`   // minimum supported version
	Maximum   string   `json:"maximum"`   // maximum supported version
	Excluding []string `json:"excluding"` // specific versions to avoid
}
|
||||
|
||||
// Versions returns the version constrains for a given service and product.
|
||||
func Versions(p *VersionsParams) (*VersionsResponse, error) {
|
||||
if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force {
|
||||
return &VersionsResponse{}, nil
|
||||
}
|
||||
|
||||
// Set a default timeout of 1 sec for the versions request (in milliseconds)
|
||||
timeout := 1000
|
||||
if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
|
||||
timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT"))
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("product", p.Product)
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: "https",
|
||||
Host: "checkpoint-api.hashicorp.com",
|
||||
Path: fmt.Sprintf("/v1/versions/%s", p.Service),
|
||||
RawQuery: v.Encode(),
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
|
||||
|
||||
client := cleanhttp.DefaultClient()
|
||||
|
||||
// We use a short timeout since checking for new versions is not critical
|
||||
// enough to block on if checkpoint is broken/slow.
|
||||
client.Timeout = time.Duration(timeout) * time.Millisecond
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
result := &VersionsResponse{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
|
@ -37,9 +37,6 @@ func (g *GitGetter) Get(dst string, u *url.URL) error {
|
|||
// The port number must be parseable as an integer. If not, the user
|
||||
// was probably trying to use a scp-style address, in which case the
|
||||
// ssh:// prefix must be removed to indicate that.
|
||||
//
|
||||
// This is not necessary in versions of Go which have patched
|
||||
// CVE-2019-14809 (e.g. Go 1.12.8+)
|
||||
if portStr := u.Port(); portStr != "" {
|
||||
if _, err := strconv.ParseUint(portStr, 10, 16); err != nil {
|
||||
return fmt.Errorf("invalid port number %q; if using the \"scp-like\" git address scheme where a colon introduces the path instead, remove the ssh:// portion and use just the git:: prefix", portStr)
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
module github.com/hashicorp/go-rootcerts
|
||||
|
||||
require github.com/mitchellh/go-homedir v1.0.0
|
|
@ -1,2 +0,0 @@
|
|||
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
|
@ -1 +0,0 @@
|
|||
module github.com/kardianos/osext
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016 Microsoft Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
203
vendor/github.com/masterzen/azure-sdk-for-go/core/http/chunked.go
generated
vendored
Normal file
203
vendor/github.com/masterzen/azure-sdk-for-go/core/http/chunked.go
generated
vendored
Normal file
|
@ -0,0 +1,203 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The wire protocol for HTTP's "chunked" Transfer-Encoding.
|
||||
|
||||
// This code is duplicated in net/http and net/http/httputil.
|
||||
// Please make any changes in both files.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// maxLineLength bounds the length of a chunk-size header line accepted by
// readLine.
const maxLineLength = 4096 // assumed <= bufio.defaultBufSize

// ErrLineTooLong is returned by readLine when a line exceeds maxLineLength.
var ErrLineTooLong = errors.New("header line too long")
||||
|
||||
// newChunkedReader returns a new chunkedReader that translates the data read from r
|
||||
// out of HTTP "chunked" format before returning it.
|
||||
// The chunkedReader returns io.EOF when the final 0-length chunk is read.
|
||||
//
|
||||
// newChunkedReader is not needed by normal applications. The http package
|
||||
// automatically decodes chunking when reading response bodies.
|
||||
func newChunkedReader(r io.Reader) io.Reader {
|
||||
br, ok := r.(*bufio.Reader)
|
||||
if !ok {
|
||||
br = bufio.NewReader(r)
|
||||
}
|
||||
return &chunkedReader{r: br}
|
||||
}
|
||||
|
||||
// chunkedReader decodes an HTTP "chunked"-encoded stream read from r.
type chunkedReader struct {
	r   *bufio.Reader
	n   uint64 // unread bytes in chunk
	err error  // sticky: once set, Read stops looping and returns it
	buf [2]byte // scratch space for verifying the trailing "\r\n" of each chunk
}
|
||||
|
||||
// beginChunk reads and parses the next chunk-size line, storing the chunk
// length in cr.n. Failures are recorded in cr.err rather than returned; the
// final zero-length chunk is recorded as io.EOF.
func (cr *chunkedReader) beginChunk() {
	// chunk-size CRLF
	var line []byte
	line, cr.err = readLine(cr.r)
	if cr.err != nil {
		return
	}
	cr.n, cr.err = parseHexUint(line)
	if cr.err != nil {
		return
	}
	if cr.n == 0 {
		// A zero-length chunk marks the end of the chunked body.
		cr.err = io.EOF
	}
}
|
||||
|
||||
func (cr *chunkedReader) chunkHeaderAvailable() bool {
|
||||
n := cr.r.Buffered()
|
||||
if n > 0 {
|
||||
peek, _ := cr.r.Peek(n)
|
||||
return bytes.IndexByte(peek, '\n') >= 0
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Read implements io.Reader, returning decoded chunk payload bytes. It loops
// across chunk boundaries until b is full, an error occurs, or continuing
// would block on a new chunk header after some data has already been read.
// cr.err is sticky: once set, every subsequent call returns it.
func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
	for cr.err == nil {
		if cr.n == 0 {
			if n > 0 && !cr.chunkHeaderAvailable() {
				// We've read enough. Don't potentially block
				// reading a new chunk header.
				break
			}
			cr.beginChunk()
			continue
		}
		if len(b) == 0 {
			break
		}
		// Read at most the remainder of the current chunk.
		rbuf := b
		if uint64(len(rbuf)) > cr.n {
			rbuf = rbuf[:cr.n]
		}
		var n0 int
		n0, cr.err = cr.r.Read(rbuf)
		n += n0
		b = b[n0:]
		cr.n -= uint64(n0)
		// If we're at the end of a chunk, read the next two
		// bytes to verify they are "\r\n".
		if cr.n == 0 && cr.err == nil {
			if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
				if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
					cr.err = errors.New("malformed chunked encoding")
				}
			}
		}
	}
	return n, cr.err
}
|
||||
|
||||
// Read a line of bytes (up to \n) from b.
// Give up if the line exceeds maxLineLength.
// The returned bytes are a pointer into storage in
// the bufio, so they are only valid until the next bufio read.
// Trailing ASCII whitespace (including the CR/LF terminator) is stripped
// from the returned slice.
func readLine(b *bufio.Reader) (p []byte, err error) {
	if p, err = b.ReadSlice('\n'); err != nil {
		// We always know when EOF is coming.
		// If the caller asked for a line, there should be a line.
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		} else if err == bufio.ErrBufferFull {
			err = ErrLineTooLong
		}
		return nil, err
	}
	if len(p) >= maxLineLength {
		return nil, ErrLineTooLong
	}
	return trimTrailingWhitespace(p), nil
}
|
||||
|
||||
// trimTrailingWhitespace returns b with any trailing ASCII whitespace
// (space, tab, CR, LF) removed. The result aliases b's storage.
func trimTrailingWhitespace(b []byte) []byte {
	end := len(b)
	for end > 0 && isASCIISpace(b[end-1]) {
		end--
	}
	return b[:end]
}

// isASCIISpace reports whether b is an ASCII space, tab, newline, or
// carriage return.
func isASCIISpace(b byte) bool {
	switch b {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
|
||||
|
||||
// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP
// "chunked" format before writing them to w. Closing the returned chunkedWriter
// sends the final 0-length chunk that marks the end of the stream.
//
// newChunkedWriter is not needed by normal applications. The http
// package adds chunking automatically if handlers don't set a
// Content-Length header. Using newChunkedWriter inside a handler
// would result in double chunking or chunking with a Content-Length
// length, both of which are wrong.
func newChunkedWriter(w io.Writer) io.WriteCloser {
	return &chunkedWriter{w}
}
|
||||
|
||||
// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
// Encoding wire format to the underlying Wire chunkedWriter.
type chunkedWriter struct {
	Wire io.Writer // destination for the chunk-framed output
}
|
||||
|
||||
// Write the contents of data as one chunk to Wire.
// The frame written is: hex length, CRLF, payload, CRLF.
// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has
// a bug since it does not check for success of io.WriteString
func (cw *chunkedWriter) Write(data []byte) (n int, err error) {

	// Don't send 0-length data. It looks like EOF for chunked encoding.
	if len(data) == 0 {
		return 0, nil
	}

	// Chunk header: payload length in hexadecimal followed by CRLF.
	if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
		return 0, err
	}
	if n, err = cw.Wire.Write(data); err != nil {
		return
	}
	if n != len(data) {
		err = io.ErrShortWrite
		return
	}
	// Chunk trailer CRLF; any error is reported via the named return.
	_, err = io.WriteString(cw.Wire, "\r\n")

	return
}
|
||||
|
||||
func (cw *chunkedWriter) Close() error {
|
||||
_, err := io.WriteString(cw.Wire, "0\r\n")
|
||||
return err
|
||||
}
|
||||
|
||||
// parseHexUint parses v as a hexadecimal unsigned integer (a chunk-size
// field). It rejects non-hex bytes and, unlike the original, rejects inputs
// longer than 16 hex digits, which would silently overflow the uint64
// accumulator.
func parseHexUint(v []byte) (n uint64, err error) {
	for i, b := range v {
		switch {
		case '0' <= b && b <= '9':
			b = b - '0'
		case 'a' <= b && b <= 'f':
			b = b - 'a' + 10
		case 'A' <= b && b <= 'F':
			b = b - 'A' + 10
		default:
			return 0, errors.New("invalid byte in chunk length")
		}
		if i == 16 {
			// A 17th digit cannot fit in 64 bits; previously this wrapped.
			return 0, errors.New("http chunk length too large")
		}
		n <<= 4
		n |= uint64(b)
	}
	return
}
|
|
@ -0,0 +1,487 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// HTTP client. See RFC 2616.
|
||||
//
|
||||
// This is the high-level Client interface.
|
||||
// The low-level implementation is in transport.go.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Client is an HTTP client. Its zero value (DefaultClient) is a
// usable client that uses DefaultTransport.
//
// The Client's Transport typically has internal state (cached TCP
// connections), so Clients should be reused instead of created as
// needed. Clients are safe for concurrent use by multiple goroutines.
//
// A Client is higher-level than a RoundTripper (such as Transport)
// and additionally handles HTTP details such as cookies and
// redirects.
type Client struct {
	// Transport specifies the mechanism by which individual
	// HTTP requests are made.
	// If nil, DefaultTransport is used (see the transport method).
	Transport RoundTripper

	// CheckRedirect specifies the policy for handling redirects.
	// If CheckRedirect is not nil, the client calls it before
	// following an HTTP redirect. The arguments req and via are
	// the upcoming request and the requests made already, oldest
	// first. If CheckRedirect returns an error, the Client's Get
	// method returns both the previous Response and
	// CheckRedirect's error (wrapped in a url.Error) instead of
	// issuing the Request req.
	//
	// If CheckRedirect is nil, the Client uses its default policy,
	// which is to stop after 10 consecutive requests.
	CheckRedirect func(req *Request, via []*Request) error

	// Jar specifies the cookie jar.
	// If Jar is nil, cookies are not sent in requests and ignored
	// in responses.
	Jar CookieJar

	// Timeout specifies a time limit for requests made by this
	// Client. The timeout includes connection time, any
	// redirects, and reading the response body. The timer remains
	// running after Get, Head, Post, or Do return and will
	// interrupt reading of the Response.Body.
	//
	// A Timeout of zero means no timeout.
	//
	// The Client's Transport must support the CancelRequest
	// method or Client will return errors when attempting to make
	// a request with Get, Head, Post, or Do. Client's default
	// Transport (DefaultTransport) supports CancelRequest.
	Timeout time.Duration
}
|
||||
|
||||
// DefaultClient is the default Client and is used by Get, Head, and Post.
// Its zero value uses DefaultTransport and follows the default redirect policy.
var DefaultClient = &Client{}
|
||||
|
||||
// RoundTripper is an interface representing the ability to execute a
// single HTTP transaction, obtaining the Response for a given Request.
//
// A RoundTripper must be safe for concurrent use by multiple
// goroutines.
type RoundTripper interface {
	// RoundTrip executes a single HTTP transaction, returning
	// the Response for the request req. RoundTrip should not
	// attempt to interpret the response. In particular,
	// RoundTrip must return err == nil if it obtained a response,
	// regardless of the response's HTTP status code. A non-nil
	// err should be reserved for failure to obtain a response.
	// Similarly, RoundTrip should not attempt to handle
	// higher-level protocol details such as redirects,
	// authentication, or cookies.
	//
	// RoundTrip should not modify the request, except for
	// consuming and closing the Body, including on errors. The
	// request's URL and Header fields are guaranteed to be
	// initialized.
	RoundTrip(*Request) (*Response, error)
}
|
||||
|
||||
// hasPort reports whether s — of the form "host", "host:port", or
// "[ipv6::address]:port" — includes a port. A colon only counts as a port
// separator when it appears after the closing bracket of an IPv6 literal.
func hasPort(s string) bool {
	lastColon := strings.LastIndex(s, ":")
	lastBracket := strings.LastIndex(s, "]")
	return lastColon > lastBracket
}
|
||||
|
||||
// Used in Send to implement io.ReadCloser by bundling together the
// bufio.Reader through which we read the response, and the underlying
// network connection. Reads come from the embedded Reader; Close closes
// the embedded Closer.
type readClose struct {
	io.Reader
	io.Closer
}
|
||||
|
||||
// send applies the cookie jar (if any) to the request, issues it through the
// client's transport via the package-level send function, and stores any
// cookies from the response back into the jar.
func (c *Client) send(req *Request) (*Response, error) {
	if c.Jar != nil {
		for _, cookie := range c.Jar.Cookies(req.URL) {
			req.AddCookie(cookie)
		}
	}
	resp, err := send(req, c.transport())
	if err != nil {
		return nil, err
	}
	if c.Jar != nil {
		if rc := resp.Cookies(); len(rc) > 0 {
			c.Jar.SetCookies(req.URL, rc)
		}
	}
	return resp, err
}
|
||||
|
||||
// Do sends an HTTP request and returns an HTTP response, following
// policy (e.g. redirects, cookies, auth) as configured on the client.
//
// An error is returned if caused by client policy (such as
// CheckRedirect), or if there was an HTTP protocol error.
// A non-2xx response doesn't cause an error.
//
// When err is nil, resp always contains a non-nil resp.Body.
//
// Callers should close resp.Body when done reading from it. If
// resp.Body is not closed, the Client's underlying RoundTripper
// (typically Transport) may not be able to re-use a persistent TCP
// connection to the server for a subsequent "keep-alive" request.
//
// The request Body, if non-nil, will be closed by the underlying
// Transport, even on errors.
//
// Generally Get, Post, or PostForm will be used instead of Do.
func (c *Client) Do(req *Request) (resp *Response, err error) {
	// Only GET/HEAD and POST/PUT follow redirects (with method-specific
	// redirect policies); every other method is sent exactly once.
	if req.Method == "GET" || req.Method == "HEAD" {
		return c.doFollowingRedirects(req, shouldRedirectGet)
	}
	if req.Method == "POST" || req.Method == "PUT" {
		return c.doFollowingRedirects(req, shouldRedirectPost)
	}
	return c.send(req)
}
|
||||
|
||||
func (c *Client) transport() RoundTripper {
|
||||
if c.Transport != nil {
|
||||
return c.Transport
|
||||
}
|
||||
return DefaultTransport
|
||||
}
|
||||
|
||||
// send issues an HTTP request.
// Caller should close resp.Body when done reading from it.
// The request body is closed on every validation failure so the caller's
// io.ReadCloser is never leaked.
func send(req *Request, t RoundTripper) (resp *Response, err error) {
	if t == nil {
		req.closeBody()
		return nil, errors.New("http: no Client.Transport or DefaultTransport")
	}

	if req.URL == nil {
		req.closeBody()
		return nil, errors.New("http: nil Request.URL")
	}

	if req.RequestURI != "" {
		req.closeBody()
		return nil, errors.New("http: Request.RequestURI can't be set in client requests.")
	}

	// Most the callers of send (Get, Post, et al) don't need
	// Headers, leaving it uninitialized. We guarantee to the
	// Transport that this has been initialized, though.
	if req.Header == nil {
		req.Header = make(Header)
	}

	// Credentials embedded in the URL become a Basic Authorization header.
	if u := req.URL.User; u != nil {
		username := u.Username()
		password, _ := u.Password()
		req.Header.Set("Authorization", "Basic "+basicAuth(username, password))
	}
	resp, err = t.RoundTrip(req)
	if err != nil {
		if resp != nil {
			log.Printf("RoundTripper returned a response & error; ignoring response")
		}
		return nil, err
	}
	return resp, nil
}
|
||||
|
||||
// basicAuth builds the credential portion of an HTTP Basic Authorization
// header value.
//
// See 2 (end of page 4) http://www.ietf.org/rfc/rfc2617.txt
// "To receive authorization, the client sends the userid and password,
// separated by a single colon (":") character, within a base64
// encoded string in the credentials."
// It is not meant to be urlencoded.
func basicAuth(username, password string) string {
	credentials := []byte(username + ":" + password)
	return base64.StdEncoding.EncodeToString(credentials)
}
|
||||
|
||||
// True if the specified HTTP status code is one for which the Get utility should
|
||||
// automatically redirect.
|
||||
func shouldRedirectGet(statusCode int) bool {
|
||||
switch statusCode {
|
||||
case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// True if the specified HTTP status code is one for which the Post utility should
|
||||
// automatically redirect.
|
||||
func shouldRedirectPost(statusCode int) bool {
|
||||
switch statusCode {
|
||||
case StatusFound, StatusSeeOther:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Get issues a GET to the specified URL. If the response is one of the following
// redirect codes, Get follows the redirect, up to a maximum of 10 redirects:
//
//	301 (Moved Permanently)
//	302 (Found)
//	303 (See Other)
//	307 (Temporary Redirect)
//
// An error is returned if there were too many redirects or if there
// was an HTTP protocol error. A non-2xx response doesn't cause an
// error.
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
//
// Get is a wrapper around DefaultClient.Get.
func Get(url string) (resp *Response, err error) {
	return DefaultClient.Get(url)
}
|
||||
|
||||
// Get issues a GET to the specified URL. If the response is one of the
// following redirect codes, Get follows the redirect after calling the
// Client's CheckRedirect function.
//
//	301 (Moved Permanently)
//	302 (Found)
//	303 (See Other)
//	307 (Temporary Redirect)
//
// An error is returned if the Client's CheckRedirect function fails
// or if there was an HTTP protocol error. A non-2xx response doesn't
// cause an error.
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (c *Client) Get(url string) (resp *Response, err error) {
	req, err := NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	return c.doFollowingRedirects(req, shouldRedirectGet)
}
|
||||
|
||||
func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bool) (resp *Response, err error) {
|
||||
var base *url.URL
|
||||
redirectChecker := c.CheckRedirect
|
||||
if redirectChecker == nil {
|
||||
redirectChecker = defaultCheckRedirect
|
||||
}
|
||||
var via []*Request
|
||||
|
||||
if ireq.URL == nil {
|
||||
ireq.closeBody()
|
||||
return nil, errors.New("http: nil Request.URL")
|
||||
}
|
||||
|
||||
var reqmu sync.Mutex // guards req
|
||||
req := ireq
|
||||
|
||||
var timer *time.Timer
|
||||
if c.Timeout > 0 {
|
||||
type canceler interface {
|
||||
CancelRequest(*Request)
|
||||
}
|
||||
tr, ok := c.transport().(canceler)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("net/http: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", c.transport())
|
||||
}
|
||||
timer = time.AfterFunc(c.Timeout, func() {
|
||||
reqmu.Lock()
|
||||
defer reqmu.Unlock()
|
||||
tr.CancelRequest(req)
|
||||
})
|
||||
}
|
||||
|
||||
urlStr := "" // next relative or absolute URL to fetch (after first request)
|
||||
redirectFailed := false
|
||||
for redirect := 0; ; redirect++ {
|
||||
if redirect != 0 {
|
||||
nreq := new(Request)
|
||||
nreq.Method = ireq.Method
|
||||
if ireq.Method == "POST" || ireq.Method == "PUT" {
|
||||
nreq.Method = "GET"
|
||||
}
|
||||
nreq.Header = make(Header)
|
||||
nreq.URL, err = base.Parse(urlStr)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(via) > 0 {
|
||||
// Add the Referer header.
|
||||
lastReq := via[len(via)-1]
|
||||
if lastReq.URL.Scheme != "https" {
|
||||
nreq.Header.Set("Referer", lastReq.URL.String())
|
||||
}
|
||||
|
||||
err = redirectChecker(nreq, via)
|
||||
if err != nil {
|
||||
redirectFailed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
reqmu.Lock()
|
||||
req = nreq
|
||||
reqmu.Unlock()
|
||||
}
|
||||
|
||||
urlStr = req.URL.String()
|
||||
if resp, err = c.send(req); err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if shouldRedirect(resp.StatusCode) {
|
||||
// Read the body if small so underlying TCP connection will be re-used.
|
||||
// No need to check for errors: if it fails, Transport won't reuse it anyway.
|
||||
const maxBodySlurpSize = 2 << 10
|
||||
if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
|
||||
io.CopyN(ioutil.Discard, resp.Body, maxBodySlurpSize)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if urlStr = resp.Header.Get("Location"); urlStr == "" {
|
||||
err = errors.New(fmt.Sprintf("%d response missing Location header", resp.StatusCode))
|
||||
break
|
||||
}
|
||||
base = req.URL
|
||||
via = append(via, req)
|
||||
continue
|
||||
}
|
||||
if timer != nil {
|
||||
resp.Body = &cancelTimerBody{timer, resp.Body}
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
method := ireq.Method
|
||||
urlErr := &url.Error{
|
||||
Op: method[0:1] + strings.ToLower(method[1:]),
|
||||
URL: urlStr,
|
||||
Err: err,
|
||||
}
|
||||
|
||||
if redirectFailed {
|
||||
// Special case for Go 1 compatibility: return both the response
|
||||
// and an error if the CheckRedirect function failed.
|
||||
// See http://golang.org/issue/3795
|
||||
return resp, urlErr
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
return nil, urlErr
|
||||
}
|
||||
|
||||
func defaultCheckRedirect(req *Request, via []*Request) error {
|
||||
if len(via) >= 10 {
|
||||
return errors.New("stopped after 10 redirects")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Post issues a POST to the specified URL.
|
||||
//
|
||||
// Caller should close resp.Body when done reading from it.
|
||||
//
|
||||
// Post is a wrapper around DefaultClient.Post
|
||||
func Post(url string, bodyType string, body io.Reader) (resp *Response, err error) {
|
||||
return DefaultClient.Post(url, bodyType, body)
|
||||
}
|
||||
|
||||
// Post issues a POST to the specified URL.
|
||||
//
|
||||
// Caller should close resp.Body when done reading from it.
|
||||
//
|
||||
// If the provided body is also an io.Closer, it is closed after the
|
||||
// request.
|
||||
func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *Response, err error) {
|
||||
req, err := NewRequest("POST", url, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", bodyType)
|
||||
return c.doFollowingRedirects(req, shouldRedirectPost)
|
||||
}
|
||||
|
||||
// PostForm issues a POST to the specified URL, with data's keys and
|
||||
// values URL-encoded as the request body.
|
||||
//
|
||||
// When err is nil, resp always contains a non-nil resp.Body.
|
||||
// Caller should close resp.Body when done reading from it.
|
||||
//
|
||||
// PostForm is a wrapper around DefaultClient.PostForm
|
||||
func PostForm(url string, data url.Values) (resp *Response, err error) {
|
||||
return DefaultClient.PostForm(url, data)
|
||||
}
|
||||
|
||||
// PostForm issues a POST to the specified URL,
|
||||
// with data's keys and values urlencoded as the request body.
|
||||
//
|
||||
// When err is nil, resp always contains a non-nil resp.Body.
|
||||
// Caller should close resp.Body when done reading from it.
|
||||
func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) {
|
||||
return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
|
||||
}
|
||||
|
||||
// Head issues a HEAD to the specified URL. If the response is one of the
|
||||
// following redirect codes, Head follows the redirect after calling the
|
||||
// Client's CheckRedirect function.
|
||||
//
|
||||
// 301 (Moved Permanently)
|
||||
// 302 (Found)
|
||||
// 303 (See Other)
|
||||
// 307 (Temporary Redirect)
|
||||
//
|
||||
// Head is a wrapper around DefaultClient.Head
|
||||
func Head(url string) (resp *Response, err error) {
|
||||
return DefaultClient.Head(url)
|
||||
}
|
||||
|
||||
// Head issues a HEAD to the specified URL. If the response is one of the
|
||||
// following redirect codes, Head follows the redirect after calling the
|
||||
// Client's CheckRedirect function.
|
||||
//
|
||||
// 301 (Moved Permanently)
|
||||
// 302 (Found)
|
||||
// 303 (See Other)
|
||||
// 307 (Temporary Redirect)
|
||||
func (c *Client) Head(url string) (resp *Response, err error) {
|
||||
req, err := NewRequest("HEAD", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.doFollowingRedirects(req, shouldRedirectGet)
|
||||
}
|
||||
|
||||
// cancelTimerBody wraps a response body and stops the Client.Timeout
// timer once the body reaches EOF or is closed. It is constructed
// positionally in doFollowingRedirects, so field order must not change.
type cancelTimerBody struct {
	t  *time.Timer   // timeout timer armed by doFollowingRedirects
	rc io.ReadCloser // the underlying response body
}
|
||||
|
||||
func (b *cancelTimerBody) Read(p []byte) (n int, err error) {
|
||||
n, err = b.rc.Read(p)
|
||||
if err == io.EOF {
|
||||
b.t.Stop()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *cancelTimerBody) Close() error {
|
||||
err := b.rc.Close()
|
||||
b.t.Stop()
|
||||
return err
|
||||
}
|
|
@ -0,0 +1,363 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// This implementation is done according to RFC 6265:
|
||||
//
|
||||
// http://tools.ietf.org/html/rfc6265
|
||||
|
||||
// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
// HTTP response or the Cookie header of an HTTP request.
type Cookie struct {
	Name       string
	Value      string
	Path       string
	Domain     string
	Expires    time.Time
	RawExpires string // unparsed text of the Expires attribute

	// MaxAge=0 means no 'Max-Age' attribute specified.
	// MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'
	// MaxAge>0 means Max-Age attribute present and given in seconds
	MaxAge   int
	Secure   bool
	HttpOnly bool
	Raw      string   // the raw Set-Cookie line this cookie was parsed from
	Unparsed []string // Raw text of unparsed attribute-value pairs
}
|
||||
|
||||
// readSetCookies parses all "Set-Cookie" values from
// the header h and returns the successfully parsed Cookies.
// Malformed lines and name=value pairs are skipped silently;
// well-formed but unrecognized attributes are kept in Unparsed.
func readSetCookies(h Header) []*Cookie {
	cookies := []*Cookie{}
	for _, line := range h["Set-Cookie"] {
		parts := strings.Split(strings.TrimSpace(line), ";")
		if len(parts) == 1 && parts[0] == "" {
			continue
		}
		// parts[0] must be the name=value pair; the rest are attributes.
		parts[0] = strings.TrimSpace(parts[0])
		j := strings.Index(parts[0], "=")
		if j < 0 {
			continue
		}
		name, value := parts[0][:j], parts[0][j+1:]
		if !isCookieNameValid(name) {
			continue
		}
		value, success := parseCookieValue(value)
		if !success {
			continue
		}
		c := &Cookie{
			Name:  name,
			Value: value,
			Raw:   line,
		}
		for i := 1; i < len(parts); i++ {
			parts[i] = strings.TrimSpace(parts[i])
			if len(parts[i]) == 0 {
				continue
			}

			// Split "attr=val"; a bare attribute gets an empty val.
			attr, val := parts[i], ""
			if j := strings.Index(attr, "="); j >= 0 {
				attr, val = attr[:j], attr[j+1:]
			}
			lowerAttr := strings.ToLower(attr)
			val, success = parseCookieValue(val)
			if !success {
				// Keep the raw attribute text for callers that want it.
				c.Unparsed = append(c.Unparsed, parts[i])
				continue
			}
			switch lowerAttr {
			case "secure":
				c.Secure = true
				continue
			case "httponly":
				c.HttpOnly = true
				continue
			case "domain":
				c.Domain = val
				continue
			case "max-age":
				secs, err := strconv.Atoi(val)
				if err != nil || secs != 0 && val[0] == '0' {
					// Reject non-numeric values and leading zeros.
					break
				}
				if secs <= 0 {
					c.MaxAge = -1 // MaxAge<0 means delete now
				} else {
					c.MaxAge = secs
				}
				continue
			case "expires":
				c.RawExpires = val
				exptime, err := time.Parse(time.RFC1123, val)
				if err != nil {
					// Also accept the dashed date variant some servers emit.
					exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val)
					if err != nil {
						c.Expires = time.Time{}
						break
					}
				}
				c.Expires = exptime.UTC()
				continue
			case "path":
				c.Path = val
				continue
			}
			// Well-formed but unknown attribute: record it verbatim.
			c.Unparsed = append(c.Unparsed, parts[i])
		}
		cookies = append(cookies, c)
	}
	return cookies
}
|
||||
|
||||
// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.
|
||||
func SetCookie(w ResponseWriter, cookie *Cookie) {
|
||||
w.Header().Add("Set-Cookie", cookie.String())
|
||||
}
|
||||
|
||||
// String returns the serialization of the cookie for use in a Cookie
// header (if only Name and Value are set) or a Set-Cookie response
// header (if other fields are set).
func (c *Cookie) String() string {
	var b bytes.Buffer
	fmt.Fprintf(&b, "%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value))
	if len(c.Path) > 0 {
		fmt.Fprintf(&b, "; Path=%s", sanitizeCookiePath(c.Path))
	}
	if len(c.Domain) > 0 {
		if validCookieDomain(c.Domain) {
			// A c.Domain containing illegal characters is not
			// sanitized but simply dropped which turns the cookie
			// into a host-only cookie. A leading dot is okay
			// but won't be sent.
			d := c.Domain
			if d[0] == '.' {
				d = d[1:]
			}
			fmt.Fprintf(&b, "; Domain=%s", d)
		} else {
			log.Printf("net/http: invalid Cookie.Domain %q; dropping domain attribute",
				c.Domain)
		}
	}
	// An Expires at or before the Unix epoch (including the zero time)
	// is treated as unset and omitted.
	if c.Expires.Unix() > 0 {
		fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(time.RFC1123))
	}
	if c.MaxAge > 0 {
		fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
	} else if c.MaxAge < 0 {
		// Negative MaxAge serializes as Max-Age=0, i.e. delete now.
		fmt.Fprintf(&b, "; Max-Age=0")
	}
	if c.HttpOnly {
		fmt.Fprintf(&b, "; HttpOnly")
	}
	if c.Secure {
		fmt.Fprintf(&b, "; Secure")
	}
	return b.String()
}
|
||||
|
||||
// readCookies parses all "Cookie" values from the header h and
|
||||
// returns the successfully parsed Cookies.
|
||||
//
|
||||
// if filter isn't empty, only cookies of that name are returned
|
||||
func readCookies(h Header, filter string) []*Cookie {
|
||||
cookies := []*Cookie{}
|
||||
lines, ok := h["Cookie"]
|
||||
if !ok {
|
||||
return cookies
|
||||
}
|
||||
|
||||
for _, line := range lines {
|
||||
parts := strings.Split(strings.TrimSpace(line), ";")
|
||||
if len(parts) == 1 && parts[0] == "" {
|
||||
continue
|
||||
}
|
||||
// Per-line attributes
|
||||
parsedPairs := 0
|
||||
for i := 0; i < len(parts); i++ {
|
||||
parts[i] = strings.TrimSpace(parts[i])
|
||||
if len(parts[i]) == 0 {
|
||||
continue
|
||||
}
|
||||
name, val := parts[i], ""
|
||||
if j := strings.Index(name, "="); j >= 0 {
|
||||
name, val = name[:j], name[j+1:]
|
||||
}
|
||||
if !isCookieNameValid(name) {
|
||||
continue
|
||||
}
|
||||
if filter != "" && filter != name {
|
||||
continue
|
||||
}
|
||||
val, success := parseCookieValue(val)
|
||||
if !success {
|
||||
continue
|
||||
}
|
||||
cookies = append(cookies, &Cookie{Name: name, Value: val})
|
||||
parsedPairs++
|
||||
}
|
||||
}
|
||||
return cookies
|
||||
}
|
||||
|
||||
// validCookieDomain returns wheter v is a valid cookie domain-value.
|
||||
func validCookieDomain(v string) bool {
|
||||
if isCookieDomainName(v) {
|
||||
return true
|
||||
}
|
||||
if net.ParseIP(v) != nil && !strings.Contains(v, ":") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isCookieDomainName reports whether s is a syntactically valid
// domain name, optionally carrying the single leading dot allowed in
// a cookie Domain attribute. It is almost a direct copy of package
// net's isDomainName, except that '_' is not permitted.
func isCookieDomainName(s string) bool {
	if len(s) == 0 || len(s) > 255 {
		return false
	}
	// A cookie Domain attribute may start with one leading dot; skip it.
	if s[0] == '.' {
		s = s[1:]
	}

	sawLetter := false // a valid name must contain at least one letter
	labelLen := 0      // length of the current dot-separated label
	prev := byte('.')
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
			// No '_' allowed here (in contrast to package net).
			sawLetter = true
			labelLen++
		case '0' <= c && c <= '9':
			labelLen++
		case c == '-':
			// A label cannot begin with a dash.
			if prev == '.' {
				return false
			}
			labelLen++
		case c == '.':
			// No empty labels, no label ending in a dash,
			// and labels are at most 63 bytes.
			if prev == '.' || prev == '-' {
				return false
			}
			if labelLen > 63 || labelLen == 0 {
				return false
			}
			labelLen = 0
		default:
			return false
		}
		prev = c
	}
	if prev == '-' || labelLen > 63 {
		return false
	}

	return sawLetter
}
|
||||
|
||||
// cookieNameSanitizer replaces the CR and LF bytes that would allow
// header splitting if they appeared in a cookie name.
var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")

// sanitizeCookieName returns n with any newline or carriage-return
// bytes replaced by '-'.
func sanitizeCookieName(n string) string {
	return cookieNameSanitizer.Replace(n)
}
|
||||
|
||||
// http://tools.ietf.org/html/rfc6265#section-4.1.1
|
||||
// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
|
||||
// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
|
||||
// ; US-ASCII characters excluding CTLs,
|
||||
// ; whitespace DQUOTE, comma, semicolon,
|
||||
// ; and backslash
|
||||
// We loosen this as spaces and commas are common in cookie values
|
||||
// but we produce a quoted cookie-value in when value starts or ends
|
||||
// with a comma or space.
|
||||
// See http://golang.org/issue/7243 for the discussion.
|
||||
func sanitizeCookieValue(v string) string {
|
||||
v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v)
|
||||
if len(v) == 0 {
|
||||
return v
|
||||
}
|
||||
if v[0] == ' ' || v[0] == ',' || v[len(v)-1] == ' ' || v[len(v)-1] == ',' {
|
||||
return `"` + v + `"`
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// validCookieValueByte reports whether b may appear in a cookie-value:
// printable ASCII other than DQUOTE, semicolon, and backslash.
func validCookieValueByte(b byte) bool {
	if b < 0x20 || b >= 0x7f {
		return false // CTLs, DEL and non-ASCII
	}
	switch b {
	case '"', ';', '\\':
		return false
	}
	return true
}
|
||||
|
||||
// path-av = "Path=" path-value
|
||||
// path-value = <any CHAR except CTLs or ";">
|
||||
func sanitizeCookiePath(v string) string {
|
||||
return sanitizeOrWarn("Cookie.Path", validCookiePathByte, v)
|
||||
}
|
||||
|
||||
// validCookiePathByte reports whether b may appear in a cookie Path
// attribute: printable ASCII other than ';'.
func validCookiePathByte(b byte) bool {
	return b >= 0x20 && b < 0x7f && b != ';'
}
|
||||
|
||||
// sanitizeOrWarn returns v with every byte rejected by valid removed.
// If anything is dropped, a single warning naming fieldName is logged
// for the first offending byte.
func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string {
	firstBad := -1
	for i := 0; i < len(v); i++ {
		if !valid(v[i]) {
			firstBad = i
			break
		}
	}
	if firstBad < 0 {
		return v // common case: nothing to strip
	}
	log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[firstBad], fieldName)
	buf := make([]byte, 0, len(v))
	for i := 0; i < len(v); i++ {
		if b := v[i]; valid(b) {
			buf = append(buf, b)
		}
	}
	return string(buf)
}
|
||||
|
||||
func parseCookieValue(raw string) (string, bool) {
|
||||
// Strip the quotes, if present.
|
||||
if len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' {
|
||||
raw = raw[1 : len(raw)-1]
|
||||
}
|
||||
for i := 0; i < len(raw); i++ {
|
||||
if !validCookieValueByte(raw[i]) {
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
return raw, true
|
||||
}
|
||||
|
||||
// isCookieNameValid reports whether raw contains no rune rejected by
// isNotToken, i.e. whether it is a valid HTTP token as required of a
// cookie-name. Note an empty string passes this check.
func isCookieNameValid(raw string) bool {
	return strings.IndexFunc(raw, isNotToken) < 0
}
|
|
@ -0,0 +1,80 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package http provides HTTP client and server implementations.
|
||||
|
||||
Get, Head, Post, and PostForm make HTTP (or HTTPS) requests:
|
||||
|
||||
resp, err := http.Get("http://example.com/")
|
||||
...
|
||||
resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
|
||||
...
|
||||
resp, err := http.PostForm("http://example.com/form",
|
||||
url.Values{"key": {"Value"}, "id": {"123"}})
|
||||
|
||||
The client must close the response body when finished with it:
|
||||
|
||||
resp, err := http.Get("http://example.com/")
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
// ...
|
||||
|
||||
For control over HTTP client headers, redirect policy, and other
|
||||
settings, create a Client:
|
||||
|
||||
client := &http.Client{
|
||||
CheckRedirect: redirectPolicyFunc,
|
||||
}
|
||||
|
||||
resp, err := client.Get("http://example.com")
|
||||
// ...
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com", nil)
|
||||
// ...
|
||||
req.Header.Add("If-None-Match", `W/"wyzzy"`)
|
||||
resp, err := client.Do(req)
|
||||
// ...
|
||||
|
||||
For control over proxies, TLS configuration, keep-alives,
|
||||
compression, and other settings, create a Transport:
|
||||
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{RootCAs: pool},
|
||||
DisableCompression: true,
|
||||
}
|
||||
client := &http.Client{Transport: tr}
|
||||
resp, err := client.Get("https://example.com")
|
||||
|
||||
Clients and Transports are safe for concurrent use by multiple
|
||||
goroutines and for efficiency should only be created once and re-used.
|
||||
|
||||
ListenAndServe starts an HTTP server with a given address and handler.
|
||||
The handler is usually nil, which means to use DefaultServeMux.
|
||||
Handle and HandleFunc add handlers to DefaultServeMux:
|
||||
|
||||
http.Handle("/foo", fooHandler)
|
||||
|
||||
http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
|
||||
})
|
||||
|
||||
log.Fatal(http.ListenAndServe(":8080", nil))
|
||||
|
||||
More control over the server's behavior is available by creating a
|
||||
custom Server:
|
||||
|
||||
s := &http.Server{
|
||||
Addr: ":8080",
|
||||
Handler: myHandler,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
MaxHeaderBytes: 1 << 20,
|
||||
}
|
||||
log.Fatal(s.ListenAndServe())
|
||||
*/
|
||||
package http
|
123
vendor/github.com/masterzen/azure-sdk-for-go/core/http/filetransport.go
generated
vendored
Normal file
123
vendor/github.com/masterzen/azure-sdk-for-go/core/http/filetransport.go
generated
vendored
Normal file
|
@ -0,0 +1,123 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// fileTransport implements RoundTripper for the 'file' protocol.
type fileTransport struct {
	fh fileHandler // serves each request from the wrapped FileSystem
}
|
||||
|
||||
// NewFileTransport returns a new RoundTripper, serving the provided
|
||||
// FileSystem. The returned RoundTripper ignores the URL host in its
|
||||
// incoming requests, as well as most other properties of the
|
||||
// request.
|
||||
//
|
||||
// The typical use case for NewFileTransport is to register the "file"
|
||||
// protocol with a Transport, as in:
|
||||
//
|
||||
// t := &http.Transport{}
|
||||
// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
|
||||
// c := &http.Client{Transport: t}
|
||||
// res, err := c.Get("file:///etc/passwd")
|
||||
// ...
|
||||
func NewFileTransport(fs FileSystem) RoundTripper {
|
||||
return fileTransport{fileHandler{fs}}
|
||||
}
|
||||
|
||||
func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
|
||||
// We start ServeHTTP in a goroutine, which may take a long
|
||||
// time if the file is large. The newPopulateResponseWriter
|
||||
// call returns a channel which either ServeHTTP or finish()
|
||||
// sends our *Response on, once the *Response itself has been
|
||||
// populated (even if the body itself is still being
|
||||
// written to the res.Body, a pipe)
|
||||
rw, resc := newPopulateResponseWriter()
|
||||
go func() {
|
||||
t.fh.ServeHTTP(rw, req)
|
||||
rw.finish()
|
||||
}()
|
||||
return <-resc, nil
|
||||
}
|
||||
|
||||
func newPopulateResponseWriter() (*populateResponse, <-chan *Response) {
|
||||
pr, pw := io.Pipe()
|
||||
rw := &populateResponse{
|
||||
ch: make(chan *Response),
|
||||
pw: pw,
|
||||
res: &Response{
|
||||
Proto: "HTTP/1.0",
|
||||
ProtoMajor: 1,
|
||||
Header: make(Header),
|
||||
Close: true,
|
||||
Body: pr,
|
||||
},
|
||||
}
|
||||
return rw, rw.ch
|
||||
}
|
||||
|
||||
// populateResponse is a ResponseWriter that populates the *Response
// in res, and writes its body to a pipe connected to the response
// body. Once writes begin or finish() is called, the response is sent
// on ch.
type populateResponse struct {
	res          *Response      // response being populated; Body is the pipe's read half
	ch           chan *Response // receives res exactly once, when headers are ready
	wroteHeader  bool           // WriteHeader has been called
	hasContent   bool           // at least one Write has occurred
	sentResponse bool           // res has already been sent on ch
	pw           *io.PipeWriter // write half of the body pipe
}
|
||||
|
||||
func (pr *populateResponse) finish() {
|
||||
if !pr.wroteHeader {
|
||||
pr.WriteHeader(500)
|
||||
}
|
||||
if !pr.sentResponse {
|
||||
pr.sendResponse()
|
||||
}
|
||||
pr.pw.Close()
|
||||
}
|
||||
|
||||
func (pr *populateResponse) sendResponse() {
|
||||
if pr.sentResponse {
|
||||
return
|
||||
}
|
||||
pr.sentResponse = true
|
||||
|
||||
if pr.hasContent {
|
||||
pr.res.ContentLength = -1
|
||||
}
|
||||
pr.ch <- pr.res
|
||||
}
|
||||
|
||||
// Header returns the header map of the response being populated,
// implementing the ResponseWriter interface.
func (pr *populateResponse) Header() Header {
	return pr.res.Header
}
|
||||
|
||||
func (pr *populateResponse) WriteHeader(code int) {
|
||||
if pr.wroteHeader {
|
||||
return
|
||||
}
|
||||
pr.wroteHeader = true
|
||||
|
||||
pr.res.StatusCode = code
|
||||
pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
|
||||
}
|
||||
|
||||
func (pr *populateResponse) Write(p []byte) (n int, err error) {
|
||||
if !pr.wroteHeader {
|
||||
pr.WriteHeader(StatusOK)
|
||||
}
|
||||
pr.hasContent = true
|
||||
if !pr.sentResponse {
|
||||
pr.sendResponse()
|
||||
}
|
||||
return pr.pw.Write(p)
|
||||
}
|
|
@ -0,0 +1,549 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// HTTP file system request handler
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Dir implements http.FileSystem using the native file
|
||||
// system restricted to a specific directory tree.
|
||||
//
|
||||
// An empty Dir is treated as ".".
|
||||
type Dir string
|
||||
|
||||
func (d Dir) Open(name string) (File, error) {
|
||||
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
|
||||
strings.Contains(name, "\x00") {
|
||||
return nil, errors.New("http: invalid character in file path")
|
||||
}
|
||||
dir := string(d)
|
||||
if dir == "" {
|
||||
dir = "."
|
||||
}
|
||||
f, err := os.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// A FileSystem implements access to a collection of named files.
// The elements in a file path are separated by slash ('/', U+002F)
// characters, regardless of host operating system convention.
type FileSystem interface {
	// Open returns the named file for reading.
	Open(name string) (File, error)
}
|
||||
|
||||
// A File is returned by a FileSystem's Open method and can be
// served by the FileServer implementation.
//
// The methods should behave the same as those on an *os.File.
type File interface {
	io.Closer
	io.Reader
	Readdir(count int) ([]os.FileInfo, error)
	Seek(offset int64, whence int) (int64, error)
	Stat() (os.FileInfo, error)
}
|
||||
|
||||
func dirList(w ResponseWriter, f File) {
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
fmt.Fprintf(w, "<pre>\n")
|
||||
for {
|
||||
dirs, err := f.Readdir(100)
|
||||
if err != nil || len(dirs) == 0 {
|
||||
break
|
||||
}
|
||||
for _, d := range dirs {
|
||||
name := d.Name()
|
||||
if d.IsDir() {
|
||||
name += "/"
|
||||
}
|
||||
// name may contain '?' or '#', which must be escaped to remain
|
||||
// part of the URL path, and not indicate the start of a query
|
||||
// string or fragment.
|
||||
url := url.URL{Path: name}
|
||||
fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", url.String(), htmlReplacer.Replace(name))
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(w, "</pre>\n")
|
||||
}
|
||||
|
||||
// ServeContent replies to the request using the content in the
|
||||
// provided ReadSeeker. The main benefit of ServeContent over io.Copy
|
||||
// is that it handles Range requests properly, sets the MIME type, and
|
||||
// handles If-Modified-Since requests.
|
||||
//
|
||||
// If the response's Content-Type header is not set, ServeContent
|
||||
// first tries to deduce the type from name's file extension and,
|
||||
// if that fails, falls back to reading the first block of the content
|
||||
// and passing it to DetectContentType.
|
||||
// The name is otherwise unused; in particular it can be empty and is
|
||||
// never sent in the response.
|
||||
//
|
||||
// If modtime is not the zero time, ServeContent includes it in a
|
||||
// Last-Modified header in the response. If the request includes an
|
||||
// If-Modified-Since header, ServeContent uses modtime to decide
|
||||
// whether the content needs to be sent at all.
|
||||
//
|
||||
// The content's Seek method must work: ServeContent uses
|
||||
// a seek to the end of the content to determine its size.
|
||||
//
|
||||
// If the caller has set w's ETag header, ServeContent uses it to
|
||||
// handle requests using If-Range and If-None-Match.
|
||||
//
|
||||
// Note that *os.File implements the io.ReadSeeker interface.
|
||||
func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) {
|
||||
sizeFunc := func() (int64, error) {
|
||||
size, err := content.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
return 0, errSeeker
|
||||
}
|
||||
_, err = content.Seek(0, os.SEEK_SET)
|
||||
if err != nil {
|
||||
return 0, errSeeker
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
serveContent(w, req, name, modtime, sizeFunc, content)
|
||||
}
|
||||
|
||||
// errSeeker is returned by ServeContent's sizeFunc when the content
// doesn't seek properly. The underlying Seeker's error text isn't
// included in the sizeFunc reply so it's not sent over HTTP to end
// users (it could leak implementation details).
var errSeeker = errors.New("seeker can't seek")
|
||||
|
||||
// if name is empty, filename is unknown. (used for mime type, before sniffing)
|
||||
// if modtime.IsZero(), modtime is unknown.
|
||||
// content must be seeked to the beginning of the file.
|
||||
// The sizeFunc is called at most once. Its error, if any, is sent in the HTTP response.
|
||||
func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, sizeFunc func() (int64, error), content io.ReadSeeker) {
|
||||
if checkLastModified(w, r, modtime) {
|
||||
return
|
||||
}
|
||||
rangeReq, done := checkETag(w, r)
|
||||
if done {
|
||||
return
|
||||
}
|
||||
|
||||
code := StatusOK
|
||||
|
||||
// If Content-Type isn't set, use the file's extension to find it, but
|
||||
// if the Content-Type is unset explicitly, do not sniff the type.
|
||||
ctypes, haveType := w.Header()["Content-Type"]
|
||||
var ctype string
|
||||
if !haveType {
|
||||
ctype = mime.TypeByExtension(filepath.Ext(name))
|
||||
if ctype == "" {
|
||||
// read a chunk to decide between utf-8 text and binary
|
||||
var buf [sniffLen]byte
|
||||
n, _ := io.ReadFull(content, buf[:])
|
||||
ctype = DetectContentType(buf[:n])
|
||||
_, err := content.Seek(0, os.SEEK_SET) // rewind to output whole file
|
||||
if err != nil {
|
||||
Error(w, "seeker can't seek", StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
w.Header().Set("Content-Type", ctype)
|
||||
} else if len(ctypes) > 0 {
|
||||
ctype = ctypes[0]
|
||||
}
|
||||
|
||||
size, err := sizeFunc()
|
||||
if err != nil {
|
||||
Error(w, err.Error(), StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// handle Content-Range header.
|
||||
sendSize := size
|
||||
var sendContent io.Reader = content
|
||||
if size >= 0 {
|
||||
ranges, err := parseRange(rangeReq, size)
|
||||
if err != nil {
|
||||
Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
|
||||
return
|
||||
}
|
||||
if sumRangesSize(ranges) > size {
|
||||
// The total number of bytes in all the ranges
|
||||
// is larger than the size of the file by
|
||||
// itself, so this is probably an attack, or a
|
||||
// dumb client. Ignore the range request.
|
||||
ranges = nil
|
||||
}
|
||||
switch {
|
||||
case len(ranges) == 1:
|
||||
// RFC 2616, Section 14.16:
|
||||
// "When an HTTP message includes the content of a single
|
||||
// range (for example, a response to a request for a
|
||||
// single range, or to a request for a set of ranges
|
||||
// that overlap without any holes), this content is
|
||||
// transmitted with a Content-Range header, and a
|
||||
// Content-Length header showing the number of bytes
|
||||
// actually transferred.
|
||||
// ...
|
||||
// A response to a request for a single range MUST NOT
|
||||
// be sent using the multipart/byteranges media type."
|
||||
ra := ranges[0]
|
||||
if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil {
|
||||
Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
|
||||
return
|
||||
}
|
||||
sendSize = ra.length
|
||||
code = StatusPartialContent
|
||||
w.Header().Set("Content-Range", ra.contentRange(size))
|
||||
case len(ranges) > 1:
|
||||
for _, ra := range ranges {
|
||||
if ra.start > size {
|
||||
Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
|
||||
return
|
||||
}
|
||||
}
|
||||
sendSize = rangesMIMESize(ranges, ctype, size)
|
||||
code = StatusPartialContent
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
mw := multipart.NewWriter(pw)
|
||||
w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
|
||||
sendContent = pr
|
||||
defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
|
||||
go func() {
|
||||
for _, ra := range ranges {
|
||||
part, err := mw.CreatePart(ra.mimeHeader(ctype, size))
|
||||
if err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := io.CopyN(part, content, ra.length); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
mw.Close()
|
||||
pw.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
if w.Header().Get("Content-Encoding") == "" {
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(code)
|
||||
|
||||
if r.Method != "HEAD" {
|
||||
io.CopyN(w, sendContent, sendSize)
|
||||
}
|
||||
}
|
||||
|
||||
// modtime is the modification time of the resource to be served, or IsZero().
|
||||
// return value is whether this request is now complete.
|
||||
func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool {
|
||||
if modtime.IsZero() {
|
||||
return false
|
||||
}
|
||||
|
||||
// The Date-Modified header truncates sub-second precision, so
|
||||
// use mtime < t+1s instead of mtime <= t to check for unmodified.
|
||||
if t, err := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) {
|
||||
h := w.Header()
|
||||
delete(h, "Content-Type")
|
||||
delete(h, "Content-Length")
|
||||
w.WriteHeader(StatusNotModified)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat))
|
||||
return false
|
||||
}
|
||||
|
||||
// checkETag implements If-None-Match and If-Range checks.
|
||||
// The ETag must have been previously set in the ResponseWriter's headers.
|
||||
//
|
||||
// The return value is the effective request "Range" header to use and
|
||||
// whether this request is now considered done.
|
||||
func checkETag(w ResponseWriter, r *Request) (rangeReq string, done bool) {
|
||||
etag := w.Header().get("Etag")
|
||||
rangeReq = r.Header.get("Range")
|
||||
|
||||
// Invalidate the range request if the entity doesn't match the one
|
||||
// the client was expecting.
|
||||
// "If-Range: version" means "ignore the Range: header unless version matches the
|
||||
// current file."
|
||||
// We only support ETag versions.
|
||||
// The caller must have set the ETag on the response already.
|
||||
if ir := r.Header.get("If-Range"); ir != "" && ir != etag {
|
||||
// TODO(bradfitz): handle If-Range requests with Last-Modified
|
||||
// times instead of ETags? I'd rather not, at least for
|
||||
// now. That seems like a bug/compromise in the RFC 2616, and
|
||||
// I've never heard of anybody caring about that (yet).
|
||||
rangeReq = ""
|
||||
}
|
||||
|
||||
if inm := r.Header.get("If-None-Match"); inm != "" {
|
||||
// Must know ETag.
|
||||
if etag == "" {
|
||||
return rangeReq, false
|
||||
}
|
||||
|
||||
// TODO(bradfitz): non-GET/HEAD requests require more work:
|
||||
// sending a different status code on matches, and
|
||||
// also can't use weak cache validators (those with a "W/
|
||||
// prefix). But most users of ServeContent will be using
|
||||
// it on GET or HEAD, so only support those for now.
|
||||
if r.Method != "GET" && r.Method != "HEAD" {
|
||||
return rangeReq, false
|
||||
}
|
||||
|
||||
// TODO(bradfitz): deal with comma-separated or multiple-valued
|
||||
// list of If-None-match values. For now just handle the common
|
||||
// case of a single item.
|
||||
if inm == etag || inm == "*" {
|
||||
h := w.Header()
|
||||
delete(h, "Content-Type")
|
||||
delete(h, "Content-Length")
|
||||
w.WriteHeader(StatusNotModified)
|
||||
return "", true
|
||||
}
|
||||
}
|
||||
return rangeReq, false
|
||||
}
|
||||
|
||||
// serveFile serves the named file or directory from fs, with
// canonical-URL redirects, index.html substitution for directories,
// and a generated listing when no index file exists.
//
// name is '/'-separated, not filepath.Separator.
func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) {
	const indexPage = "/index.html"

	// redirect .../index.html to .../
	// can't use Redirect() because that would make the path absolute,
	// which would be a problem running under StripPrefix
	if strings.HasSuffix(r.URL.Path, indexPage) {
		localRedirect(w, r, "./")
		return
	}

	f, err := fs.Open(name)
	if err != nil {
		// TODO expose actual error?
		NotFound(w, r)
		return
	}
	// The receiver f is evaluated at defer time, so this closes the
	// originally opened file even though f may be reassigned to the
	// index file below (which registers its own deferred Close).
	defer f.Close()

	d, err1 := f.Stat()
	if err1 != nil {
		// TODO expose actual error?
		NotFound(w, r)
		return
	}

	if redirect {
		// redirect to canonical path: / at end of directory url
		// r.URL.Path always begins with /
		url := r.URL.Path
		if d.IsDir() {
			// Directories must end in "/"; redirect to add it.
			if url[len(url)-1] != '/' {
				localRedirect(w, r, path.Base(url)+"/")
				return
			}
		} else {
			// Plain files must not end in "/"; redirect to drop it.
			if url[len(url)-1] == '/' {
				localRedirect(w, r, "../"+path.Base(url))
				return
			}
		}
	}

	// use contents of index.html for directory, if present
	if d.IsDir() {
		index := name + indexPage
		ff, err := fs.Open(index)
		if err == nil {
			defer ff.Close()
			dd, err := ff.Stat()
			if err == nil {
				// Serve the index file in place of the directory.
				name = index
				d = dd
				f = ff
			}
		}
	}

	// Still a directory? (we didn't find an index.html file)
	if d.IsDir() {
		if checkLastModified(w, r, d.ModTime()) {
			return
		}
		dirList(w, f)
		return
	}

	// serverContent will check modification time
	sizeFunc := func() (int64, error) { return d.Size(), nil }
	serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f)
}
|
||||
|
||||
// localRedirect gives a Moved Permanently response.
|
||||
// It does not convert relative paths to absolute paths like Redirect does.
|
||||
func localRedirect(w ResponseWriter, r *Request, newPath string) {
|
||||
if q := r.URL.RawQuery; q != "" {
|
||||
newPath += "?" + q
|
||||
}
|
||||
w.Header().Set("Location", newPath)
|
||||
w.WriteHeader(StatusMovedPermanently)
|
||||
}
|
||||
|
||||
// ServeFile replies to the request with the contents of the named file or directory.
|
||||
func ServeFile(w ResponseWriter, r *Request, name string) {
|
||||
dir, file := filepath.Split(name)
|
||||
serveFile(w, r, Dir(dir), file, false)
|
||||
}
|
||||
|
||||
// fileHandler is the Handler returned by FileServer; it serves
// requests from the contents of the file system rooted at root.
type fileHandler struct {
	root FileSystem
}
|
||||
|
||||
// FileServer returns a handler that serves HTTP requests
|
||||
// with the contents of the file system rooted at root.
|
||||
//
|
||||
// To use the operating system's file system implementation,
|
||||
// use http.Dir:
|
||||
//
|
||||
// http.Handle("/", http.FileServer(http.Dir("/tmp")))
|
||||
func FileServer(root FileSystem) Handler {
|
||||
return &fileHandler{root}
|
||||
}
|
||||
|
||||
func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
|
||||
upath := r.URL.Path
|
||||
if !strings.HasPrefix(upath, "/") {
|
||||
upath = "/" + upath
|
||||
r.URL.Path = upath
|
||||
}
|
||||
serveFile(w, r, f.root, path.Clean(upath), true)
|
||||
}
|
||||
|
||||
// httpRange specifies the byte range to be sent to the client:
// start is the first byte offset, length the number of bytes.
type httpRange struct {
	start, length int64
}
|
||||
|
||||
func (r httpRange) contentRange(size int64) string {
|
||||
return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
|
||||
}
|
||||
|
||||
func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
|
||||
return textproto.MIMEHeader{
|
||||
"Content-Range": {r.contentRange(size)},
|
||||
"Content-Type": {contentType},
|
||||
}
|
||||
}
|
||||
|
||||
// parseRange parses a Range header string as per RFC 2616.
|
||||
func parseRange(s string, size int64) ([]httpRange, error) {
|
||||
if s == "" {
|
||||
return nil, nil // header not present
|
||||
}
|
||||
const b = "bytes="
|
||||
if !strings.HasPrefix(s, b) {
|
||||
return nil, errors.New("invalid range")
|
||||
}
|
||||
var ranges []httpRange
|
||||
for _, ra := range strings.Split(s[len(b):], ",") {
|
||||
ra = strings.TrimSpace(ra)
|
||||
if ra == "" {
|
||||
continue
|
||||
}
|
||||
i := strings.Index(ra, "-")
|
||||
if i < 0 {
|
||||
return nil, errors.New("invalid range")
|
||||
}
|
||||
start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
|
||||
var r httpRange
|
||||
if start == "" {
|
||||
// If no start is specified, end specifies the
|
||||
// range start relative to the end of the file.
|
||||
i, err := strconv.ParseInt(end, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.New("invalid range")
|
||||
}
|
||||
if i > size {
|
||||
i = size
|
||||
}
|
||||
r.start = size - i
|
||||
r.length = size - r.start
|
||||
} else {
|
||||
i, err := strconv.ParseInt(start, 10, 64)
|
||||
if err != nil || i > size || i < 0 {
|
||||
return nil, errors.New("invalid range")
|
||||
}
|
||||
r.start = i
|
||||
if end == "" {
|
||||
// If no end is specified, range extends to end of the file.
|
||||
r.length = size - r.start
|
||||
} else {
|
||||
i, err := strconv.ParseInt(end, 10, 64)
|
||||
if err != nil || r.start > i {
|
||||
return nil, errors.New("invalid range")
|
||||
}
|
||||
if i >= size {
|
||||
i = size - 1
|
||||
}
|
||||
r.length = i - r.start + 1
|
||||
}
|
||||
}
|
||||
ranges = append(ranges, r)
|
||||
}
|
||||
return ranges, nil
|
||||
}
|
||||
|
||||
// countingWriter counts how many bytes have been written to it.
// The data itself is discarded; only the running total (the int64
// value) is kept.
type countingWriter int64
|
||||
|
||||
func (w *countingWriter) Write(p []byte) (n int, err error) {
|
||||
*w += countingWriter(len(p))
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// rangesMIMESize returns the number of bytes it takes to encode the
|
||||
// provided ranges as a multipart response.
|
||||
func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) {
|
||||
var w countingWriter
|
||||
mw := multipart.NewWriter(&w)
|
||||
for _, ra := range ranges {
|
||||
mw.CreatePart(ra.mimeHeader(contentType, contentSize))
|
||||
encSize += ra.length
|
||||
}
|
||||
mw.Close()
|
||||
encSize += int64(w)
|
||||
return
|
||||
}
|
||||
|
||||
func sumRangesSize(ranges []httpRange) (size int64) {
|
||||
for _, ra := range ranges {
|
||||
size += ra.length
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,211 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/textproto"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var raceEnabled = false // set by race.go
|
||||
|
||||
// A Header represents the key-value pairs in an HTTP header.
// Keys map to the ordered list of values received for them; keys are
// expected in canonical form (see CanonicalHeaderKey, and the get
// method's precondition below).
type Header map[string][]string
|
||||
|
||||
// Add adds the key, value pair to the header.
|
||||
// It appends to any existing values associated with key.
|
||||
func (h Header) Add(key, value string) {
|
||||
textproto.MIMEHeader(h).Add(key, value)
|
||||
}
|
||||
|
||||
// Set sets the header entries associated with key to
|
||||
// the single element value. It replaces any existing
|
||||
// values associated with key.
|
||||
func (h Header) Set(key, value string) {
|
||||
textproto.MIMEHeader(h).Set(key, value)
|
||||
}
|
||||
|
||||
// Get gets the first value associated with the given key.
|
||||
// If there are no values associated with the key, Get returns "".
|
||||
// To access multiple values of a key, access the map directly
|
||||
// with CanonicalHeaderKey.
|
||||
func (h Header) Get(key string) string {
|
||||
return textproto.MIMEHeader(h).Get(key)
|
||||
}
|
||||
|
||||
// get is like Get, but key must already be in CanonicalHeaderKey form.
|
||||
func (h Header) get(key string) string {
|
||||
if v := h[key]; len(v) > 0 {
|
||||
return v[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Del deletes the values associated with key.
|
||||
func (h Header) Del(key string) {
|
||||
textproto.MIMEHeader(h).Del(key)
|
||||
}
|
||||
|
||||
// Write writes a header in wire format.
|
||||
func (h Header) Write(w io.Writer) error {
|
||||
return h.WriteSubset(w, nil)
|
||||
}
|
||||
|
||||
func (h Header) clone() Header {
|
||||
h2 := make(Header, len(h))
|
||||
for k, vv := range h {
|
||||
vv2 := make([]string, len(vv))
|
||||
copy(vv2, vv)
|
||||
h2[k] = vv2
|
||||
}
|
||||
return h2
|
||||
}
|
||||
|
||||
// timeFormats lists the three time layouts allowed by HTTP/1.1,
// in the order ParseTime tries them (preferred format first).
var timeFormats = []string{
	TimeFormat,
	time.RFC850,
	time.ANSIC,
}
|
||||
|
||||
// ParseTime parses a time header (such as the Date: header),
|
||||
// trying each of the three formats allowed by HTTP/1.1:
|
||||
// TimeFormat, time.RFC850, and time.ANSIC.
|
||||
func ParseTime(text string) (t time.Time, err error) {
|
||||
for _, layout := range timeFormats {
|
||||
t, err = time.Parse(layout, text)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// headerNewlineToSpace folds CR and LF into spaces so a header value
// written to the wire can never inject additional header lines.
var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ")
|
||||
|
||||
// writeStringer is the subset of writers that can write a string
// directly, letting WriteSubset skip a per-write []byte conversion
// when the destination supports it.
type writeStringer interface {
	WriteString(string) (int, error)
}
|
||||
|
||||
// stringWriter implements WriteString on a Writer; it is the fallback
// wrapper used when the destination does not implement writeStringer.
type stringWriter struct {
	w io.Writer
}
|
||||
|
||||
func (w stringWriter) WriteString(s string) (n int, err error) {
|
||||
return w.w.Write([]byte(s))
|
||||
}
|
||||
|
||||
// keyValues pairs one header key with all of its values, as stored in
// the corresponding Header map entry.
type keyValues struct {
	key    string
	values []string
}
|
||||
|
||||
// A headerSorter implements sort.Interface by sorting a []keyValues
// by key. It's used as a pointer, so it can fit in a sort.Interface
// interface value without allocation.
type headerSorter struct {
	kvs []keyValues
}
|
||||
|
||||
func (s *headerSorter) Len() int { return len(s.kvs) }
|
||||
func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] }
|
||||
func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key }
|
||||
|
||||
// headerSorterPool recycles headerSorter scratch space across
// WriteSubset calls, avoiding a fresh kvs allocation per write.
var headerSorterPool = sync.Pool{
	New: func() interface{} { return new(headerSorter) },
}
|
||||
|
||||
// sortedKeyValues returns h's keys sorted in the returned kvs
|
||||
// slice. The headerSorter used to sort is also returned, for possible
|
||||
// return to headerSorterCache.
|
||||
func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) {
|
||||
hs = headerSorterPool.Get().(*headerSorter)
|
||||
if cap(hs.kvs) < len(h) {
|
||||
hs.kvs = make([]keyValues, 0, len(h))
|
||||
}
|
||||
kvs = hs.kvs[:0]
|
||||
for k, vv := range h {
|
||||
if !exclude[k] {
|
||||
kvs = append(kvs, keyValues{k, vv})
|
||||
}
|
||||
}
|
||||
hs.kvs = kvs
|
||||
sort.Sort(hs)
|
||||
return kvs, hs
|
||||
}
|
||||
|
||||
// WriteSubset writes a header in wire format.
|
||||
// If exclude is not nil, keys where exclude[key] == true are not written.
|
||||
func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
|
||||
ws, ok := w.(writeStringer)
|
||||
if !ok {
|
||||
ws = stringWriter{w}
|
||||
}
|
||||
kvs, sorter := h.sortedKeyValues(exclude)
|
||||
for _, kv := range kvs {
|
||||
for _, v := range kv.values {
|
||||
v = headerNewlineToSpace.Replace(v)
|
||||
v = textproto.TrimString(v)
|
||||
for _, s := range []string{kv.key, ": ", v, "\r\n"} {
|
||||
if _, err := ws.WriteString(s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
headerSorterPool.Put(sorter)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CanonicalHeaderKey returns the canonical format of the
// header key s. The canonicalization converts the first
// letter and any letter following a hyphen to upper case;
// the rest are converted to lowercase. For example, the
// canonical key for "accept-encoding" is "Accept-Encoding".
func CanonicalHeaderKey(s string) string {
	return textproto.CanonicalMIMEHeaderKey(s)
}
|
||||
|
||||
// hasToken reports whether token appears with v, ASCII
|
||||
// case-insensitive, with space or comma boundaries.
|
||||
// token must be all lowercase.
|
||||
// v may contain mixed cased.
|
||||
func hasToken(v, token string) bool {
|
||||
if len(token) > len(v) || token == "" {
|
||||
return false
|
||||
}
|
||||
if v == token {
|
||||
return true
|
||||
}
|
||||
for sp := 0; sp <= len(v)-len(token); sp++ {
|
||||
// Check that first character is good.
|
||||
// The token is ASCII, so checking only a single byte
|
||||
// is sufficient. We skip this potential starting
|
||||
// position if both the first byte and its potential
|
||||
// ASCII uppercase equivalent (b|0x20) don't match.
|
||||
// False positives ('^' => '~') are caught by EqualFold.
|
||||
if b := v[sp]; b != token[0] && b|0x20 != token[0] {
|
||||
continue
|
||||
}
|
||||
// Check that start pos is on a valid token boundary.
|
||||
if sp > 0 && !isTokenBoundary(v[sp-1]) {
|
||||
continue
|
||||
}
|
||||
// Check that end pos is on a valid token boundary.
|
||||
if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) {
|
||||
continue
|
||||
}
|
||||
if strings.EqualFold(v[sp:sp+len(token)], token) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isTokenBoundary reports whether b is a byte that may delimit
// tokens in a header value: space, comma, or horizontal tab.
func isTokenBoundary(b byte) bool {
	switch b {
	case ' ', ',', '\t':
		return true
	}
	return false
}
|
|
@ -0,0 +1,27 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// A CookieJar manages storage and use of cookies in HTTP requests.
//
// Implementations of CookieJar must be safe for concurrent use by multiple
// goroutines.
//
// The net/http/cookiejar package provides a CookieJar implementation.
type CookieJar interface {
	// SetCookies handles the receipt of the cookies in a reply for the
	// given URL. It may or may not choose to save the cookies, depending
	// on the jar's policy and implementation.
	SetCookies(u *url.URL, cookies []*Cookie)

	// Cookies returns the cookies to send in a request for the given URL.
	// It is up to the implementation to honor the standard cookie use
	// restrictions such as in RFC 6265.
	Cookies(u *url.URL) []*Cookie
}
|
|
@ -0,0 +1,96 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
// This file deals with lexical matters of HTTP
|
||||
|
||||
// isTokenTable marks the ASCII bytes that may appear in an HTTP
// token: digits, letters, and the listed punctuation; everything
// else (controls, separators, space) is false by default.
var isTokenTable = [127]bool{
	'!': true, '#': true, '$': true, '%': true, '&': true, '\'': true,
	'*': true, '+': true, '-': true, '.': true,
	'0': true, '1': true, '2': true, '3': true, '4': true,
	'5': true, '6': true, '7': true, '8': true, '9': true,
	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true,
	'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true,
	'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true,
	'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
	'Y': true, 'Z': true,
	'^': true, '_': true, '`': true,
	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true,
	'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true,
	'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true,
	's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
	'y': true, 'z': true,
	'|': true, '~': true,
}
|
||||
|
||||
func isToken(r rune) bool {
|
||||
i := int(r)
|
||||
return i < len(isTokenTable) && isTokenTable[i]
|
||||
}
|
||||
|
||||
func isNotToken(r rune) bool {
|
||||
return !isToken(r)
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build race
|
||||
|
||||
package http
|
||||
|
||||
// init runs only when this file is built (the "race" build tag above);
// it flips raceEnabled so the package knows the race detector is on.
func init() {
	raceEnabled = true
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue