Merge pull request #10709 from hashicorp/stubout-post-processor-exoscale-import
Shim "exoscale-import" post-processor
Commit bb54b8bf10

@@ -87,7 +87,6 @@
/post-processor/alicloud-import/ dongxiao.zzh@alibaba-inc.com
/post-processor/checksum/ v.tolstov@selfip.ru
-/post-processor/exoscale-import/ @falzm @mcorbin
/post-processor/googlecompute-export/ crunkleton@google.com
/post-processor/yandex-export/ @GennadySpb
/post-processor/yandex-import/ @GennadySpb

@@ -72,7 +72,6 @@ import (
    checksumpostprocessor "github.com/hashicorp/packer/post-processor/checksum"
    compresspostprocessor "github.com/hashicorp/packer/post-processor/compress"
    digitaloceanimportpostprocessor "github.com/hashicorp/packer/post-processor/digitalocean-import"
-   exoscaleimportpostprocessor "github.com/hashicorp/packer/post-processor/exoscale-import"
    googlecomputeexportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-export"
    googlecomputeimportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-import"
    manifestpostprocessor "github.com/hashicorp/packer/post-processor/manifest"

@@ -190,7 +189,6 @@ var PostProcessors = map[string]packersdk.PostProcessor{
    "checksum": new(checksumpostprocessor.PostProcessor),
    "compress": new(compresspostprocessor.PostProcessor),
    "digitalocean-import": new(digitaloceanimportpostprocessor.PostProcessor),
-   "exoscale-import": new(exoscaleimportpostprocessor.PostProcessor),
    "googlecompute-export": new(googlecomputeexportpostprocessor.PostProcessor),
    "googlecompute-import": new(googlecomputeimportpostprocessor.PostProcessor),
    "manifest": new(manifestpostprocessor.PostProcessor),

@@ -6,6 +6,7 @@ import (
    // Previously core-bundled components, split into their own plugins but
    // still vendored with Packer for now. Importing as library instead of
    // forcing use of packer init, until packer v1.8.0
+   exoscaleimportpostprocessor "github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import"
    dockerbuilder "github.com/hashicorp/packer-plugin-docker/builder/docker"
    dockerimportpostprocessor "github.com/hashicorp/packer-plugin-docker/post-processor/docker-import"
    dockerpushpostprocessor "github.com/hashicorp/packer-plugin-docker/post-processor/docker-push"

@@ -30,10 +31,11 @@ var VendoredPostProcessors = map[string]packersdk.PostProcessor{
    "docker-push": new(dockerpushpostprocessor.PostProcessor),
    "docker-save": new(dockersavepostprocessor.PostProcessor),
    "docker-tag": new(dockertagpostprocessor.PostProcessor),
+   "exoscale-import": new(exoscaleimportpostprocessor.PostProcessor),
}

-// Upon init lets us load up any plugins that were vendored manually into the
-// default set of plugins.
+// Upon init lets load up any plugins that were vendored manually into the default
+// set of plugins.
func init() {
    for k, v := range VendoredBuilders {
        if _, ok := Builders[k]; ok {

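The init function above merges the manually vendored components into Packer's default plugin sets, so "exoscale-import" keeps resolving without packer init. Below is a minimal, self-contained sketch of that merge pattern; the toy map types and the keep-the-core-entry collision policy are assumptions for illustration, not the exact upstream logic.

package main

import "fmt"

// Toy stand-ins for the registries above; the real maps hold
// packersdk.Builder and packersdk.PostProcessor values keyed by component name.
var (
    defaultPostProcessors  = map[string]string{"manifest": "core"}
    vendoredPostProcessors = map[string]string{"exoscale-import": "vendored"}
)

func main() {
    // Fold the vendored set into the default set; if a core component already
    // claims the name, keep the core entry (assumed collision policy).
    for name, component := range vendoredPostProcessors {
        if _, ok := defaultPostProcessors[name]; ok {
            continue
        }
        defaultPostProcessors[name] = component
    }
    fmt.Println(defaultPostProcessors) // map[exoscale-import:vendored manifest:core]
}
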
go.mod
@@ -25,7 +25,7 @@ require (
    github.com/dgrijalva/jwt-go v3.2.0+incompatible
    github.com/digitalocean/go-qemu v0.0.0-20201211181942-d361e7b4965f
    github.com/digitalocean/godo v1.11.1
-   github.com/exoscale/egoscale v0.18.1
+   github.com/exoscale/packer-plugin-exoscale v0.1.0
    github.com/fatih/camelcase v1.0.0
    github.com/fatih/structtag v1.0.0
    github.com/go-ini/ini v1.25.4

@@ -77,7 +77,7 @@ require (
    github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible
    github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7
    github.com/shirou/gopsutil v3.21.1+incompatible
-   github.com/stretchr/testify v1.6.1
+   github.com/stretchr/testify v1.7.0
    github.com/tencentcloud/tencentcloud-sdk-go v3.0.222+incompatible
    github.com/ucloud/ucloud-sdk-go v0.16.3
    github.com/ufilesdk-dev/ufile-gosdk v0.0.0-20190830075812-b4dbc4ef43a6

@@ -88,13 +88,13 @@ require (
    github.com/yandex-cloud/go-sdk v0.0.0-20200921111412-ef15ded2014c
    github.com/zclconf/go-cty v1.7.0
    github.com/zclconf/go-cty-yaml v1.0.1
-   golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9
+   golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
    golang.org/x/mobile v0.0.0-20201208152944-da85bec010a2
    golang.org/x/mod v0.3.0
-   golang.org/x/net v0.0.0-20201209123823-ac852fbbde11
+   golang.org/x/net v0.0.0-20210119194325-5f4716e94777
    golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43
    golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
-   golang.org/x/sys v0.0.0-20201119102817-f84b799fce68
+   golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
    golang.org/x/tools v0.0.0-20201111133315-69daaf961d65
    google.golang.org/api v0.32.0
    google.golang.org/grpc v1.32.0

go.sum
@ -148,10 +148,14 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
|
|||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE=
|
||||
github.com/deepmap/oapi-codegen v1.3.11/go.mod h1:suMvK7+rKlx3+tpa8ByptmvoXbAV70wERKTOGH3hLp0=
|
||||
github.com/deepmap/oapi-codegen v1.5.1 h1:Xx8/OynzWhQeKFWr182hg6Ja2evS1gcqdja7qMn05yU=
|
||||
github.com/deepmap/oapi-codegen v1.5.1/go.mod h1:Eb1vtV3f58zvm37CJV4UAQ1bECb0fgAVvTdonC1ftJg=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/digitalocean/go-libvirt v0.0.0-20190626172931-4d226dd6c437/go.mod h1:PRcPVAAma6zcLpFd4GZrjR/MRpood3TamjKI2m/z/Uw=
|
||||
|
@ -176,8 +180,11 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
|
|||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/exoscale/egoscale v0.18.1 h1:1FNZVk8jHUx0AvWhOZxLEDNlacTU0chMXUUNkm9EZaI=
|
||||
github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE=
|
||||
github.com/exoscale/egoscale v0.43.1 h1:Lhr0UOfg3t3Y56yh1DsYCjQuUHqFvsC8iUVqvub8+0Q=
|
||||
github.com/exoscale/egoscale v0.43.1/go.mod h1:mpEXBpROAa/2i5GC0r33rfxG+TxSEka11g1PIXt9+zc=
|
||||
github.com/exoscale/packer-plugin-exoscale v0.1.0 h1:p4ymqF1tNiTuxgSdnEjGqXehMdDQbV7BPaLsMxGav24=
|
||||
github.com/exoscale/packer-plugin-exoscale v0.1.0/go.mod h1:ZmJRkxsAlmEsVYOMxYPupDkax54uZ+ph0h3W59aIMZ8=
|
||||
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
|
@ -186,8 +193,12 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL
|
|||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fatih/structtag v1.0.0 h1:pTHj65+u3RKWYPSGaU290FpI/dXxTaHdVwVwbcPKmEc=
|
||||
github.com/fatih/structtag v1.0.0/go.mod h1:IKitwq45uXL/yqi5mYghiD3w9H6eTOvI9vnk8tXMphA=
|
||||
github.com/getkin/kin-openapi v0.13.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw=
|
||||
github.com/getkin/kin-openapi v0.37.0/go.mod h1:ZJSfy1PxJv2QQvH9EdBj3nupRTVvV42mkW6zKUlRBwk=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-chi/chi v1.5.1/go.mod h1:REp24E+25iKvxgeTfHmdUoL5x15kBiDBlnIl5bCwe2k=
|
||||
github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
|
@ -197,6 +208,8 @@ github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp
|
|||
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
|
||||
github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
|
||||
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
|
||||
github.com/go-resty/resty/v2 v2.3.0 h1:JOOeAvjSlapTT92p8xiS19Zxev1neGikoHsXJeOq8So=
|
||||
github.com/go-resty/resty/v2 v2.3.0/go.mod h1:UpN9CgLZNsv4e9XG50UU8xdI0F43UQ4HmxLBDwaroHU=
|
||||
|
@ -208,8 +221,9 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
|||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gofrs/flock v0.7.3 h1:I0EKY9l8HZCXTMYC4F80vwT6KNypV9uYKP3Alm/hjmQ=
|
||||
github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
|
||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
|
||||
|
@ -242,6 +256,7 @@ github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0
|
|||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
|
@ -381,7 +396,6 @@ github.com/hashicorp/packer v1.6.7-0.20210217093213-201869d627bf/go.mod h1:+EWPP
|
|||
github.com/hashicorp/packer-plugin-docker v0.0.2 h1:j/hQTogaN2pZfZohlZTRu5YvNZg2/qtYYHkxPBxv2Oo=
|
||||
github.com/hashicorp/packer-plugin-docker v0.0.2/go.mod h1:A2p9qztS4n88KsNF+qBM7BWw2HndW636GpFIjNSvbKM=
|
||||
github.com/hashicorp/packer-plugin-sdk v0.0.6/go.mod h1:Nvh28f+Jmpp2rcaN79bULTouNkGNDRfHckhHKTAXtyU=
|
||||
github.com/hashicorp/packer-plugin-sdk v0.0.6/go.mod h1:Nvh28f+Jmpp2rcaN79bULTouNkGNDRfHckhHKTAXtyU=
|
||||
github.com/hashicorp/packer-plugin-sdk v0.0.7-0.20210111224258-fd30ebb797f0/go.mod h1:YdWTt5w6cYfaQG7IOi5iorL+3SXnz8hI0gJCi8Db/LI=
|
||||
github.com/hashicorp/packer-plugin-sdk v0.0.7-0.20210120105339-f6fd68d2570a/go.mod h1:exN0C+Pe+3zu18l4nxueNjX5cfmslxUX/m/xk4IVmZQ=
|
||||
github.com/hashicorp/packer-plugin-sdk v0.0.7-0.20210122130548-45a6ca0a9365/go.mod h1:K7VsU0lfJBDyiUrSNnS/j+zMxSRwwH9WC9QvHv32KsU=
|
||||
|
@ -405,6 +419,9 @@ github.com/hetznercloud/hcloud-go v1.15.1/go.mod h1:8lR3yHBHZWy2uGcUi9Ibt4UOoop2
|
|||
github.com/hyperonecom/h1-client-go v0.0.0-20191203060043-b46280e4c4a4 h1:mSmyzhwBeQt2TlHbsXYLona9pwjWAvYGwQJ2Cq/k3VE=
|
||||
github.com/hyperonecom/h1-client-go v0.0.0-20191203060043-b46280e4c4a4/go.mod h1:yNUVHSleURKSaYUKq4Wx0i/vjCen2aq7CvPyHd/Vj2Q=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jarcoal/httpmock v1.0.6/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
|
||||
github.com/jarcoal/httpmock v1.0.8 h1:8kI16SoO6LQKgPE7PvQuV+YuD/inwHd7fOOe2zMbo4k=
|
||||
github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
|
||||
github.com/jdcloud-api/jdcloud-sdk-go v1.9.1-0.20190605102154-3d81a50ca961 h1:a2/K4HRhg31A5vafiz5yYiGMjaCxwRpyjJStfVquKds=
|
||||
github.com/jdcloud-api/jdcloud-sdk-go v1.9.1-0.20190605102154-3d81a50ca961/go.mod h1:UrKjuULIWLjHFlG6aSPunArE5QX57LftMmStAZJBEX8=
|
||||
github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869 h1:IPJ3dvxmJ4uczJe5YQdrYB16oTJlGSC/OyZDqUk9xX4=
|
||||
|
@ -452,21 +469,31 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
|
||||
github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
|
||||
github.com/labstack/echo/v4 v4.1.17/go.mod h1:Tn2yRQL/UclUalpb5rPdXDevbkJ+lp/2svdyFBg6CHQ=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/linode/linodego v0.14.0 h1:0APKMjiVGyry2TTUVDiok72H6cWpFNMMrFWBFn14aFU=
|
||||
github.com/linode/linodego v0.14.0/go.mod h1:2ce3S00NrDqJfp4i55ZuSlT0U3cKNELNYACWBPI8Tnw=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
|
||||
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg=
|
||||
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
|
||||
github.com/masterzen/winrm v0.0.0-20200615185753-c42b5136ff88/go.mod h1:a2HXwefeat3evJHxFXSayvRHpYEPJYtErl4uIzfaUqY=
|
||||
github.com/masterzen/winrm v0.0.0-20201030141608-56ca5c5f2380 h1:uKhPH5dYpx3Z8ZAnaTGfGZUiHOWa5p5mdG8wZlh+tLo=
|
||||
github.com/masterzen/winrm v0.0.0-20201030141608-56ca5c5f2380/go.mod h1:a2HXwefeat3evJHxFXSayvRHpYEPJYtErl4uIzfaUqY=
|
||||
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
|
@ -558,6 +585,7 @@ github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R
|
|||
github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=
|
||||
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
||||
|
@ -586,14 +614,16 @@ github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
|
|||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As=
|
||||
github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tencentcloud/tencentcloud-sdk-go v3.0.222+incompatible h1:bs+0lcG4RELNbE8PsBC9oaPP0/qExr0DuEGnZyocm84=
|
||||
github.com/tencentcloud/tencentcloud-sdk-go v3.0.222+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
|
@ -608,6 +638,10 @@ github.com/ugorji/go/codec v1.2.4 h1:C5VurWRRCKjuENsbM6GYVw8W++WVW9rSxoACKIvxzz8
|
|||
github.com/ugorji/go/codec v1.2.4/go.mod h1:bWBu1+kIRWcF8uMklKaJrR6fTWQOwAlrIzX22pHwryA=
|
||||
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
|
||||
github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
|
||||
github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
|
||||
github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
|
||||
|
@ -647,14 +681,18 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
|||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9 h1:sYNJzB4J8toYPQTM6pAkcmBRgw9SnQKP9oXCHfgy604=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -708,6 +746,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
@ -727,8 +766,9 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
|
|||
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -764,12 +804,14 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -788,11 +830,13 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
|
@ -801,8 +845,9 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
|
@ -977,8 +1022,9 @@ gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
|
|
@@ -1,31 +0,0 @@
package exoscaleimport

const BuilderId = "packer.post-processor.exoscale-import"

type Artifact struct {
    id string
}

func (a *Artifact) BuilderId() string {
    return BuilderId
}

func (a *Artifact) Id() string {
    return a.id
}

func (a *Artifact) Files() []string {
    return nil
}

func (a *Artifact) String() string {
    return a.id
}

func (a *Artifact) State(name string) interface{} {
    return nil
}

func (a *Artifact) Destroy() error {
    return nil
}

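The deleted artifact type above has exactly the method set the Packer SDK artifact interface asks for; PostProcess in the next file hands back &Artifact{id} as a packersdk.Artifact. As an illustrative aside (this assertion was not in the original file), that relationship can be pinned down at compile time in the usual Go way:

package exoscaleimport

import packersdk "github.com/hashicorp/packer-plugin-sdk/packer"

// Compile-time check that *Artifact satisfies the SDK artifact interface;
// it costs nothing at runtime and breaks the build if a method disappears.
var _ packersdk.Artifact = (*Artifact)(nil)
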
@@ -1,270 +0,0 @@
//go:generate mapstructure-to-hcl2 -type Config

package exoscaleimport

import (
    "context"
    "crypto/md5"
    "encoding/base64"
    "fmt"
    "io"
    "os"
    "path/filepath"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"
    "github.com/exoscale/egoscale"
    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/hashicorp/packer-plugin-sdk/common"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer-plugin-sdk/template/config"
    "github.com/hashicorp/packer/builder/file"
    "github.com/hashicorp/packer/builder/qemu"
    "github.com/hashicorp/packer/post-processor/artifice"
    "github.com/hashicorp/packer/post-processor/exoscale-import/version"
)

var (
    defaultTemplateZone = "ch-gva-2"
    defaultAPIEndpoint = "https://api.exoscale.com/compute"
    defaultSOSEndpoint = "https://sos-" + defaultTemplateZone + ".exo.io"
)

type Config struct {
    common.PackerConfig `mapstructure:",squash"`
    SkipClean bool `mapstructure:"skip_clean"`

    SOSEndpoint string `mapstructure:"sos_endpoint"`
    APIEndpoint string `mapstructure:"api_endpoint"`
    APIKey string `mapstructure:"api_key"`
    APISecret string `mapstructure:"api_secret"`
    ImageBucket string `mapstructure:"image_bucket"`
    TemplateZone string `mapstructure:"template_zone"`
    TemplateName string `mapstructure:"template_name"`
    TemplateDescription string `mapstructure:"template_description"`
    TemplateUsername string `mapstructure:"template_username"`
    TemplateDisablePassword bool `mapstructure:"template_disable_password"`
    TemplateDisableSSHKey bool `mapstructure:"template_disable_sshkey"`
}

func init() {
    egoscale.UserAgent = "Packer-Exoscale/" + version.ExoscaleImportPluginVersion.FormattedVersion() + " " + egoscale.UserAgent
}

type PostProcessor struct {
    config Config
}

func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }

func (p *PostProcessor) Configure(raws ...interface{}) error {
    p.config.TemplateZone = defaultTemplateZone
    p.config.APIEndpoint = defaultAPIEndpoint
    p.config.SOSEndpoint = defaultSOSEndpoint

    if err := config.Decode(&p.config, nil, raws...); err != nil {
        return err
    }

    if p.config.APIKey == "" {
        p.config.APIKey = os.Getenv("EXOSCALE_API_KEY")
    }

    if p.config.APISecret == "" {
        p.config.APISecret = os.Getenv("EXOSCALE_API_SECRET")
    }

    requiredArgs := map[string]*string{
        "api_key": &p.config.APIKey,
        "api_secret": &p.config.APISecret,
        "api_endpoint": &p.config.APIEndpoint,
        "sos_endpoint": &p.config.SOSEndpoint,
        "image_bucket": &p.config.ImageBucket,
        "template_zone": &p.config.TemplateZone,
        "template_name": &p.config.TemplateName,
        "template_description": &p.config.TemplateDescription,
    }

    errs := new(packersdk.MultiError)
    for k, v := range requiredArgs {
        if *v == "" {
            errs = packersdk.MultiErrorAppend(
                errs, fmt.Errorf("%s must be set", k))
        }
    }

    if len(errs.Errors) > 0 {
        return errs
    }

    packersdk.LogSecretFilter.Set(p.config.APIKey, p.config.APISecret)

    return nil
}

func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, a packersdk.Artifact) (packersdk.Artifact, bool, bool, error) {
    switch a.BuilderId() {
    case qemu.BuilderId, file.BuilderId, artifice.BuilderId:
        break

    default:
        err := fmt.Errorf(
            "Unknown artifact type: %s\nCan only import from QEMU/file builders and Artifice post-processor artifacts.",
            a.BuilderId())
        return nil, false, false, err
    }

    ui.Message("Uploading template image")
    url, md5sum, err := p.uploadImage(ctx, ui, a)
    if err != nil {
        return nil, false, false, fmt.Errorf("unable to upload image: %s", err)
    }

    ui.Message("Registering template")
    id, err := p.registerTemplate(ctx, ui, url, md5sum)
    if err != nil {
        return nil, false, false, fmt.Errorf("unable to register template: %s", err)
    }

    if !p.config.SkipClean {
        ui.Message("Deleting uploaded template image")
        if err = p.deleteImage(ctx, ui, a); err != nil {
            return nil, false, false, fmt.Errorf("unable to delete uploaded template image: %s", err)
        }
    }

    return &Artifact{id}, false, false, nil
}

func (p *PostProcessor) uploadImage(ctx context.Context, ui packersdk.Ui, a packersdk.Artifact) (string, string, error) {
    var (
        imageFile = a.Files()[0]
        bucketFile = filepath.Base(imageFile)
    )

    f, err := os.Open(imageFile)
    if err != nil {
        return "", "", err
    }
    defer f.Close()

    fileInfo, err := f.Stat()
    if err != nil {
        return "", "", err
    }

    // For tracking image file upload progress
    pf := ui.TrackProgress(imageFile, 0, fileInfo.Size(), f)
    defer pf.Close()

    hash := md5.New()
    if _, err := io.Copy(hash, f); err != nil {
        return "", "", fmt.Errorf("image checksumming failed: %s", err)
    }
    if _, err := f.Seek(0, 0); err != nil {
        return "", "", err
    }

    sess := session.Must(session.NewSessionWithOptions(session.Options{Config: aws.Config{
        Region: aws.String(p.config.TemplateZone),
        Endpoint: aws.String(p.config.SOSEndpoint),
        Credentials: credentials.NewStaticCredentials(p.config.APIKey, p.config.APISecret, "")}}))

    uploader := s3manager.NewUploader(sess)
    output, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
        Body: pf,
        Bucket: aws.String(p.config.ImageBucket),
        Key: aws.String(bucketFile),
        ContentMD5: aws.String(base64.StdEncoding.EncodeToString(hash.Sum(nil))),
        ACL: aws.String("public-read"),
    })
    if err != nil {
        return "", "", err
    }

    return output.Location, fmt.Sprintf("%x", hash.Sum(nil)), nil
}

func (p *PostProcessor) deleteImage(ctx context.Context, ui packersdk.Ui, a packersdk.Artifact) error {
    var (
        imageFile = a.Files()[0]
        bucketFile = filepath.Base(imageFile)
    )

    sess := session.Must(session.NewSessionWithOptions(session.Options{Config: aws.Config{
        Region: aws.String(p.config.TemplateZone),
        Endpoint: aws.String(p.config.SOSEndpoint),
        Credentials: credentials.NewStaticCredentials(p.config.APIKey, p.config.APISecret, "")}}))

    svc := s3.New(sess)
    if _, err := svc.DeleteObject(&s3.DeleteObjectInput{
        Bucket: aws.String(p.config.ImageBucket),
        Key: aws.String(bucketFile),
    }); err != nil {
        return err
    }

    return nil
}

func (p *PostProcessor) registerTemplate(ctx context.Context, ui packersdk.Ui, url, md5sum string) (string, error) {
    var (
        passwordEnabled = !p.config.TemplateDisablePassword
        sshkeyEnabled = !p.config.TemplateDisableSSHKey
        regErr error
    )

    exo := egoscale.NewClient(p.config.APIEndpoint, p.config.APIKey, p.config.APISecret)
    exo.RetryStrategy = egoscale.FibonacciRetryStrategy

    zone := egoscale.Zone{Name: p.config.TemplateZone}
    if resp, err := exo.GetWithContext(ctx, &zone); err != nil {
        return "", fmt.Errorf("template zone lookup failed: %s", err)
    } else {
        zone.ID = resp.(*egoscale.Zone).ID
    }

    req := egoscale.RegisterCustomTemplate{
        URL: url,
        ZoneID: zone.ID,
        Name: p.config.TemplateName,
        Displaytext: p.config.TemplateDescription,
        PasswordEnabled: &passwordEnabled,
        SSHKeyEnabled: &sshkeyEnabled,
        Details: map[string]string{"username": p.config.TemplateUsername},
        Checksum: md5sum,
    }

    res := make([]egoscale.Template, 0)

    exo.AsyncRequestWithContext(ctx, req, func(jobRes *egoscale.AsyncJobResult, err error) bool {
        if err != nil {
            regErr = fmt.Errorf("request failed: %s", err)
            return false
        } else if jobRes.JobStatus == egoscale.Pending {
            // Job is not completed yet
            ui.Message("template registration in progress")
            return true
        }

        if err := jobRes.Result(&res); err != nil {
            regErr = err
            return false
        }

        if len(res) != 1 {
            regErr = fmt.Errorf("unexpected response from API (expected 1 item, got %d)", len(res))
            return false
        }

        return false
    })
    if regErr != nil {
        return "", regErr
    }

    return res[0].ID.String(), nil
}

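uploadImage above hashes the image once and then uses the digest twice: base64-encoded for the S3/SOS ContentMD5 header, and hex-encoded as the checksum passed to template registration. A small standalone sketch of that double encoding, using only the standard library (the image path is hypothetical):

package main

import (
    "crypto/md5"
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "io"
    "os"
)

func main() {
    // Hypothetical image produced by a QEMU build.
    f, err := os.Open("output/disk.qcow2")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    h := md5.New()
    if _, err := io.Copy(h, f); err != nil {
        panic(err)
    }
    sum := h.Sum(nil)

    // The object storage API wants the digest base64-encoded in ContentMD5 ...
    fmt.Println("ContentMD5:", base64.StdEncoding.EncodeToString(sum))
    // ... while RegisterCustomTemplate wants the same digest as a hex string.
    fmt.Println("Checksum:", hex.EncodeToString(sum))
}
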
@@ -1,13 +0,0 @@
package version

import (
    "github.com/hashicorp/packer-plugin-sdk/version"
    packerVersion "github.com/hashicorp/packer/version"
)

var ExoscaleImportPluginVersion *version.PluginVersion

func init() {
    ExoscaleImportPluginVersion = version.InitializePluginVersion(
        packerVersion.Version, packerVersion.VersionPrerelease)
}
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@@ -0,0 +1,24 @@
// Copyright 2021 DeepMap, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package runtime

// Binder is the interface implemented by types that can be bound to a query string or a parameter string
// The input can be assumed to be a valid string. If you define a Bind method you are responsible for all
// data being completely bound to the type.
//
// By convention, to approximate the behavior of Bind functions themselves,
// Binder implements Bind("") as a no-op.
type Binder interface {
    Bind(src string) error
}

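The Binder interface above lets generated server code delegate parameter parsing to the destination type. A small, self-contained example of a type implementing it; the DateParam type is purely illustrative (the library's own date handling lives in the pkg/types package imported by the next file):

package main

import (
    "fmt"
    "strings"
    "time"
)

// DateParam binds an incoming "YYYY-MM-DD" parameter string to a time.Time.
type DateParam struct{ time.Time }

// Bind implements the Binder interface shown above. Per the documented
// convention, an empty input is treated as a no-op.
func (d *DateParam) Bind(src string) error {
    if strings.TrimSpace(src) == "" {
        return nil
    }
    t, err := time.Parse("2006-01-02", src)
    if err != nil {
        return fmt.Errorf("binding %q: %w", src, err)
    }
    d.Time = t
    return nil
}

func main() {
    var d DateParam
    if err := d.Bind("2021-02-22"); err != nil {
        panic(err)
    }
    fmt.Println(d.Format("2006-01-02"))
}
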
@ -0,0 +1,463 @@
|
|||
// Copyright 2019 DeepMap, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/deepmap/oapi-codegen/pkg/types"
|
||||
)
|
||||
|
||||
// This function binds a parameter as described in the Path Parameters
|
||||
// section here to a Go object:
|
||||
// https://swagger.io/docs/specification/serialization/
|
||||
func BindStyledParameter(style string, explode bool, paramName string,
|
||||
value string, dest interface{}) error {
|
||||
|
||||
if value == "" {
|
||||
return fmt.Errorf("parameter '%s' is empty, can't bind its value", paramName)
|
||||
}
|
||||
|
||||
// Everything comes in by pointer, dereference it
|
||||
v := reflect.Indirect(reflect.ValueOf(dest))
|
||||
|
||||
// This is the basic type of the destination object.
|
||||
t := v.Type()
|
||||
|
||||
if t.Kind() == reflect.Struct {
|
||||
// We've got a destination object, we'll create a JSON representation
|
||||
// of the input value, and let the json library deal with the unmarshaling
|
||||
parts, err := splitStyledParameter(style, explode, true, paramName, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return bindSplitPartsToDestinationStruct(paramName, parts, explode, dest)
|
||||
}
|
||||
|
||||
if t.Kind() == reflect.Slice {
|
||||
// Chop up the parameter into parts based on its style
|
||||
parts, err := splitStyledParameter(style, explode, false, paramName, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error splitting input '%s' into parts: %s", value, err)
|
||||
}
|
||||
|
||||
return bindSplitPartsToDestinationArray(parts, dest)
|
||||
}
|
||||
|
||||
// Try to bind the remaining types as a base type.
|
||||
return BindStringToObject(value, dest)
|
||||
}
|
||||
|
||||
// This is a complex set of operations, but each given parameter style can be
|
||||
// packed together in multiple ways, using different styles of separators, and
|
||||
// different packing strategies based on the explode flag. This function takes
|
||||
// as input any parameter format, and unpacks it to a simple list of strings
|
||||
// or key-values which we can then treat generically.
|
||||
// Why, oh why, great Swagger gods, did you have to make this so complicated?
|
||||
func splitStyledParameter(style string, explode bool, object bool, paramName string, value string) ([]string, error) {
|
||||
switch style {
|
||||
case "simple":
|
||||
// In the simple case, we always split on comma
|
||||
parts := strings.Split(value, ",")
|
||||
return parts, nil
|
||||
case "label":
|
||||
// In the label case, it's more tricky. In the no explode case, we have
|
||||
// /users/.3,4,5 for arrays
|
||||
// /users/.role,admin,firstName,Alex for objects
|
||||
// in the explode case, we have:
|
||||
// /users/.3.4.5
|
||||
// /users/.role=admin.firstName=Alex
|
||||
if explode {
|
||||
// In the exploded case, split everything on periods.
|
||||
parts := strings.Split(value, ".")
|
||||
// The first part should be an empty string because we have a
|
||||
// leading period.
|
||||
if parts[0] != "" {
|
||||
return nil, fmt.Errorf("invalid format for label parameter '%s', should start with '.'", paramName)
|
||||
}
|
||||
return parts[1:], nil
|
||||
|
||||
} else {
|
||||
// In the unexploded case, we strip off the leading period.
|
||||
if value[0] != '.' {
|
||||
return nil, fmt.Errorf("invalid format for label parameter '%s', should start with '.'", paramName)
|
||||
}
|
||||
// The rest is comma separated.
|
||||
return strings.Split(value[1:], ","), nil
|
||||
}
|
||||
|
||||
case "matrix":
|
||||
if explode {
|
||||
// In the exploded case, we break everything up on semicolon
|
||||
parts := strings.Split(value, ";")
|
||||
// The first part should always be empty string, since we started
|
||||
// with ;something
|
||||
if parts[0] != "" {
|
||||
return nil, fmt.Errorf("invalid format for matrix parameter '%s', should start with ';'", paramName)
|
||||
}
|
||||
parts = parts[1:]
|
||||
// Now, if we have an object, we just have a list of x=y statements.
|
||||
// for a non-object, like an array, we have id=x, id=y. id=z, etc,
|
||||
// so we need to strip the prefix from each of them.
|
||||
if !object {
|
||||
prefix := paramName + "="
|
||||
for i := range parts {
|
||||
parts[i] = strings.TrimPrefix(parts[i], prefix)
|
||||
}
|
||||
}
|
||||
return parts, nil
|
||||
} else {
|
||||
// In the unexploded case, parameters will start with ;paramName=
|
||||
prefix := ";" + paramName + "="
|
||||
if !strings.HasPrefix(value, prefix) {
|
||||
return nil, fmt.Errorf("expected parameter '%s' to start with %s", paramName, prefix)
|
||||
}
|
||||
str := strings.TrimPrefix(value, prefix)
|
||||
return strings.Split(str, ","), nil
|
||||
}
|
||||
case "form":
|
||||
var parts []string
|
||||
if explode {
|
||||
parts = strings.Split(value, "&")
|
||||
if !object {
|
||||
prefix := paramName + "="
|
||||
for i := range parts {
|
||||
parts[i] = strings.TrimPrefix(parts[i], prefix)
|
||||
}
|
||||
}
|
||||
return parts, nil
|
||||
} else {
|
||||
parts = strings.Split(value, ",")
|
||||
prefix := paramName + "="
|
||||
for i := range parts {
|
||||
parts[i] = strings.TrimPrefix(parts[i], prefix)
|
||||
}
|
||||
}
|
||||
return parts, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unhandled parameter style: %s", style)
|
||||
}
|
||||
|
||||
// Given a set of values as a slice, create a slice to hold them all, and
|
||||
// assign to each one by one.
|
||||
func bindSplitPartsToDestinationArray(parts []string, dest interface{}) error {
|
||||
// Everything comes in by pointer, dereference it
|
||||
v := reflect.Indirect(reflect.ValueOf(dest))
|
||||
|
||||
// This is the basic type of the destination object.
|
||||
t := v.Type()
|
||||
|
||||
// We've got a destination array, bind each object one by one.
|
||||
// This generates a slice of the correct element type and length to
|
||||
// hold all the parts.
|
||||
newArray := reflect.MakeSlice(t, len(parts), len(parts))
|
||||
for i, p := range parts {
|
||||
err := BindStringToObject(p, newArray.Index(i).Addr().Interface())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error setting array element: %s", err)
|
||||
}
|
||||
}
|
||||
v.Set(newArray)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Given a set of chopped up parameter parts, bind them to a destination
|
||||
// struct. The exploded parameter controls whether we send key value pairs
|
||||
// in the exploded case, or a sequence of values which are interpreted as
|
||||
// tuples.
|
||||
// Given the struct Id { firstName string, role string }, as in the canonical
|
||||
// swagger examples, in the exploded case, we would pass
|
||||
// ["firstName=Alex", "role=admin"], where in the non-exploded case, we would
|
||||
// pass "firstName", "Alex", "role", "admin"]
|
||||
//
|
||||
// We punt the hard work of binding these values to the object to the json
|
||||
// library. We'll turn those arrays into JSON strings, and unmarshal
|
||||
// into the struct.
|
||||
func bindSplitPartsToDestinationStruct(paramName string, parts []string, explode bool, dest interface{}) error {
|
||||
// We've got a destination object, we'll create a JSON representation
|
||||
// of the input value, and let the json library deal with the unmarshaling
|
||||
var fields []string
|
||||
if explode {
|
||||
fields = make([]string, len(parts))
|
||||
for i, property := range parts {
|
||||
propertyParts := strings.Split(property, "=")
|
||||
if len(propertyParts) != 2 {
|
||||
return fmt.Errorf("parameter '%s' has invalid exploded format", paramName)
|
||||
}
|
||||
fields[i] = "\"" + propertyParts[0] + "\":\"" + propertyParts[1] + "\""
|
||||
}
|
||||
} else {
|
||||
if len(parts)%2 != 0 {
|
||||
return fmt.Errorf("parameter '%s' has invalid format, property/values need to be pairs", paramName)
|
||||
}
|
||||
fields = make([]string, len(parts)/2)
|
||||
for i := 0; i < len(parts); i += 2 {
|
||||
key := parts[i]
|
||||
value := parts[i+1]
|
||||
fields[i/2] = "\"" + key + "\":\"" + value + "\""
|
||||
}
|
||||
}
|
||||
jsonParam := "{" + strings.Join(fields, ",") + "}"
|
||||
err := json.Unmarshal([]byte(jsonParam), dest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error binding parameter %s fields: %s", paramName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This works much like BindStyledParameter, however it takes a query argument
|
||||
// input array from the url package, since query arguments come through a
|
||||
// different path than the styled arguments. They're also exceptionally fussy.
|
||||
// For example, consider the exploded and unexploded form parameter examples:
|
||||
// (exploded) /users?role=admin&firstName=Alex
|
||||
// (unexploded) /users?id=role,admin,firstName,Alex
|
||||
//
|
||||
// In the first case, we can pull the "id" parameter off the context,
|
||||
// and unmarshal via json as an intermediate. Easy. In the second case, we
|
||||
// don't have the id QueryParam present, but must find "role", and "firstName".
|
||||
// what if there is another parameter similar to "ID" named "role"? We can't
|
||||
// tell them apart. This code tries to fail, but the moral of the story is that
|
||||
// you shouldn't pass objects via form styled query arguments, just use
|
||||
// the Content parameter form.
|
||||
func BindQueryParameter(style string, explode bool, required bool, paramName string,
|
||||
queryParams url.Values, dest interface{}) error {
|
||||
|
||||
// dv = destination value.
|
||||
dv := reflect.Indirect(reflect.ValueOf(dest))
|
||||
|
||||
// intermediate value form which is either dv or dv dereferenced.
|
||||
v := dv
|
||||
|
||||
// inner code will bind the string's value to this interface.
|
||||
var output interface{}
|
||||
|
||||
if required {
|
||||
// If the parameter is required, then the generated code will pass us
|
||||
// a pointer to it: &int, &object, and so forth. We can directly set
|
||||
// them.
|
||||
output = dest
|
||||
} else {
|
||||
// For optional parameters, we have an extra indirect. An optional
|
||||
// parameter of type "int" will be *int on the struct. We pass that
|
||||
// in by pointer, and have **int.
|
||||
|
||||
// If the destination, is a nil pointer, we need to allocate it.
|
||||
if v.IsNil() {
|
||||
t := v.Type()
|
||||
newValue := reflect.New(t.Elem())
|
||||
// for now, hang onto the output buffer separately from destination,
|
||||
// as we don't want to write anything to destination until we can
|
||||
// unmarshal successfully, and check whether a field is required.
|
||||
output = newValue.Interface()
|
||||
} else {
|
||||
// If the destination isn't nil, just use that.
|
||||
output = v.Interface()
|
||||
}
|
||||
|
||||
// Get rid of that extra indirect as compared to the required case,
|
||||
// so the code below doesn't have to care.
|
||||
v = reflect.Indirect(reflect.ValueOf(output))
|
||||
}
|
||||
|
||||
// This is the basic type of the destination object.
|
||||
t := v.Type()
|
||||
k := t.Kind()
|
||||
|
||||
switch style {
|
||||
case "form":
|
||||
var parts []string
|
||||
if explode {
|
||||
// ok, the explode case in query arguments is very, very annoying,
|
||||
// because an exploded object, such as /users?role=admin&firstName=Alex
|
||||
// isn't actually present in the parameter array. We have to do
|
||||
// different things based on destination type.
|
||||
values, found := queryParams[paramName]
|
||||
var err error
|
||||
|
||||
switch k {
|
||||
case reflect.Slice:
|
||||
// In the slice case, we simply use the arguments provided by
|
||||
// http library.
|
||||
if !found {
|
||||
if required {
|
||||
return fmt.Errorf("query parameter '%s' is required", paramName)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
err = bindSplitPartsToDestinationArray(values, output)
|
||||
case reflect.Struct:
|
||||
// This case is really annoying, and error prone, but the
|
||||
// form style object binding doesn't tell us which arguments
|
||||
// in the query string correspond to the object's fields. We'll
|
||||
// try to bind field by field.
|
||||
err = bindParamsToExplodedObject(paramName, queryParams, output)
|
||||
default:
|
||||
// Primitive object case. We expect to have 1 value to
|
||||
// unmarshal.
|
||||
if len(values) == 0 {
|
||||
if required {
|
||||
return fmt.Errorf("query parameter '%s' is required", paramName)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if len(values) != 1 {
|
||||
return fmt.Errorf("multiple values for single value parameter '%s'", paramName)
|
||||
}
|
||||
err = BindStringToObject(values[0], output)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the parameter is required, and we've successfully unmarshaled
|
||||
// it, this assigns the new object to the pointer pointer.
|
||||
if !required {
|
||||
dv.Set(reflect.ValueOf(output))
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
values, found := queryParams[paramName]
|
||||
if !found {
|
||||
if required {
|
||||
return fmt.Errorf("query parameter '%s' is required", paramName)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if len(values) != 1 {
|
||||
return fmt.Errorf("parameter '%s' is not exploded, but is specified multiple times", paramName)
|
||||
}
|
||||
parts = strings.Split(values[0], ",")
|
||||
}
|
||||
var err error
|
||||
switch k {
|
||||
case reflect.Slice:
|
||||
err = bindSplitPartsToDestinationArray(parts, output)
|
||||
case reflect.Struct:
|
||||
err = bindSplitPartsToDestinationStruct(paramName, parts, explode, output)
|
||||
default:
|
||||
if len(parts) == 0 {
|
||||
if required {
|
||||
return fmt.Errorf("query parameter '%s' is required", paramName)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if len(parts) != 1 {
|
||||
return fmt.Errorf("multiple values for single value parameter '%s'", paramName)
|
||||
}
|
||||
err = BindStringToObject(parts[0], output)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !required {
|
||||
dv.Set(reflect.ValueOf(output))
|
||||
}
|
||||
return nil
|
||||
case "deepObject":
|
||||
if !explode {
|
||||
return errors.New("deepObjects must be exploded")
|
||||
}
|
||||
return UnmarshalDeepObject(dest, paramName, queryParams)
|
||||
case "spaceDelimited", "pipeDelimited":
|
||||
return fmt.Errorf("query arguments of style '%s' aren't yet supported", style)
|
||||
default:
|
||||
return fmt.Errorf("style '%s' on parameter '%s' is invalid", style, paramName)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// This function reflects the destination structure, and pulls the value for
|
||||
// each settable field from the given parameters map. This is to deal with the
|
||||
// exploded form styled object which may occupy any number of parameter names.
|
||||
// We don't try to be smart here, if the field exists as a query argument,
|
||||
// set its value.
|
||||
func bindParamsToExplodedObject(paramName string, values url.Values, dest interface{}) error {
|
||||
// Dereference pointers to their destination values
|
||||
binder, v, t := indirect(dest)
|
||||
if binder != nil {
|
||||
return BindStringToObject(values.Get(paramName), dest)
|
||||
}
|
||||
if t.Kind() != reflect.Struct {
|
||||
return fmt.Errorf("unmarshaling query arg '%s' into wrong type", paramName)
|
||||
}
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
fieldT := t.Field(i)
|
||||
|
||||
// Skip unsettable fields, such as internal ones.
|
||||
if !v.Field(i).CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Find the json annotation on the field, and use the json specified
|
||||
// name if available, otherwise, just the field name.
|
||||
tag := fieldT.Tag.Get("json")
|
||||
fieldName := fieldT.Name
|
||||
if tag != "" {
|
||||
tagParts := strings.Split(tag, ",")
|
||||
name := tagParts[0]
|
||||
if name != "" {
|
||||
fieldName = name
|
||||
}
|
||||
}
|
||||
|
||||
// At this point, we look up field name in the parameter list.
|
||||
fieldVal, found := values[fieldName]
|
||||
if found {
|
||||
if len(fieldVal) != 1 {
|
||||
return fmt.Errorf("field '%s' specified multiple times for param '%s'", fieldName, paramName)
|
||||
}
|
||||
err := BindStringToObject(fieldVal[0], v.Field(i).Addr().Interface())
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not bind query arg '%s' to request object: %s'", paramName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// indirect
|
||||
func indirect(dest interface{}) (interface{}, reflect.Value, reflect.Type) {
|
||||
v := reflect.ValueOf(dest)
|
||||
if v.Type().NumMethod() > 0 && v.CanInterface() {
|
||||
if u, ok := v.Interface().(Binder); ok {
|
||||
return u, reflect.Value{}, nil
|
||||
}
|
||||
}
|
||||
v = reflect.Indirect(v)
|
||||
t := v.Type()
|
||||
// special handling for custom types which might look like an object. We
|
||||
// don't want to use object binding on them, but rather treat them as
|
||||
// primitive types. time.Time{} is a unique case since we can't add a Binder
|
||||
// to it without changing the underlying generated code.
|
||||
if t.ConvertibleTo(reflect.TypeOf(time.Time{})) {
|
||||
return dest, reflect.Value{}, nil
|
||||
}
|
||||
if t.ConvertibleTo(reflect.TypeOf(types.Date{})) {
|
||||
return dest, reflect.Value{}, nil
|
||||
}
|
||||
return nil, v, t
|
||||
}
|
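(A usage sketch, not part of this diff; it assumes the file above is importable as
github.com/deepmap/oapi-codegen/pkg/runtime.)

    package main

    import (
        "fmt"

        "github.com/deepmap/oapi-codegen/pkg/runtime"
    )

    func main() {
        // A path parameter rendered as /users/3,4,5 (style=simple, explode=false).
        var ids []int64
        if err := runtime.BindStyledParameter("simple", false, "id", "3,4,5", &ids); err != nil {
            panic(err)
        }
        fmt.Println(ids) // [3 4 5]

        // A single primitive value falls through to BindStringToObject.
        var limit int
        if err := runtime.BindStyledParameter("simple", false, "limit", "42", &limit); err != nil {
            panic(err)
        }
        fmt.Println(limit) // 42
    }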
|
@@ -0,0 +1,143 @@
|
|||
// Copyright 2019 DeepMap, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/deepmap/oapi-codegen/pkg/types"
|
||||
)
|
||||
|
||||
// This function takes a string, and attempts to assign it to the destination
|
||||
// interface via whatever type conversion is necessary. We have to do this
|
||||
// via reflection instead of a much simpler type switch so that we can handle
|
||||
// type aliases. This function was the easy way out, the better way, since we
|
||||
// know the destination type each place that we use this, is to generate code
|
||||
// to read each specific type.
|
||||
func BindStringToObject(src string, dst interface{}) error {
|
||||
var err error
|
||||
|
||||
v := reflect.ValueOf(dst)
|
||||
t := reflect.TypeOf(dst)
|
||||
|
||||
// We need to dereference pointers
|
||||
if t.Kind() == reflect.Ptr {
|
||||
v = reflect.Indirect(v)
|
||||
t = v.Type()
|
||||
}
|
||||
|
||||
// The resulting type must be settable. reflect will catch issues like
|
||||
// passing the destination by value.
|
||||
if !v.CanSet() {
|
||||
return errors.New("destination is not settable")
|
||||
}
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
var val int64
|
||||
val, err = strconv.ParseInt(src, 10, 64)
|
||||
if err == nil {
|
||||
v.SetInt(val)
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
var val uint64
|
||||
val, err = strconv.ParseUint(src, 10, 64)
|
||||
if err == nil {
|
||||
v.SetUint(val)
|
||||
}
|
||||
case reflect.String:
|
||||
v.SetString(src)
|
||||
err = nil
|
||||
case reflect.Float64, reflect.Float32:
|
||||
var val float64
|
||||
val, err = strconv.ParseFloat(src, 64)
|
||||
if err == nil {
|
||||
v.SetFloat(val)
|
||||
}
|
||||
case reflect.Bool:
|
||||
var val bool
|
||||
val, err = strconv.ParseBool(src)
|
||||
if err == nil {
|
||||
v.SetBool(val)
|
||||
}
|
||||
case reflect.Struct:
|
||||
// if this is not of type Time or of type Date look to see if this is of type Binder.
|
||||
if dstType, ok := dst.(Binder); ok {
|
||||
return dstType.Bind(src)
|
||||
}
|
||||
|
||||
if t.ConvertibleTo(reflect.TypeOf(time.Time{})) {
|
||||
// Don't fail on empty string.
|
||||
if src == "" {
|
||||
return nil
|
||||
}
|
||||
// Time is a special case of a struct that we handle
|
||||
parsedTime, err := time.Parse(time.RFC3339Nano, src)
|
||||
if err != nil {
|
||||
parsedTime, err = time.Parse(types.DateFormat, src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing '%s' as RFC3339 or 2006-01-02 time: %s", src, err)
|
||||
}
|
||||
}
|
||||
// So, assigning this gets a little fun. We have a value to the
|
||||
// dereference destination. We can't do a conversion to
|
||||
// time.Time because the result isn't assignable, so we need to
|
||||
// convert pointers.
|
||||
if t != reflect.TypeOf(time.Time{}) {
|
||||
vPtr := v.Addr()
|
||||
vtPtr := vPtr.Convert(reflect.TypeOf(&time.Time{}))
|
||||
v = reflect.Indirect(vtPtr)
|
||||
}
|
||||
v.Set(reflect.ValueOf(parsedTime))
|
||||
return nil
|
||||
}
|
||||
|
||||
if t.ConvertibleTo(reflect.TypeOf(types.Date{})) {
|
||||
// Don't fail on empty string.
|
||||
if src == "" {
|
||||
return nil
|
||||
}
|
||||
parsedTime, err := time.Parse(types.DateFormat, src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing '%s' as date: %s", src, err)
|
||||
}
|
||||
parsedDate := types.Date{Time: parsedTime}
|
||||
|
||||
// We have to do the same dance here to assign, just like with times
|
||||
// above.
|
||||
if t != reflect.TypeOf(types.Date{}) {
|
||||
vPtr := v.Addr()
|
||||
vtPtr := vPtr.Convert(reflect.TypeOf(&types.Date{}))
|
||||
v = reflect.Indirect(vtPtr)
|
||||
}
|
||||
v.Set(reflect.ValueOf(parsedDate))
|
||||
return nil
|
||||
}
|
||||
|
||||
// We fall through to the error case below if we haven't handled the
|
||||
// destination type above.
|
||||
fallthrough
|
||||
default:
|
||||
// We've got a bunch of types unimplemented, don't fail silently.
|
||||
err = fmt.Errorf("can not bind to destination of type: %s", t.Kind())
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error binding string parameter: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
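(Sketch only, not part of this diff: the reflection-based binder exists so that aliased primitive
types and time values bind the same way. Example under the same package-path assumption as above.)

    package main

    import (
        "fmt"
        "time"

        "github.com/deepmap/oapi-codegen/pkg/runtime"
    )

    // Limit is an aliased integer type, the case the reflection approach is written for.
    type Limit int32

    func main() {
        var l Limit
        if err := runtime.BindStringToObject("25", &l); err != nil {
            panic(err)
        }

        var ts time.Time
        if err := runtime.BindStringToObject("2021-03-01T10:00:00Z", &ts); err != nil {
            panic(err)
        }

        fmt.Println(l, ts.Year()) // 25 2021
    }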
|
@@ -0,0 +1,357 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/deepmap/oapi-codegen/pkg/types"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func marshalDeepObject(in interface{}, path []string) ([]string, error) {
|
||||
var result []string
|
||||
|
||||
switch t := in.(type) {
|
||||
case []interface{}:
|
||||
// For the array, we will use numerical subscripts of the form [x],
|
||||
// in the same order as the array.
|
||||
for i, iface := range t {
|
||||
newPath := append(path, strconv.Itoa(i))
|
||||
fields, err := marshalDeepObject(iface, newPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error traversing array")
|
||||
}
|
||||
result = append(result, fields...)
|
||||
}
|
||||
case map[string]interface{}:
|
||||
// For a map, each key (field name) becomes a member of the path, and
|
||||
// we recurse. First, sort the keys.
|
||||
keys := make([]string, len(t))
|
||||
i := 0
|
||||
for k := range t {
|
||||
keys[i] = k
|
||||
i++
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
// Now, for each key, we recursively marshal it.
|
||||
for _, k := range keys {
|
||||
newPath := append(path, k)
|
||||
fields, err := marshalDeepObject(t[k], newPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error traversing map")
|
||||
}
|
||||
result = append(result, fields...)
|
||||
}
|
||||
default:
|
||||
// Now, for a concrete value, we will turn the path elements
|
||||
// into a deepObject style set of subscripts. [a, b, c] turns into
|
||||
// [a][b][c]
|
||||
prefix := "[" + strings.Join(path, "][") + "]"
|
||||
result = []string{
|
||||
prefix + fmt.Sprintf("=%v", t),
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func MarshalDeepObject(i interface{}, paramName string) (string, error) {
|
||||
// We're going to marshal to JSON and unmarshal into an interface{},
|
||||
// which will use the json pkg to deal with all the field annotations. We
|
||||
// can then walk the generic object structure to produce a deepObject. This
|
||||
// isn't efficient and it would be more efficient to reflect on our own,
|
||||
// but it's complicated, error-prone code.
|
||||
buf, err := json.Marshal(i)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to marshal input to JSON")
|
||||
}
|
||||
var i2 interface{}
|
||||
err = json.Unmarshal(buf, &i2)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to unmarshal JSON")
|
||||
}
|
||||
fields, err := marshalDeepObject(i2, nil)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "error traversing JSON structure")
|
||||
}
|
||||
|
||||
// Prefix the param name to each subscripted field.
|
||||
for i := range fields {
|
||||
fields[i] = paramName + fields[i]
|
||||
}
|
||||
return strings.Join(fields, "&"), nil
|
||||
}
|
||||
|
||||
type fieldOrValue struct {
|
||||
fields map[string]fieldOrValue
|
||||
value string
|
||||
}
|
||||
|
||||
func (f *fieldOrValue) appendPathValue(path []string, value string) {
|
||||
fieldName := path[0]
|
||||
if len(path) == 1 {
|
||||
f.fields[fieldName] = fieldOrValue{value: value}
|
||||
return
|
||||
}
|
||||
|
||||
pv, found := f.fields[fieldName]
|
||||
if !found {
|
||||
pv = fieldOrValue{
|
||||
fields: make(map[string]fieldOrValue),
|
||||
}
|
||||
f.fields[fieldName] = pv
|
||||
}
|
||||
pv.appendPathValue(path[1:], value)
|
||||
}
|
||||
|
||||
func makeFieldOrValue(paths [][]string, values []string) fieldOrValue {
|
||||
|
||||
f := fieldOrValue{
|
||||
fields: make(map[string]fieldOrValue),
|
||||
}
|
||||
for i := range paths {
|
||||
path := paths[i]
|
||||
value := values[i]
|
||||
f.appendPathValue(path, value)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func UnmarshalDeepObject(dst interface{}, paramName string, params url.Values) error {
|
||||
// Params are all the query args, so we need those that look like
|
||||
// "paramName["...
|
||||
var fieldNames []string
|
||||
var fieldValues []string
|
||||
searchStr := paramName + "["
|
||||
for pName, pValues := range params {
|
||||
if strings.HasPrefix(pName, searchStr) {
|
||||
// trim the parameter name from the full name.
|
||||
pName = pName[len(paramName):]
|
||||
fieldNames = append(fieldNames, pName)
|
||||
if len(pValues) != 1 {
|
||||
return fmt.Errorf("%s has multiple values", pName)
|
||||
}
|
||||
fieldValues = append(fieldValues, pValues[0])
|
||||
}
|
||||
}
|
||||
|
||||
// Now, for each field, reconstruct its subscript path and value
|
||||
paths := make([][]string, len(fieldNames))
|
||||
for i, path := range fieldNames {
|
||||
path = strings.TrimLeft(path, "[")
|
||||
path = strings.TrimRight(path, "]")
|
||||
paths[i] = strings.Split(path, "][")
|
||||
}
|
||||
|
||||
fieldPaths := makeFieldOrValue(paths, fieldValues)
|
||||
err := assignPathValues(dst, fieldPaths)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error assigning value to destination")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This returns a field name, either using the variable name, or the json
|
||||
// annotation if that exists.
|
||||
func getFieldName(f reflect.StructField) string {
|
||||
n := f.Name
|
||||
tag, found := f.Tag.Lookup("json")
|
||||
if found {
|
||||
// If we have a json field, and the first part of it before the
|
||||
// first comma is non-empty, that's our field name.
|
||||
parts := strings.Split(tag, ",")
|
||||
if parts[0] != "" {
|
||||
n = parts[0]
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Create a map of field names that we'll see in the deepObject to reflect
|
||||
// field indices on the given type.
|
||||
func fieldIndicesByJsonTag(i interface{}) (map[string]int, error) {
|
||||
t := reflect.TypeOf(i)
|
||||
if t.Kind() != reflect.Struct {
|
||||
return nil, errors.New("expected a struct as input")
|
||||
}
|
||||
|
||||
n := t.NumField()
|
||||
fieldMap := make(map[string]int)
|
||||
for i := 0; i < n; i++ {
|
||||
field := t.Field(i)
|
||||
fieldName := getFieldName(field)
|
||||
fieldMap[fieldName] = i
|
||||
}
|
||||
return fieldMap, nil
|
||||
}
|
||||
|
||||
func assignPathValues(dst interface{}, pathValues fieldOrValue) error {
|
||||
//t := reflect.TypeOf(dst)
|
||||
v := reflect.ValueOf(dst)
|
||||
|
||||
iv := reflect.Indirect(v)
|
||||
it := iv.Type()
|
||||
|
||||
switch it.Kind() {
|
||||
case reflect.Slice:
|
||||
sliceLength := len(pathValues.fields)
|
||||
dstSlice := reflect.MakeSlice(it, sliceLength, sliceLength)
|
||||
err := assignSlice(dstSlice, pathValues)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error assigning slice")
|
||||
}
|
||||
iv.Set(dstSlice)
|
||||
return nil
|
||||
case reflect.Struct:
|
||||
// Some special types we care about are structs. Handle them
|
||||
// here. They may be redefined, so we need to do some hoop
|
||||
// jumping. If the types are aliased, we need to type convert
|
||||
// the pointer, then set the value of the dereference pointer.
|
||||
|
||||
// We check to see if the object implements the Binder interface first.
|
||||
if dst, isBinder := v.Interface().(Binder); isBinder {
|
||||
return dst.Bind(pathValues.value)
|
||||
}
|
||||
// Then check the legacy types
|
||||
if it.ConvertibleTo(reflect.TypeOf(types.Date{})) {
|
||||
var date types.Date
|
||||
var err error
|
||||
date.Time, err = time.Parse(types.DateFormat, pathValues.value)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "invalid date format")
|
||||
}
|
||||
dst := iv
|
||||
if it != reflect.TypeOf(types.Date{}) {
|
||||
// Types are aliased, convert the pointers.
|
||||
ivPtr := iv.Addr()
|
||||
aPtr := ivPtr.Convert(reflect.TypeOf(&types.Date{}))
|
||||
dst = reflect.Indirect(aPtr)
|
||||
}
|
||||
dst.Set(reflect.ValueOf(date))
|
||||
}
|
||||
if it.ConvertibleTo(reflect.TypeOf(time.Time{})) {
|
||||
var tm time.Time
|
||||
var err error
|
||||
tm, err = time.Parse(time.RFC3339Nano, pathValues.value)
|
||||
if err != nil {
|
||||
// Fall back to parsing it as a date.
|
||||
tm, err = time.Parse(types.DateFormat, pathValues.value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing tim as RFC3339 or 2006-01-02 time: %s", err)
|
||||
}
|
||||
return errors.Wrap(err, "invalid date format")
|
||||
}
|
||||
dst := iv
|
||||
if it != reflect.TypeOf(time.Time{}) {
|
||||
// Types are aliased, convert the pointers.
|
||||
ivPtr := iv.Addr()
|
||||
aPtr := ivPtr.Convert(reflect.TypeOf(&time.Time{}))
|
||||
dst = reflect.Indirect(aPtr)
|
||||
}
|
||||
dst.Set(reflect.ValueOf(tm))
|
||||
}
|
||||
fieldMap, err := fieldIndicesByJsonTag(iv.Interface())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed enumerating fields")
|
||||
}
|
||||
for _, fieldName := range sortedFieldOrValueKeys(pathValues.fields) {
|
||||
fieldValue := pathValues.fields[fieldName]
|
||||
fieldIndex, found := fieldMap[fieldName]
|
||||
if !found {
|
||||
return fmt.Errorf("field [%s] is not present in destination object", fieldName)
|
||||
}
|
||||
field := iv.Field(fieldIndex)
|
||||
err = assignPathValues(field.Addr().Interface(), fieldValue)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error assigning field [%s]", fieldName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case reflect.Ptr:
|
||||
// If we have a pointer after redirecting, it means we're dealing with
|
||||
// an optional field, such as *string, which was passed in as &foo. We
|
||||
// will allocate it if necessary, and call ourselves with a different
|
||||
// interface.
|
||||
dstVal := reflect.New(it.Elem())
|
||||
dstPtr := dstVal.Interface()
|
||||
err := assignPathValues(dstPtr, pathValues)
|
||||
iv.Set(dstVal)
|
||||
return err
|
||||
case reflect.Bool:
|
||||
val, err := strconv.ParseBool(pathValues.value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected a valid bool, got %s", pathValues.value)
|
||||
}
|
||||
iv.SetBool(val)
|
||||
return nil
|
||||
case reflect.Float32:
|
||||
val, err := strconv.ParseFloat(pathValues.value, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
|
||||
}
|
||||
iv.SetFloat(val)
|
||||
return nil
|
||||
case reflect.Float64:
|
||||
val, err := strconv.ParseFloat(pathValues.value, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
|
||||
}
|
||||
iv.SetFloat(val)
|
||||
return nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
val, err := strconv.ParseInt(pathValues.value, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected a valid int, got %s", pathValues.value)
|
||||
}
|
||||
iv.SetInt(val)
|
||||
return nil
|
||||
case reflect.String:
|
||||
iv.SetString(pathValues.value)
|
||||
return nil
|
||||
default:
|
||||
return errors.New("unhandled type: " + it.String())
|
||||
}
|
||||
}
|
||||
|
||||
func assignSlice(dst reflect.Value, pathValues fieldOrValue) error {
|
||||
// Gather up the values
|
||||
nValues := len(pathValues.fields)
|
||||
values := make([]string, nValues)
|
||||
// We expect to have consecutive array indices in the map
|
||||
for i := 0; i < nValues; i++ {
|
||||
indexStr := strconv.Itoa(i)
|
||||
fv, found := pathValues.fields[indexStr]
|
||||
if !found {
|
||||
return errors.New("array deepObjects must have consecutive indices")
|
||||
}
|
||||
values[i] = fv.value
|
||||
}
|
||||
|
||||
// This could be cleaner, but we can call into assignPathValues to
|
||||
// avoid recreating this logic.
|
||||
for i := 0; i < nValues; i++ {
|
||||
dstElem := dst.Index(i).Addr()
|
||||
err := assignPathValues(dstElem.Interface(), fieldOrValue{value: values[i]})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error binding array")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func sortedFieldOrValueKeys(m map[string]fieldOrValue) []string {
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
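(Sketch only, not part of this diff, assuming the package path github.com/deepmap/oapi-codegen/pkg/runtime:
MarshalDeepObject turns a generic value into the bracketed query form, and UnmarshalDeepObject reverses it.)

    package main

    import (
        "fmt"

        "github.com/deepmap/oapi-codegen/pkg/runtime"
    )

    func main() {
        filter := map[string]interface{}{
            "kind": "vm",
            "size": 2,
        }
        q, err := runtime.MarshalDeepObject(filter, "filter")
        if err != nil {
            panic(err)
        }
        fmt.Println(q) // filter[kind]=vm&filter[size]=2
    }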
|
@@ -0,0 +1,352 @@
|
|||
// Copyright 2019 DeepMap, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/deepmap/oapi-codegen/pkg/types"
|
||||
)
|
||||
|
||||
// Given an input value, such as a primitive type, array or object, turn it
|
||||
// into a parameter based on style/explode definition.
|
||||
func StyleParam(style string, explode bool, paramName string, value interface{}) (string, error) {
|
||||
t := reflect.TypeOf(value)
|
||||
v := reflect.ValueOf(value)
|
||||
|
||||
// Things may be passed in by pointer, we need to dereference, so return
|
||||
// error on nil.
|
||||
if t.Kind() == reflect.Ptr {
|
||||
if v.IsNil() {
|
||||
return "", fmt.Errorf("value is a nil pointer")
|
||||
}
|
||||
v = reflect.Indirect(v)
|
||||
t = v.Type()
|
||||
}
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Slice:
|
||||
n := v.Len()
|
||||
sliceVal := make([]interface{}, n)
|
||||
for i := 0; i < n; i++ {
|
||||
sliceVal[i] = v.Index(i).Interface()
|
||||
}
|
||||
return styleSlice(style, explode, paramName, sliceVal)
|
||||
case reflect.Struct:
|
||||
return styleStruct(style, explode, paramName, value)
|
||||
case reflect.Map:
|
||||
return styleMap(style, explode, paramName, value)
|
||||
default:
|
||||
return stylePrimitive(style, explode, paramName, value)
|
||||
}
|
||||
}
|
||||
|
||||
func styleSlice(style string, explode bool, paramName string, values []interface{}) (string, error) {
|
||||
if style == "deepObject" {
|
||||
if !explode {
|
||||
return "", errors.New("deepObjects must be exploded")
|
||||
}
|
||||
return MarshalDeepObject(values, paramName)
|
||||
}
|
||||
|
||||
var prefix string
|
||||
var separator string
|
||||
|
||||
switch style {
|
||||
case "simple":
|
||||
separator = ","
|
||||
case "label":
|
||||
prefix = "."
|
||||
if explode {
|
||||
separator = "."
|
||||
} else {
|
||||
separator = ","
|
||||
}
|
||||
case "matrix":
|
||||
prefix = fmt.Sprintf(";%s=", paramName)
|
||||
if explode {
|
||||
separator = prefix
|
||||
} else {
|
||||
separator = ","
|
||||
}
|
||||
case "form":
|
||||
prefix = fmt.Sprintf("%s=", paramName)
|
||||
if explode {
|
||||
separator = "&" + prefix
|
||||
} else {
|
||||
separator = ","
|
||||
}
|
||||
case "spaceDelimited":
|
||||
prefix = fmt.Sprintf("%s=", paramName)
|
||||
if explode {
|
||||
separator = "&" + prefix
|
||||
} else {
|
||||
separator = " "
|
||||
}
|
||||
case "pipeDelimited":
|
||||
prefix = fmt.Sprintf("%s=", paramName)
|
||||
if explode {
|
||||
separator = "&" + prefix
|
||||
} else {
|
||||
separator = "|"
|
||||
}
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported style '%s'", style)
|
||||
}
|
||||
|
||||
// We're going to assume here that the array is one of simple types.
|
||||
var err error
|
||||
parts := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
parts[i], err = primitiveToString(v)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error formatting '%s': %s", paramName, err)
|
||||
}
|
||||
}
|
||||
return prefix + strings.Join(parts, separator), nil
|
||||
}
|
||||
|
||||
func sortedKeys(strMap map[string]string) []string {
|
||||
keys := make([]string, len(strMap))
|
||||
i := 0
|
||||
for k := range strMap {
|
||||
keys[i] = k
|
||||
i++
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
// This is a special case. The struct may be a date or time, in
|
||||
// which case, marshal it in correct format.
|
||||
func marshalDateTimeValue(value interface{}) (string, bool) {
|
||||
v := reflect.Indirect(reflect.ValueOf(value))
|
||||
t := v.Type()
|
||||
|
||||
if t.ConvertibleTo(reflect.TypeOf(time.Time{})) {
|
||||
tt := v.Convert(reflect.TypeOf(time.Time{}))
|
||||
timeVal := tt.Interface().(time.Time)
|
||||
return timeVal.Format(time.RFC3339Nano), true
|
||||
}
|
||||
|
||||
if t.ConvertibleTo(reflect.TypeOf(types.Date{})) {
|
||||
d := v.Convert(reflect.TypeOf(types.Date{}))
|
||||
dateVal := d.Interface().(types.Date)
|
||||
return dateVal.Format(types.DateFormat), true
|
||||
}
|
||||
|
||||
return "", false
|
||||
}
|
||||
|
||||
func styleStruct(style string, explode bool, paramName string, value interface{}) (string, error) {
|
||||
|
||||
if timeVal, ok := marshalDateTimeValue(value); ok {
|
||||
styledVal, err := stylePrimitive(style, explode, paramName, timeVal)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to style time")
|
||||
}
|
||||
return styledVal, nil
|
||||
}
|
||||
|
||||
if style == "deepObject" {
|
||||
if !explode {
|
||||
return "", errors.New("deepObjects must be exploded")
|
||||
}
|
||||
return MarshalDeepObject(value, paramName)
|
||||
}
|
||||
|
||||
// Otherwise, we need to build a dictionary of the struct's fields. Each
|
||||
// field may only be a primitive value.
|
||||
v := reflect.ValueOf(value)
|
||||
t := reflect.TypeOf(value)
|
||||
fieldDict := make(map[string]string)
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
fieldT := t.Field(i)
|
||||
// Find the json annotation on the field, and use the json specified
|
||||
// name if available, otherwise, just the field name.
|
||||
tag := fieldT.Tag.Get("json")
|
||||
fieldName := fieldT.Name
|
||||
if tag != "" {
|
||||
tagParts := strings.Split(tag, ",")
|
||||
name := tagParts[0]
|
||||
if name != "" {
|
||||
fieldName = name
|
||||
}
|
||||
}
|
||||
f := v.Field(i)
|
||||
|
||||
// Unset optional fields will be nil pointers, skip over those.
|
||||
if f.Type().Kind() == reflect.Ptr && f.IsNil() {
|
||||
continue
|
||||
}
|
||||
str, err := primitiveToString(f.Interface())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error formatting '%s': %s", paramName, err)
|
||||
}
|
||||
fieldDict[fieldName] = str
|
||||
}
|
||||
|
||||
return processFieldDict(style, explode, paramName, fieldDict)
|
||||
}
|
||||
|
||||
func styleMap(style string, explode bool, paramName string, value interface{}) (string, error) {
|
||||
if style == "deepObject" {
|
||||
if !explode {
|
||||
return "", errors.New("deepObjects must be exploded")
|
||||
}
|
||||
return MarshalDeepObject(value, paramName)
|
||||
}
|
||||
|
||||
dict, ok := value.(map[string]interface{})
|
||||
if !ok {
|
||||
return "", errors.New("map not of type map[string]interface{}")
|
||||
}
|
||||
|
||||
fieldDict := make(map[string]string)
|
||||
for fieldName, value := range dict {
|
||||
str, err := primitiveToString(value)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error formatting '%s': %s", paramName, err)
|
||||
}
|
||||
fieldDict[fieldName] = str
|
||||
}
|
||||
|
||||
return processFieldDict(style, explode, paramName, fieldDict)
|
||||
}
|
||||
|
||||
func processFieldDict(style string, explode bool, paramName string, fieldDict map[string]string) (string, error) {
|
||||
var parts []string
|
||||
|
||||
// This works for everything except deepObject. We'll handle that one
|
||||
// separately.
|
||||
if style != "deepObject" {
|
||||
if explode {
|
||||
for _, k := range sortedKeys(fieldDict) {
|
||||
v := fieldDict[k]
|
||||
parts = append(parts, k+"="+v)
|
||||
}
|
||||
} else {
|
||||
for _, k := range sortedKeys(fieldDict) {
|
||||
v := fieldDict[k]
|
||||
parts = append(parts, k)
|
||||
parts = append(parts, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var prefix string
|
||||
var separator string
|
||||
|
||||
switch style {
|
||||
case "simple":
|
||||
separator = ","
|
||||
case "label":
|
||||
prefix = "."
|
||||
if explode {
|
||||
separator = prefix
|
||||
} else {
|
||||
separator = ","
|
||||
}
|
||||
case "matrix":
|
||||
if explode {
|
||||
separator = ";"
|
||||
prefix = ";"
|
||||
} else {
|
||||
separator = ","
|
||||
prefix = fmt.Sprintf(";%s=", paramName)
|
||||
}
|
||||
case "form":
|
||||
if explode {
|
||||
separator = "&"
|
||||
} else {
|
||||
prefix = fmt.Sprintf("%s=", paramName)
|
||||
separator = ","
|
||||
}
|
||||
case "deepObject":
|
||||
{
|
||||
if !explode {
|
||||
return "", fmt.Errorf("deepObject parameters must be exploded")
|
||||
}
|
||||
for _, k := range sortedKeys(fieldDict) {
|
||||
v := fieldDict[k]
|
||||
part := fmt.Sprintf("%s[%s]=%s", paramName, k, v)
|
||||
parts = append(parts, part)
|
||||
}
|
||||
separator = "&"
|
||||
}
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported style '%s'", style)
|
||||
}
|
||||
|
||||
return prefix + strings.Join(parts, separator), nil
|
||||
}
|
||||
|
||||
func stylePrimitive(style string, explode bool, paramName string, value interface{}) (string, error) {
|
||||
strVal, err := primitiveToString(value)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var prefix string
|
||||
switch style {
|
||||
case "simple":
|
||||
case "label":
|
||||
prefix = "."
|
||||
case "matrix":
|
||||
prefix = fmt.Sprintf(";%s=", paramName)
|
||||
case "form":
|
||||
prefix = fmt.Sprintf("%s=", paramName)
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported style '%s'", style)
|
||||
}
|
||||
return prefix + url.QueryEscape(strVal), nil
|
||||
}
|
||||
|
||||
// Converts a primitive value to a string. We need to do this based on the
|
||||
// Kind of an interface, not the Type to work with aliased types.
|
||||
func primitiveToString(value interface{}) (string, error) {
|
||||
var output string
|
||||
|
||||
// Values may come in by pointer for optionals, so make sure to dereference.
|
||||
v := reflect.Indirect(reflect.ValueOf(value))
|
||||
t := v.Type()
|
||||
kind := t.Kind()
|
||||
|
||||
switch kind {
|
||||
case reflect.Int8, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
output = strconv.FormatInt(v.Int(), 10)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
output = strconv.FormatFloat(v.Float(), 'f', -1, 64)
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
output = "true"
|
||||
} else {
|
||||
output = "false"
|
||||
}
|
||||
case reflect.String:
|
||||
output = v.String()
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported type %s", reflect.TypeOf(value).String())
|
||||
}
|
||||
return output, nil
|
||||
}
|
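(Sketch only, not part of this diff: StyleParam is the serialization counterpart of the binding code
above; the expected outputs in the comments follow the OpenAPI style/explode rules.)

    package main

    import (
        "fmt"

        "github.com/deepmap/oapi-codegen/pkg/runtime"
    )

    func main() {
        // form style, non-exploded array, as used in query strings.
        s, err := runtime.StyleParam("form", false, "id", []int{3, 4, 5})
        if err != nil {
            panic(err)
        }
        fmt.Println(s) // id=3,4,5

        // label style for a primitive path parameter.
        s, _ = runtime.StyleParam("label", false, "id", 5)
        fmt.Println(s) // .5
    }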
|
@@ -0,0 +1,30 @@
package types

import (
    "encoding/json"
    "time"
)

const DateFormat = "2006-01-02"

type Date struct {
    time.Time
}

func (d Date) MarshalJSON() ([]byte, error) {
    return json.Marshal(d.Time.Format(DateFormat))
}

func (d *Date) UnmarshalJSON(data []byte) error {
    var dateStr string
    err := json.Unmarshal(data, &dateStr)
    if err != nil {
        return err
    }
    parsed, err := time.Parse(DateFormat, dateStr)
    if err != nil {
        return err
    }
    d.Time = parsed
    return nil
}
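(Sketch only, not part of this diff: the Date type round-trips through JSON as a bare yyyy-mm-dd string.)

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "github.com/deepmap/oapi-codegen/pkg/types"
    )

    func main() {
        d := types.Date{Time: time.Date(2021, 3, 1, 0, 0, 0, 0, time.UTC)}
        out, _ := json.Marshal(d)
        fmt.Println(string(out)) // "2021-03-01"

        var back types.Date
        _ = json.Unmarshal([]byte(`"2021-03-01"`), &back)
        fmt.Println(back.Year()) // 2021
    }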
@@ -0,0 +1,27 @@
package types

import (
    "encoding/json"
    "errors"
)

type Email string

func (e Email) MarshalJSON() ([]byte, error) {
    if !emailRegex.MatchString(string(e)) {
        return nil, errors.New("email: failed to pass regex validation")
    }
    return json.Marshal(string(e))
}

func (e *Email) UnmarshalJSON(data []byte) error {
    var s string
    if err := json.Unmarshal(data, &s); err != nil {
        return err
    }
    if !emailRegex.MatchString(s) {
        return errors.New("email: failed to pass regex validation")
    }
    *e = Email(s)
    return nil
}
@@ -0,0 +1,11 @@
package types

import "regexp"

const (
emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
)

var (
    emailRegex = regexp.MustCompile(emailRegexString)
)

@@ -1,31 +0,0 @@
version: "2"

checks:
  argument-count:
    enabled: false
  complex-logic:
    enabled: false
  file-lines:
    enabled: false
  method-complexity:
    enabled: false
  method-count:
    enabled: false
  method-lines:
    enabled: false
  nested-control-flow:
    enabled: false
  return-statements:
    enabled: false
  similar-code:
    enabled: false
  identical-code:
    enabled: false

plugins:
  gofmt:
    enabled: true
  golint:
    enabled: true
  govet:
    enabled: true

@@ -1,3 +1,6 @@
run:
  timeout: 10m

linters-settings:
  golint:
    min-confidence: 0.3

@@ -9,8 +12,8 @@ linters-settings:
linters:
  enable:
    - deadcode
    - dupl
    - errcheck
    - gocritic
    - gocyclo
    - goimports
    - golint

@@ -1,25 +0,0 @@
{
  "Test": false,
  "Enable": [
    "deadcode",
    "errcheck",
    "golint",
    "gosimple",
    "gotype",
    "ineffassign",
    "interfacer",
    "misspell",
    "structcheck",
    "unconvert",
    "varcheck",
    "vet",
    "vetshadow"
  ],
  "Disable": [
    "goconst",
    "gocyclo",
    "gosec",
    "maligned"
  ],
  "Skip": ["vendor"]
}

@@ -1,49 +0,0 @@
language: go

dist: xenial
sudo: required

go:
  - "1.7.x"
  - "1.8.x"
  - "1.9.x"
  - "1.10.x"
  - "1.11.x"
  - "1.12.x"
  - tip

env:
  - GOLANGCI_LINT_VERSION=1.15.0 GO111MODULES=on

cache: apt

addons:
  apt:
    update: true
    packages:
      - rpm

install:
  - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $GOPATH/bin v${GOLANGCI_LINT_VERSION}
  - npm i codeclimate-test-reporter
  - '[ "$(echo "$TRAVIS_GO_VERSION" | perl -pe "s/\\.[x\\d]+$//")" = "1.11" ] && go mod vendor || go get -u github.com/gofrs/uuid'

before_script:
  - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
  - chmod +x ./cc-test-reporter
  - ./cc-test-reporter before-build

script:
  - go test -race -coverprofile=c.out -covermode=atomic .

after_script:
  - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT

jobs:
  include:
    - stage: golangci-lint
      go: 1.10.x
      if: type = pull_request
      script:
        - go get -u github.com/gofrs/uuid
        - golangci-lint run .

|
@@ -1,6 +1,255 @@
|
|||
Changelog
|
||||
=========
|
||||
|
||||
0.43.1
|
||||
------
|
||||
|
||||
- change: in `NewClient()`, the `v2.Client` embedded in the `Client` struct doesn't inherit the custom `http.Client` set using `WithHTTPClient()`.
|
||||
|
||||
0.43.0
|
||||
------
|
||||
|
||||
- change: [Exoscale API V2](https://openapi-v2.exoscale.com/) related code has been relocated under the `github.com/exoscale/egoscale/v2` package.
|
||||
Note: `egoscale.Client` embeds a `v2.Client` initialized implicitly as a convenience.
|
||||
|
||||
0.42.0
|
||||
------
|
||||
|
||||
- feature: new `SKSNodepool.AntiAffinityGroupIDs` field
|
||||
- change: `SKSCluster.Level` field renamed as `SKSCluster.ServiceLevel`
|
||||
|
||||
0.41.0
|
||||
------
|
||||
|
||||
- feature: new method `ListZones()`
|
||||
|
||||
0.40.1
|
||||
------
|
||||
|
||||
- Improve API v2 async job tests and error reporting (#466)
|
||||
|
||||
0.40.0
|
||||
------
|
||||
|
||||
- feature: new method `UpgradeSKSCluster()`
|
||||
- feature: new fields `SKSCluster.Level` and `SKSCluster.CNI`
|
||||
- change: `SKSCluster.EnableExoscaleCloudController` replaced with `SKSCluster.AddOns`
|
||||
|
||||
0.39.1
|
||||
------
|
||||
|
||||
- fix: add missing `UpdateVirtualMachineSecurityGroups` operation metadata
|
||||
|
||||
0.39.0
|
||||
------
|
||||
|
||||
- feature: add `UpdateVirtualMachineSecurityGroups` operation (#464)
|
||||
|
||||
0.38.0
|
||||
------
|
||||
|
||||
- feature: add `SKSCluster.EvictNodepoolMembers()` and `ListSKSClusterVersions()` methods
|
||||
|
||||
0.37.1
|
||||
------
|
||||
|
||||
- fix: `UpdateIPAddress.HealthcheckTLSSkipVerify` field always set to `false` (#462)
|
||||
|
||||
0.37.0
|
||||
------
|
||||
|
||||
- feature: `NewClient()` now accepts options (460)
|
||||
- fix: NLB service healthcheck TLS SNI bug (#461)
|
||||
|
||||
0.36.2
|
||||
------
|
||||
|
||||
- fix: `CreateInstancePool.AntiAffinityGroupIDs` field is optional (#459)
|
||||
|
||||
0.36.1
|
||||
------
|
||||
|
||||
- feature: add support for Exoscale Cloud Controller in SKS clusters
|
||||
- fix: add missing tests for SKS Nodepools Security Groups
|
||||
|
||||
0.36.0
|
||||
------
|
||||
|
||||
- feature: add support for Anti-Affinity Groups to Instance Pools
|
||||
- feature: add support for Security Groups to SKS Nodepools
|
||||
|
||||
0.35.3
|
||||
------
|
||||
|
||||
- Fix typo in version.go
|
||||
|
||||
0.35.2
|
||||
------
|
||||
|
||||
- Improve API v2 errors handling (#455)
|
||||
|
||||
0.35.1
|
||||
------
|
||||
|
||||
- fix: various SKS-related bugs (#454)
|
||||
|
||||
0.35.0
|
||||
------
|
||||
|
||||
- feature: add support for SKS resources (#453)
|
||||
|
||||
0.34.0
|
||||
------
|
||||
|
||||
- change: `BucketUsage.Usage` is now an `int64` (#451)
|
||||
|
||||
0.33.2
|
||||
------
|
||||
|
||||
- fix: make `GetWithContext` return more relevant errors (#450)
|
||||
|
||||
0.33.1
|
||||
------
|
||||
|
||||
- fix: `UpdateNetworkLoadBalancer` call panicking following a public API change
|
||||
|
||||
0.33.0
|
||||
------
|
||||
|
||||
- feature: add support for Network Load Balancer service HTTPS health checking (#449)
|
||||
|
||||
0.32.0
|
||||
------
|
||||
|
||||
- feature: add support for Instance Pool root disk size update (#448)
|
||||
|
||||
0.31.2
|
||||
------
|
||||
|
||||
- fix: add missing TLS-specific parameters to `AssociateIPAddress`
|
||||
|
||||
0.31.1
|
||||
------
|
||||
|
||||
- fix: Instance Pool IPv6 flag handling
|
||||
|
||||
0.31.0
|
||||
------
|
||||
|
||||
- feature: add support for IPv6 in Instance Pools (#446)
|
||||
|
||||
0.30.0
|
||||
------
|
||||
|
||||
- feature: add new TLS-specific parameters to managed EIP
|
||||
|
||||
0.29.0
|
||||
------
|
||||
|
||||
- feature: `ListVirtualMachines` call to allow searching by `ManagerID` (#442)
|
||||
- fix: remove duplicate `User-Agent` HTTP header in Runstatus calls
|
||||
- tests: `*NetworkLoadBalancer*` calls are now tested using HTTP mocks
|
||||
- codegen: `internal/v2` updated
|
||||
|
||||
0.28.1
|
||||
------
|
||||
|
||||
- fix: Fix `ListVolumes` call to allow searching by ID (#440)
|
||||
|
||||
0.28.0
|
||||
------
|
||||
|
||||
- feature: add `Manager`/`ManagerID` fields to `VirtualMachine` structure (#438)
|
||||
- fix: HTTP request User Agent header handling (#439)
|
||||
|
||||
0.27.0
|
||||
------
|
||||
|
||||
- feature: Add `evictInstancePoolMembers` call to Instance Pool (#437)
|
||||
|
||||
0.26.6
|
||||
------
|
||||
|
||||
- change: Add support for Compute instance templates boot mode (#436)
|
||||
|
||||
0.26.5
|
||||
------
|
||||
|
||||
- fix: bug in the ListNetworkLoadBalancers call (#435)
|
||||
|
||||
0.26.4
|
||||
------
|
||||
|
||||
- Fixing typo in previous release
|
||||
|
||||
0.26.3
|
||||
------
|
||||
|
||||
- change: updated API V2 async operation code (#434)
|
||||
|
||||
0.26.2
|
||||
------
|
||||
|
||||
- change: updated OpenAPI code-generated API V2 bindings
|
||||
|
||||
0.26.1
|
||||
------
|
||||
|
||||
- change: the `DisplayText` property of `RegisterCustomTemplate` is now optional (#433)
|
||||
|
||||
0.26.0
|
||||
------
|
||||
|
||||
- feature: Add support for Network Load Balancer resources (#432)
|
||||
|
||||
0.25.0
|
||||
------
|
||||
|
||||
- feature: Add support for `listBucketsUsage` (#431)
|
||||
- change: Switch CI to Github Actions (#430)
|
||||
|
||||
0.24.0
|
||||
------
|
||||
|
||||
- feature: Add export snapshot implementation (#427)
|
||||
- feature: Add support for public API V2 (#425)
|
||||
- change: Switch module to Go 1.14 (#429)
|
||||
- change: Travis CI: set minimum Go version to 1.13
|
||||
- doc: Annotate API doc regarding use of tags (#423)
|
||||
- tests: fix request client timeout handling (#422)
|
||||
|
||||
0.23.0
|
||||
------
|
||||
|
||||
- change: Add `Resources` field to `APIKey` (#420)
|
||||
|
||||
0.22.0
|
||||
------
|
||||
|
||||
- change: Remove all references to Network Offerings (#418)
|
||||
|
||||
0.21.0
|
||||
------
|
||||
|
||||
- feature: add const `NotFound` 404 on type `ErrorCode` (#417)
|
||||
|
||||
0.20.1
|
||||
------
|
||||
|
||||
- fix: update the `ListAPIKeysResponse` field (#415)
|
||||
|
||||
0.20.0
|
||||
------
|
||||
|
||||
- feature: Add Instance pool implementation (#410)
|
||||
- feature: Add IAM implementation (#411)
|
||||
|
||||
0.19.0
|
||||
------
|
||||
|
||||
- feature: add field `Description` on type `IPAddress` (#413)
|
||||
- change: add Json tag `omitempty` on field `TemplateFilter` in type `ListTemplates` (#412)
|
||||
|
||||
0.18.1
|
||||
------
|
||||
|
||||
|
|
|
@@ -5,7 +5,8 @@ description: the Go library for Exoscale
|
|||
|
||||
<a href="https://gopherize.me/gopher/9c1bc7cfe1d84cf43e477dbfc4aa86332065f1fd"><img src="gopher.png" align="right" alt=""></a>
|
||||
|
||||
[![Build Status](https://travis-ci.org/exoscale/egoscale.svg?branch=master)](https://travis-ci.org/exoscale/egoscale) [![Maintainability](https://api.codeclimate.com/v1/badges/fcab3b624b7d3ca96a9d/maintainability)](https://codeclimate.com/github/exoscale/egoscale/maintainability) [![Test Coverage](https://api.codeclimate.com/v1/badges/fcab3b624b7d3ca96a9d/test_coverage)](https://codeclimate.com/github/exoscale/egoscale/test_coverage) [![GoDoc](https://godoc.org/github.com/exoscale/egoscale?status.svg)](https://godoc.org/github.com/exoscale/egoscale) [![Go Report Card](https://goreportcard.com/badge/github.com/exoscale/egoscale)](https://goreportcard.com/report/github.com/exoscale/egoscale)
|
||||
[![Actions Status](https://github.com/exoscale/egoscale/workflows/CI/badge.svg?branch=master)](https://github.com/exoscale/egoscale/actions?query=workflow%3ACI+branch%3Amaster)
|
||||
[![GoDoc](https://godoc.org/github.com/exoscale/egoscale?status.svg)](https://godoc.org/github.com/exoscale/egoscale) [![Go Report Card](https://goreportcard.com/badge/github.com/exoscale/egoscale)](https://goreportcard.com/report/github.com/exoscale/egoscale)
|
||||
|
||||
A wrapper for the [Exoscale public cloud](https://www.exoscale.com) API.
|
||||
|
||||
|
|
|
@@ -15,6 +15,8 @@ type Healthcheck struct {
	StrikesFail int64 `json:"strikes-fail,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'dead'. Default: 3"`
	StrikesOk int64 `json:"strikes-ok,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'alive'. Default: 2"`
	Timeout int64 `json:"timeout,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 2, cannot be greater than interval."`
	TLSSNI string `json:"tls-sni,omitempty" doc:"healthcheck definition: server name to present for HTTPS checks"`
	TLSSkipVerify bool `json:"tls-skip-verify" doc:"healthcheck definition: bypass certificate chain verification for HTTPS checks"`
}

// IPAddress represents an IP Address

@@ -23,6 +25,7 @@ type IPAddress struct {
	Associated string `json:"associated,omitempty" doc:"date the public IP address was associated"`
	AssociatedNetworkID *UUID `json:"associatednetworkid,omitempty" doc:"the ID of the Network associated with the IP address"`
	AssociatedNetworkName string `json:"associatednetworkname,omitempty" doc:"the name of the Network associated with the IP address"`
	Description string `json:"description,omitempty" doc:"The IP address description."`
	ForVirtualNetwork bool `json:"forvirtualnetwork,omitempty" doc:"the virtual network for the IP address"`
	Healthcheck *Healthcheck `json:"healthcheck,omitempty" doc:"The IP healthcheck configuration"`
	ID *UUID `json:"id,omitempty" doc:"public IP address id"`

@@ -89,13 +92,16 @@ func (ipaddress IPAddress) Delete(ctx context.Context, client *Client) error {

// AssociateIPAddress (Async) represents the IP creation
type AssociateIPAddress struct {
	Description string `json:"description,omitempty" doc:"The IP address description."`
	HealthcheckInterval int64 `json:"interval,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 10, minimum: 5"`
	HealthcheckMode string `json:"mode,omitempty" doc:"healthcheck definition: healthcheck mode can be either 'tcp' or 'http'"`
	HealthcheckPath string `json:"path,omitempty" doc:"healthcheck definition: the path against which the 'http' healthcheck will be performed. Required if mode is 'http', ignored otherwise."`
	HealthcheckMode string `json:"mode,omitempty" doc:"healthcheck definition: healthcheck mode can be either 'tcp', 'http', or 'https'"`
	HealthcheckPath string `json:"path,omitempty" doc:"healthcheck definition: the path against which the 'http' healthcheck will be performed. Required if mode is 'http' or 'https', ignored otherwise."`
	HealthcheckPort int64 `json:"port,omitempty" doc:"healthcheck definition: the port against which the healthcheck will be performed. Required if a 'mode' is provided."`
	HealthcheckStrikesFail int64 `json:"strikes-fail,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'dead'. Default: 3"`
	HealthcheckStrikesOk int64 `json:"strikes-ok,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'alive'. Default: 2"`
	HealthcheckTimeout int64 `json:"timeout,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 2, cannot be greater than interval."`
	HealthcheckTLSSkipVerify bool `json:"tls-skip-verify,omitempty" doc:"healthcheck definition: skip TLS verification for HTTPS checks. Default: false"`
	HealthcheckTLSSNI string `json:"tls-sni,omitempty" doc:"healthcheck definition: server name to present for HTTPS checks. Default: no server name is presented"`
	ZoneID *UUID `json:"zoneid,omitempty" doc:"the ID of the availability zone you want to acquire a public IP address from"`
	_ bool `name:"associateIpAddress" description:"Acquires and associates a public IP to an account."`
}

@@ -128,13 +134,16 @@ func (DisassociateIPAddress) AsyncResponse() interface{} {

// UpdateIPAddress (Async) represents the IP modification
type UpdateIPAddress struct {
	Description string `json:"description,omitempty" doc:"The IP address description."`
	HealthcheckInterval int64 `json:"interval,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 10, minimum: 5"`
	HealthcheckMode string `json:"mode,omitempty" doc:"healthcheck definition: healthcheck mode can be either 'tcp' or 'http'"`
	HealthcheckMode string `json:"mode,omitempty" doc:"healthcheck definition: healthcheck mode can be either 'tcp', 'http', or 'https'"`
	HealthcheckPath string `json:"path,omitempty" doc:"healthcheck definition: the path against which the 'http' healthcheck will be performed. Required if mode is 'http', ignored otherwise."`
	HealthcheckPort int64 `json:"port,omitempty" doc:"healthcheck definition: the port against which the healthcheck will be performed. Required if a 'mode' is provided."`
	HealthcheckStrikesFail int64 `json:"strikes-fail,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'dead'. Default: 3"`
	HealthcheckStrikesOk int64 `json:"strikes-ok,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'alive'. Default: 2"`
	HealthcheckTimeout int64 `json:"timeout,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 2, cannot be greater than interval."`
	HealthcheckTLSSNI string `json:"tls-sni,omitempty" doc:"healthcheck definition: server name to present for HTTPS checks"`
	HealthcheckTLSSkipVerify bool `json:"tls-skip-verify,omitempty" doc:"healthcheck definition: bypass certificate chain verification for HTTPS checks"`
	ID *UUID `json:"id" doc:"the id of the public IP address to update"`
	_ bool `name:"updateIpAddress" description:"Updates an IP address"`
}

@@ -166,7 +175,7 @@ type ListPublicIPAddresses struct {
	Page int `json:"page,omitempty"`
	PageSize int `json:"pagesize,omitempty"`
	PhysicalNetworkID *UUID `json:"physicalnetworkid,omitempty" doc:"lists all public IP addresses by physical network id"`
	Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs)"`
	Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs). Note: multiple tags are OR'ed, not AND'ed."`
	VlanID *UUID `json:"vlanid,omitempty" doc:"lists all public IP addresses by VLAN ID"`
	ZoneID *UUID `json:"zoneid,omitempty" doc:"lists all public IP addresses by Zone ID"`
	_ bool `name:"listPublicIpAddresses" description:"Lists all public ip addresses"`
@ -6,20 +6,20 @@ import (
|
|||
"net/url"
|
||||
)
|
||||
|
||||
// AffinityGroup represents an (anti-)affinity group
|
||||
// AffinityGroup represents an Affinity Group.
|
||||
//
|
||||
// Affinity and Anti-Affinity groups provide a way to influence where VMs should run.
|
||||
// Affinity and Anti-Affinity Groups provide a way to influence where VMs should run.
|
||||
// See: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/stable/virtual_machines.html#affinity-groups
|
||||
type AffinityGroup struct {
|
||||
Account string `json:"account,omitempty" doc:"the account owning the affinity group"`
|
||||
Description string `json:"description,omitempty" doc:"the description of the affinity group"`
|
||||
ID *UUID `json:"id,omitempty" doc:"the ID of the affinity group"`
|
||||
Name string `json:"name,omitempty" doc:"the name of the affinity group"`
|
||||
Type string `json:"type,omitempty" doc:"the type of the affinity group"`
|
||||
VirtualMachineIDs []UUID `json:"virtualmachineIds,omitempty" doc:"virtual machine Ids associated with this affinity group"`
|
||||
Account string `json:"account,omitempty" doc:"the account owning the Affinity Group"`
|
||||
Description string `json:"description,omitempty" doc:"the description of the Affinity Group"`
|
||||
ID *UUID `json:"id,omitempty" doc:"the ID of the Affinity Group"`
|
||||
Name string `json:"name,omitempty" doc:"the name of the Affinity Group"`
|
||||
Type string `json:"type,omitempty" doc:"the type of the Affinity Group"`
|
||||
VirtualMachineIDs []UUID `json:"virtualmachineIds,omitempty" doc:"virtual machine IDs associated with this Affinity Group"`
|
||||
}
|
||||
|
||||
// ListRequest builds the ListAffinityGroups request
|
||||
// ListRequest builds the ListAffinityGroups request.
|
||||
func (ag AffinityGroup) ListRequest() (ListCommand, error) {
|
||||
return &ListAffinityGroups{
|
||||
ID: ag.ID,
|
||||
|
@ -27,7 +27,7 @@ func (ag AffinityGroup) ListRequest() (ListCommand, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// Delete removes the given Affinity Group
|
||||
// Delete deletes the given Affinity Group.
|
||||
func (ag AffinityGroup) Delete(ctx context.Context, client *Client) error {
|
||||
if ag.ID == nil && ag.Name == "" {
|
||||
return fmt.Errorf("an Affinity Group may only be deleted using ID or Name")
|
||||
|
@ -44,114 +44,114 @@ func (ag AffinityGroup) Delete(ctx context.Context, client *Client) error {
|
|||
return client.BooleanRequestWithContext(ctx, req)
|
||||
}
|
||||
|
||||
// AffinityGroupType represent an affinity group type
|
||||
// AffinityGroupType represent an Affinity Group type.
|
||||
type AffinityGroupType struct {
|
||||
Type string `json:"type,omitempty" doc:"the type of the affinity group"`
|
||||
Type string `json:"type,omitempty" doc:"the type of the Affinity Group"`
|
||||
}
|
||||
|
||||
// CreateAffinityGroup (Async) represents a new (anti-)affinity group
|
||||
// CreateAffinityGroup (Async) represents a new Affinity Group.
|
||||
type CreateAffinityGroup struct {
|
||||
Description string `json:"description,omitempty" doc:"Optional description of the affinity group"`
|
||||
Name string `json:"name,omitempty" doc:"Name of the affinity group"`
|
||||
Type string `json:"type" doc:"Type of the affinity group from the available affinity/anti-affinity group types"`
|
||||
_ bool `name:"createAffinityGroup" description:"Creates an affinity/anti-affinity group"`
|
||||
Description string `json:"description,omitempty" doc:"Optional description of the Affinity Group"`
|
||||
Name string `json:"name" doc:"Name of the Affinity Group"`
|
||||
Type string `json:"type" doc:"Type of the Affinity Group from the available Affinity Group Group types"`
|
||||
_ bool `name:"createAffinityGroup" description:"Creates an Affinity Group Group"`
|
||||
}
|
||||
|
||||
func (req CreateAffinityGroup) onBeforeSend(params url.Values) error {
|
||||
// Name must be set, but can be empty
|
||||
// Name must be set, but can be empty.
|
||||
if req.Name == "" {
|
||||
params.Set("name", "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
// Response returns the struct to unmarshal.
|
||||
func (CreateAffinityGroup) Response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
// AsyncResponse returns the struct to unmarshal the async job
|
||||
// AsyncResponse returns the struct to unmarshal the async job.
|
||||
func (CreateAffinityGroup) AsyncResponse() interface{} {
|
||||
return new(AffinityGroup)
|
||||
}
|
||||
|
||||
// UpdateVMAffinityGroup (Async) represents a modification of a (anti-)affinity group
|
||||
// UpdateVMAffinityGroup (Async) represents a modification of an Affinity Group.
|
||||
type UpdateVMAffinityGroup struct {
|
||||
ID *UUID `json:"id" doc:"The ID of the virtual machine"`
|
||||
AffinityGroupIDs []UUID `json:"affinitygroupids,omitempty" doc:"comma separated list of affinity groups id that are going to be applied to the virtual machine. Should be passed only when vm is created from a zone with Basic Network support. Mutually exclusive with securitygroupnames parameter"`
|
||||
AffinityGroupNames []string `json:"affinitygroupnames,omitempty" doc:"comma separated list of affinity groups names that are going to be applied to the virtual machine. Should be passed only when vm is created from a zone with Basic Network support. Mutually exclusive with securitygroupids parameter"`
|
||||
_ bool `name:"updateVMAffinityGroup" description:"Updates the affinity/anti-affinity group associations of a virtual machine. The VM has to be stopped and restarted for the new properties to take effect."`
|
||||
AffinityGroupIDs []UUID `json:"affinitygroupids,omitempty" doc:"comma separated list of Affinity Groups id that are going to be applied to the virtual machine. Should be passed only when vm is created from a zone with Basic Network support. Mutually exclusive with securitygroupnames parameter"`
|
||||
AffinityGroupNames []string `json:"affinitygroupnames,omitempty" doc:"comma separated list of Affinity Groups names that are going to be applied to the virtual machine. Should be passed only when vm is created from a zone with Basic Network support. Mutually exclusive with securitygroupids parameter"`
|
||||
_ bool `name:"updateVMAffinityGroup" description:"Updates the Affinity Group Group associations of a virtual machine. The VM has to be stopped and restarted for the new properties to take effect."`
|
||||
}
|
||||
|
||||
func (req UpdateVMAffinityGroup) onBeforeSend(params url.Values) error {
|
||||
// Either AffinityGroupIDs or AffinityGroupNames must be set
|
||||
// Either AffinityGroupIDs or AffinityGroupNames must be set.
|
||||
if len(req.AffinityGroupIDs) == 0 && len(req.AffinityGroupNames) == 0 {
|
||||
params.Set("affinitygroupids", "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
// Response returns the struct to unmarshal.
|
||||
func (UpdateVMAffinityGroup) Response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
// AsyncResponse returns the struct to unmarshal the async job
|
||||
// AsyncResponse returns the struct to unmarshal the async job.
|
||||
func (UpdateVMAffinityGroup) AsyncResponse() interface{} {
|
||||
return new(VirtualMachine)
|
||||
}
|
||||
|
||||
// DeleteAffinityGroup (Async) represents an (anti-)affinity group to be deleted
|
||||
// DeleteAffinityGroup (Async) represents an Affinity Group to be deleted.
|
||||
type DeleteAffinityGroup struct {
|
||||
ID *UUID `json:"id,omitempty" doc:"The ID of the affinity group. Mutually exclusive with name parameter"`
|
||||
Name string `json:"name,omitempty" doc:"The name of the affinity group. Mutually exclusive with ID parameter"`
|
||||
_ bool `name:"deleteAffinityGroup" description:"Deletes affinity group"`
|
||||
ID *UUID `json:"id,omitempty" doc:"The ID of the Affinity Group. Mutually exclusive with name parameter"`
|
||||
Name string `json:"name,omitempty" doc:"The name of the Affinity Group. Mutually exclusive with ID parameter"`
|
||||
_ bool `name:"deleteAffinityGroup" description:"Deletes Affinity Group"`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
// Response returns the struct to unmarshal.
|
||||
func (DeleteAffinityGroup) Response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
// AsyncResponse returns the struct to unmarshal the async job
|
||||
// AsyncResponse returns the struct to unmarshal the async job.
|
||||
func (DeleteAffinityGroup) AsyncResponse() interface{} {
|
||||
return new(BooleanResponse)
|
||||
}
|
||||
|
||||
//go:generate go run generate/main.go -interface=Listable ListAffinityGroups
|
||||
|
||||
// ListAffinityGroups represents an (anti-)affinity groups search
|
||||
// ListAffinityGroups represents an Affinity Groups search.
|
||||
type ListAffinityGroups struct {
|
||||
ID *UUID `json:"id,omitempty" doc:"List the affinity group by the ID provided"`
|
||||
ID *UUID `json:"id,omitempty" doc:"List the Affinity Group by the ID provided"`
|
||||
Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
|
||||
Name string `json:"name,omitempty" doc:"Lists affinity groups by name"`
|
||||
Name string `json:"name,omitempty" doc:"Lists Affinity Groups by name"`
|
||||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
Type string `json:"type,omitempty" doc:"Lists affinity groups by type"`
|
||||
VirtualMachineID *UUID `json:"virtualmachineid,omitempty" doc:"Lists affinity groups by virtual machine ID"`
|
||||
_ bool `name:"listAffinityGroups" description:"Lists affinity groups"`
|
||||
Type string `json:"type,omitempty" doc:"Lists Affinity Groups by type"`
|
||||
VirtualMachineID *UUID `json:"virtualmachineid,omitempty" doc:"Lists Affinity Groups by virtual machine ID"`
|
||||
_ bool `name:"listAffinityGroups" description:"Lists Affinity Groups"`
|
||||
}
|
||||
|
||||
// ListAffinityGroupsResponse represents a list of (anti-)affinity groups
|
||||
// ListAffinityGroupsResponse represents a list of Affinity Groups.
|
||||
type ListAffinityGroupsResponse struct {
|
||||
Count int `json:"count"`
|
||||
AffinityGroup []AffinityGroup `json:"affinitygroup"`
|
||||
}
|
||||
|
||||
// ListAffinityGroupTypes represents an (anti-)affinity groups search
|
||||
// ListAffinityGroupTypes represents an Affinity Groups types search.
|
||||
type ListAffinityGroupTypes struct {
|
||||
Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
|
||||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
_ bool `name:"listAffinityGroupTypes" description:"Lists affinity group types available"`
|
||||
_ bool `name:"listAffinityGroupTypes" description:"Lists Affinity Group types available"`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
// Response returns the struct to unmarshal.
|
||||
func (ListAffinityGroupTypes) Response() interface{} {
|
||||
return new(ListAffinityGroupTypesResponse)
|
||||
}
|
||||
|
||||
// ListAffinityGroupTypesResponse represents a list of (anti-)affinity group types
|
||||
// ListAffinityGroupTypesResponse represents a list of Affinity Group types.
|
||||
type ListAffinityGroupTypesResponse struct {
|
||||
Count int `json:"count"`
|
||||
AffinityGroupType []AffinityGroupType `json:"affinitygrouptype"`
|
||||
|
|
|
@ -4,12 +4,12 @@ package egoscale
|
|||
|
||||
import "fmt"
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
// Response returns the struct to unmarshal.
|
||||
func (ListAffinityGroups) Response() interface{} {
|
||||
return new(ListAffinityGroupsResponse)
|
||||
}
|
||||
|
||||
// ListRequest returns itself
|
||||
// ListRequest returns itself.
|
||||
func (ls *ListAffinityGroups) ListRequest() (ListCommand, error) {
|
||||
if ls == nil {
|
||||
return nil, fmt.Errorf("%T cannot be nil", ls)
|
||||
|
@ -17,17 +17,17 @@ func (ls *ListAffinityGroups) ListRequest() (ListCommand, error) {
|
|||
return ls, nil
|
||||
}
|
||||
|
||||
// SetPage sets the current apge
|
||||
// SetPage sets the current page.
|
||||
func (ls *ListAffinityGroups) SetPage(page int) {
|
||||
ls.Page = page
|
||||
}
|
||||
|
||||
// SetPageSize sets the page size
|
||||
// SetPageSize sets the page size.
|
||||
func (ls *ListAffinityGroups) SetPageSize(pageSize int) {
|
||||
ls.PageSize = pageSize
|
||||
}
|
||||
|
||||
// Each triggers the callback for each, valid answer or any non 404 issue
|
||||
// Each triggers the callback for each, valid answer or any non 404 issue.
|
||||
func (ListAffinityGroups) Each(resp interface{}, callback IterateItemFunc) {
|
||||
items, ok := resp.(*ListAffinityGroupsResponse)
|
||||
if !ok {
|
||||
|
|
|
@ -0,0 +1,103 @@
|
|||
package egoscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// AntiAffinityGroup represents an Anti-Affinity Group.
|
||||
type AntiAffinityGroup struct {
|
||||
Account string `json:"account,omitempty" doc:"the account owning the Anti-Affinity Group"`
|
||||
Description string `json:"description,omitempty" doc:"the description of the Anti-Affinity Group"`
|
||||
ID *UUID `json:"id,omitempty" doc:"the ID of the Anti-Affinity Group"`
|
||||
Name string `json:"name,omitempty" doc:"the name of the Anti-Affinity Group"`
|
||||
Type string `json:"type,omitempty" doc:"the type of the Anti-Affinity Group"`
|
||||
VirtualMachineIDs []UUID `json:"virtualmachineIds,omitempty" doc:"virtual machine IDs associated with this Anti-Affinity Group"`
|
||||
}
|
||||
|
||||
// ListRequest builds the ListAntiAffinityGroups request.
|
||||
func (ag AntiAffinityGroup) ListRequest() (ListCommand, error) {
|
||||
return &ListAffinityGroups{
|
||||
ID: ag.ID,
|
||||
Name: ag.Name,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Delete deletes the given Anti-Affinity Group.
|
||||
func (ag AntiAffinityGroup) Delete(ctx context.Context, client *Client) error {
|
||||
if ag.ID == nil && ag.Name == "" {
|
||||
return fmt.Errorf("an Anti-Affinity Group may only be deleted using ID or Name")
|
||||
}
|
||||
|
||||
req := &DeleteAffinityGroup{}
|
||||
|
||||
if ag.ID != nil {
|
||||
req.ID = ag.ID
|
||||
} else {
|
||||
req.Name = ag.Name
|
||||
}
|
||||
|
||||
return client.BooleanRequestWithContext(ctx, req)
|
||||
}
|
||||
|
||||
// CreateAntiAffinityGroup represents an Anti-Affinity Group creation.
|
||||
type CreateAntiAffinityGroup struct {
|
||||
Name string `json:"name" doc:"Name of the Anti-Affinity Group"`
|
||||
Description string `json:"description,omitempty" doc:"Optional description of the Anti-Affinity Group"`
|
||||
_ bool `name:"createAntiAffinityGroup" description:"Creates an Anti-Affinity Group"`
|
||||
}
|
||||
|
||||
func (req CreateAntiAffinityGroup) onBeforeSend(params url.Values) error {
|
||||
// Name must be set, but can be empty.
|
||||
if req.Name == "" {
|
||||
params.Set("name", "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal.
|
||||
func (CreateAntiAffinityGroup) Response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
// AsyncResponse returns the struct to unmarshal the async job.
|
||||
func (CreateAntiAffinityGroup) AsyncResponse() interface{} {
|
||||
return new(AffinityGroup)
|
||||
}
|
||||
|
||||
//go:generate go run generate/main.go -interface=Listable ListAntiAffinityGroups
|
||||
|
||||
// ListAntiAffinityGroups represents an Anti-Affinity Groups search.
|
||||
type ListAntiAffinityGroups struct {
|
||||
ID *UUID `json:"id,omitempty" doc:"List the Anti-Affinity Group by the ID provided"`
|
||||
Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
|
||||
Name string `json:"name,omitempty" doc:"Lists Anti-Affinity Groups by name"`
|
||||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
VirtualMachineID *UUID `json:"virtualmachineid,omitempty" doc:"Lists Anti-Affinity Groups by virtual machine ID"`
|
||||
_ bool `name:"listAntiAffinityGroups" description:"Lists Anti-Affinity Groups"`
|
||||
}
|
||||
|
||||
// ListAntiAffinityGroupsResponse represents a list of Anti-Affinity Groups.
|
||||
type ListAntiAffinityGroupsResponse struct {
|
||||
Count int `json:"count"`
|
||||
AntiAffinityGroup []AffinityGroup `json:"antiaffinitygroup"`
|
||||
}
|
||||
|
||||
// DeleteAntiAffinityGroup (Async) represents an Anti-Affinity Group to be deleted.
|
||||
type DeleteAntiAffinityGroup struct {
|
||||
ID *UUID `json:"id,omitempty" doc:"The ID of the Anti-Affinity Group. Mutually exclusive with name parameter"`
|
||||
Name string `json:"name,omitempty" doc:"The name of the Anti-Affinity Group. Mutually exclusive with ID parameter"`
|
||||
_ bool `name:"deleteAntiAffinityGroup" description:"Deletes Anti-Affinity Group"`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal.
|
||||
func (DeleteAntiAffinityGroup) Response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
// AsyncResponse returns the struct to unmarshal the async job.
|
||||
func (DeleteAntiAffinityGroup) AsyncResponse() interface{} {
|
||||
return new(BooleanResponse)
|
||||
}
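
Taken together, the commands above cover the usual Anti-Affinity Group lifecycle. A short sketch of driving them with the client; the endpoint and credentials are placeholders, and the type assertion relies on CreateAntiAffinityGroup.AsyncResponse() yielding an AffinityGroup as defined above:

package main

import (
	"context"
	"log"

	"github.com/exoscale/egoscale"
)

func main() {
	// Placeholder endpoint/credentials; replace with real values.
	cs := egoscale.NewClient("https://api.exoscale.com/v1", "EXO...", "...")
	ctx := context.Background()

	// Create an Anti-Affinity Group (async command, resolved by RequestWithContext).
	resp, err := cs.RequestWithContext(ctx, &egoscale.CreateAntiAffinityGroup{
		Name:        "db-servers",
		Description: "keep database nodes on distinct hypervisors",
	})
	if err != nil {
		log.Fatal(err)
	}
	aag := resp.(*egoscale.AffinityGroup) // CreateAntiAffinityGroup.AsyncResponse() returns an AffinityGroup
	log.Printf("created Anti-Affinity Group %s (%s)", aag.Name, aag.ID)

	// Delete it again through the Deletable helper and the Delete method above.
	if err := cs.DeleteWithContext(ctx, egoscale.AntiAffinityGroup{ID: aag.ID}); err != nil {
		log.Fatal(err)
	}
}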
|
vendor/github.com/exoscale/egoscale/antiaffinity_groups_response.go (generated, vendored, new file, 43 lines)
|
@ -0,0 +1,43 @@
|
|||
// code generated; DO NOT EDIT.
|
||||
|
||||
package egoscale
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Response returns the struct to unmarshal.
|
||||
func (ListAntiAffinityGroups) Response() interface{} {
|
||||
return new(ListAntiAffinityGroupsResponse)
|
||||
}
|
||||
|
||||
// ListRequest returns itself.
|
||||
func (ls *ListAntiAffinityGroups) ListRequest() (ListCommand, error) {
|
||||
if ls == nil {
|
||||
return nil, fmt.Errorf("%T cannot be nil", ls)
|
||||
}
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
// SetPage sets the current page.
|
||||
func (ls *ListAntiAffinityGroups) SetPage(page int) {
|
||||
ls.Page = page
|
||||
}
|
||||
|
||||
// SetPageSize sets the page size.
|
||||
func (ls *ListAntiAffinityGroups) SetPageSize(pageSize int) {
|
||||
ls.PageSize = pageSize
|
||||
}
|
||||
|
||||
// Each triggers the callback for each, valid answer or any non 404 issue.
|
||||
func (ListAntiAffinityGroups) Each(resp interface{}, callback IterateItemFunc) {
|
||||
items, ok := resp.(*ListAntiAffinityGroupsResponse)
|
||||
if !ok {
|
||||
callback(nil, fmt.Errorf("wrong type, ListAntiAffinityGroupsResponse was expected, got %T", resp))
|
||||
return
|
||||
}
|
||||
|
||||
for i := range items.AntiAffinityGroup {
|
||||
if !callback(&items.AntiAffinityGroup[i], nil) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
|
@ -2,6 +2,7 @@ package egoscale
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
@ -10,8 +11,14 @@ import (
|
|||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v2 "github.com/exoscale/egoscale/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultTimeout represents the default API client HTTP request timeout.
|
||||
DefaultTimeout = 60 * time.Second
|
||||
)
|
||||
|
||||
// UserAgent is the "User-Agent" HTTP request header added to outgoing HTTP requests.
|
||||
|
@ -61,6 +68,12 @@ type Client struct {
|
|||
RetryStrategy RetryStrategyFunc
|
||||
// Logger contains any log, plug your own
|
||||
Logger *log.Logger
|
||||
|
||||
// noV2 represents a flag disabling v2.Client embedding.
|
||||
noV2 bool
|
||||
|
||||
// Public API secondary client
|
||||
*v2.Client
|
||||
}
|
||||
|
||||
// RetryStrategyFunc represents a how much time to wait between two calls to the API
|
||||
|
@ -72,109 +85,173 @@ type IterateItemFunc func(interface{}, error) bool
|
|||
// WaitAsyncJobResultFunc represents the callback to wait a results of an async request, if false stops
|
||||
type WaitAsyncJobResultFunc func(*AsyncJobResult, error) bool
|
||||
|
||||
// NewClient creates an API client with default timeout (60)
|
||||
//
|
||||
// Timeout is set to both the HTTP client and the client itself.
|
||||
func NewClient(endpoint, apiKey, apiSecret string) *Client {
|
||||
timeout := 60 * time.Second
|
||||
expiration := 10 * time.Minute
|
||||
// ClientOpt represents a new Client option.
|
||||
type ClientOpt func(*Client)
|
||||
|
||||
httpClient := &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
// WithHTTPClient overrides the Client's default HTTP client.
|
||||
func WithHTTPClient(hc *http.Client) ClientOpt {
|
||||
return func(c *Client) { c.HTTPClient = hc }
|
||||
}
|
||||
|
||||
// WithTimeout overrides the Client's default timeout value (DefaultTimeout).
|
||||
func WithTimeout(d time.Duration) ClientOpt {
|
||||
return func(c *Client) { c.Timeout = d }
|
||||
}
|
||||
|
||||
// WithTrace enables the Client's HTTP request tracing.
|
||||
func WithTrace() ClientOpt {
|
||||
return func(c *Client) { c.TraceOn() }
|
||||
}
|
||||
|
||||
// WithoutV2Client disables implicit v2.Client embedding.
|
||||
func WithoutV2Client() ClientOpt {
|
||||
return func(c *Client) { c.noV2 = true }
|
||||
}
|
||||
|
||||
// NewClient creates an Exoscale API client.
|
||||
// Note: unless the WithoutV2Client() ClientOpt is passed, this function
|
||||
// initializes a v2.Client embedded into the returned *Client struct
|
||||
// inheriting the Exoscale API credentials, endpoint and timeout value, but
|
||||
// not the custom http.Client. The 2 clients must not share the same
|
||||
// *http.Client, as it can cause middleware clashes.
|
||||
func NewClient(endpoint, apiKey, apiSecret string, opts ...ClientOpt) *Client {
|
||||
client := &Client{
|
||||
HTTPClient: httpClient,
|
||||
HTTPClient: &http.Client{
|
||||
Transport: &defaultTransport{next: http.DefaultTransport},
|
||||
},
|
||||
Endpoint: endpoint,
|
||||
APIKey: apiKey,
|
||||
apiSecret: apiSecret,
|
||||
PageSize: 50,
|
||||
Timeout: timeout,
|
||||
Expiration: expiration,
|
||||
Timeout: DefaultTimeout,
|
||||
Expiration: 10 * time.Minute,
|
||||
RetryStrategy: MonotonicRetryStrategyFunc(2),
|
||||
Logger: log.New(ioutil.Discard, "", 0),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(client)
|
||||
}
|
||||
|
||||
if prefix, ok := os.LookupEnv("EXOSCALE_TRACE"); ok {
|
||||
client.Logger = log.New(os.Stderr, prefix, log.LstdFlags)
|
||||
client.TraceOn()
|
||||
}
|
||||
|
||||
if !client.noV2 {
|
||||
v2Client, err := v2.NewClient(
|
||||
client.APIKey,
|
||||
client.apiSecret,
|
||||
v2.ClientOptWithAPIEndpoint(client.Endpoint),
|
||||
v2.ClientOptWithTimeout(client.Timeout),
|
||||
|
||||
// Don't use v2.ClientOptWithHTTPClient() with the root API client's http.Client, as the
|
||||
// v2.Client uses HTTP middleware that can break callers that expect CS-compatible error
|
||||
// responses.
|
||||
)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unable to initialize API V2 client: %s", err))
|
||||
}
|
||||
client.Client = v2Client
|
||||
}
|
||||
|
||||
return client
|
||||
}
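
The functional options above replace the former fixed-argument constructor. A minimal sketch of building a client with a shorter timeout; the endpoint and credentials are placeholders, and ListZones is only an illustrative call:

package main

import (
	"log"
	"time"

	"github.com/exoscale/egoscale"
)

func main() {
	// Placeholder endpoint/credentials; replace with real values.
	cs := egoscale.NewClient(
		"https://api.exoscale.com/v1", "EXO...", "...",
		egoscale.WithTimeout(30*time.Second), // overrides DefaultTimeout (60s)
		// egoscale.WithoutV2Client(),        // uncomment to skip embedding the API V2 client
	)

	resp, err := cs.Request(&egoscale.ListZones{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d zones available", resp.(*egoscale.ListZonesResponse).Count)
}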
|
||||
|
||||
// Do implemements the v2.HttpRequestDoer interface in order to intercept HTTP response before the
|
||||
// generated code closes its body, giving us a chance to return meaningful error messages from the API.
|
||||
// This is only relevant for API v2 operations.
|
||||
func (c *Client) Do(req *http.Request) (*http.Response, error) {
|
||||
resp, err := c.HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
// If the request returned a Go error don't bother analyzing the response
|
||||
// body, as there probably won't be any (e.g. connection timeout/refused).
|
||||
return resp, err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 && resp.StatusCode <= 599 {
|
||||
var res struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading response body: %s", err)
|
||||
}
|
||||
|
||||
if json.Valid(data) {
|
||||
if err = json.Unmarshal(data, &res); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling response: %s", err)
|
||||
}
|
||||
} else {
|
||||
res.Message = string(data)
|
||||
}
|
||||
|
||||
switch {
|
||||
case resp.StatusCode == http.StatusNotFound:
|
||||
return nil, ErrNotFound
|
||||
|
||||
case resp.StatusCode >= 400 && resp.StatusCode < 500:
|
||||
return nil, fmt.Errorf("%w: %s", ErrInvalidRequest, res.Message)
|
||||
|
||||
case resp.StatusCode >= 500:
|
||||
return nil, fmt.Errorf("%w: %s", ErrAPIError, res.Message)
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Get populates the given resource or fails
|
||||
func (client *Client) Get(ls Listable) (interface{}, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), client.Timeout)
|
||||
func (c *Client) Get(ls Listable) (interface{}, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
|
||||
defer cancel()
|
||||
|
||||
return client.GetWithContext(ctx, ls)
|
||||
return c.GetWithContext(ctx, ls)
|
||||
}
|
||||
|
||||
// GetWithContext populates the given resource or fails
|
||||
func (client *Client) GetWithContext(ctx context.Context, ls Listable) (interface{}, error) {
|
||||
gs, err := client.ListWithContext(ctx, ls)
|
||||
func (c *Client) GetWithContext(ctx context.Context, ls Listable) (interface{}, error) {
|
||||
gs, err := c.ListWithContext(ctx, ls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
count := len(gs)
|
||||
if count != 1 {
|
||||
req, err := ls.ListRequest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params, err := client.Payload(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// removing sensitive/useless informations
|
||||
params.Del("expires")
|
||||
params.Del("response")
|
||||
params.Del("signature")
|
||||
params.Del("signatureversion")
|
||||
|
||||
// formatting the query string nicely
|
||||
payload := params.Encode()
|
||||
payload = strings.Replace(payload, "&", ", ", -1)
|
||||
|
||||
if count == 0 {
|
||||
return nil, &ErrorResponse{
|
||||
CSErrorCode: ServerAPIException,
|
||||
ErrorCode: ParamError,
|
||||
ErrorText: fmt.Sprintf("not found, query: %s", payload),
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("more than one element found: %s", payload)
|
||||
}
|
||||
switch len(gs) {
|
||||
case 0:
|
||||
return nil, ErrNotFound
|
||||
|
||||
case 1:
|
||||
return gs[0], nil
|
||||
|
||||
default:
|
||||
return nil, ErrTooManyFound
|
||||
}
|
||||
}
|
||||
|
||||
// Delete removes the given resource of fails
|
||||
func (client *Client) Delete(g Deletable) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), client.Timeout)
|
||||
func (c *Client) Delete(g Deletable) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
|
||||
defer cancel()
|
||||
|
||||
return client.DeleteWithContext(ctx, g)
|
||||
return c.DeleteWithContext(ctx, g)
|
||||
}
|
||||
|
||||
// DeleteWithContext removes the given resource of fails
|
||||
func (client *Client) DeleteWithContext(ctx context.Context, g Deletable) error {
|
||||
return g.Delete(ctx, client)
|
||||
func (c *Client) DeleteWithContext(ctx context.Context, g Deletable) error {
|
||||
return g.Delete(ctx, c)
|
||||
}
|
||||
|
||||
// List lists the given resource (and paginate till the end)
|
||||
func (client *Client) List(g Listable) ([]interface{}, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), client.Timeout)
|
||||
func (c *Client) List(g Listable) ([]interface{}, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
|
||||
defer cancel()
|
||||
|
||||
return client.ListWithContext(ctx, g)
|
||||
return c.ListWithContext(ctx, g)
|
||||
}
|
||||
|
||||
// ListWithContext lists the given resources (and paginate till the end)
|
||||
func (client *Client) ListWithContext(ctx context.Context, g Listable) (s []interface{}, err error) {
|
||||
func (c *Client) ListWithContext(ctx context.Context, g Listable) (s []interface{}, err error) {
|
||||
s = make([]interface{}, 0)
|
||||
|
||||
defer func() {
|
||||
|
@ -193,7 +270,7 @@ func (client *Client) ListWithContext(ctx context.Context, g Listable) (s []inte
|
|||
err = e
|
||||
return
|
||||
}
|
||||
client.PaginateWithContext(ctx, req, func(item interface{}, e error) bool {
|
||||
c.PaginateWithContext(ctx, req, func(item interface{}, e error) bool {
|
||||
if item != nil {
|
||||
s = append(s, item)
|
||||
return true
|
||||
|
@ -205,38 +282,8 @@ func (client *Client) ListWithContext(ctx context.Context, g Listable) (s []inte
|
|||
return
|
||||
}
|
||||
|
||||
// AsyncListWithContext lists the given resources (and paginate till the end)
|
||||
//
|
||||
//
|
||||
// // NB: goroutine may leak if not read until the end. Create a proper context!
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
//
|
||||
// outChan, errChan := client.AsyncListWithContext(ctx, new(egoscale.VirtualMachine))
|
||||
//
|
||||
// for {
|
||||
// select {
|
||||
// case i, ok := <- outChan:
|
||||
// if ok {
|
||||
// vm := i.(egoscale.VirtualMachine)
|
||||
// // ...
|
||||
// } else {
|
||||
// outChan = nil
|
||||
// }
|
||||
// case err, ok := <- errChan:
|
||||
// if ok {
|
||||
// // do something
|
||||
// }
|
||||
// // Once an error has been received, you can expect the channels to be closed.
|
||||
// errChan = nil
|
||||
// }
|
||||
// if errChan == nil && outChan == nil {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
//
|
||||
func (client *Client) AsyncListWithContext(ctx context.Context, g Listable) (<-chan interface{}, <-chan error) {
|
||||
outChan := make(chan interface{}, client.PageSize)
|
||||
func (c *Client) AsyncListWithContext(ctx context.Context, g Listable) (<-chan interface{}, <-chan error) {
|
||||
outChan := make(chan interface{}, c.PageSize)
|
||||
errChan := make(chan error)
|
||||
|
||||
go func() {
|
||||
|
@ -248,7 +295,7 @@ func (client *Client) AsyncListWithContext(ctx context.Context, g Listable) (<-c
|
|||
errChan <- err
|
||||
return
|
||||
}
|
||||
client.PaginateWithContext(ctx, req, func(item interface{}, e error) bool {
|
||||
c.PaginateWithContext(ctx, req, func(item interface{}, e error) bool {
|
||||
if item != nil {
|
||||
outChan <- item
|
||||
return true
|
||||
|
@ -262,29 +309,29 @@ func (client *Client) AsyncListWithContext(ctx context.Context, g Listable) (<-c
|
|||
}
|
||||
|
||||
// Paginate runs the ListCommand and paginates
|
||||
func (client *Client) Paginate(g Listable, callback IterateItemFunc) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), client.Timeout)
|
||||
func (c *Client) Paginate(g Listable, callback IterateItemFunc) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
|
||||
defer cancel()
|
||||
|
||||
client.PaginateWithContext(ctx, g, callback)
|
||||
c.PaginateWithContext(ctx, g, callback)
|
||||
}
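
Paginate drives the IterateItemFunc callback item by item across every page, stopping as soon as the callback returns false. A minimal sketch counting Compute instances this way; the endpoint and credentials are placeholders, and VirtualMachine is used only as an example Listable:

package main

import (
	"log"

	"github.com/exoscale/egoscale"
)

func main() {
	// Placeholder endpoint/credentials; replace with real values.
	cs := egoscale.NewClient("https://api.exoscale.com/v1", "EXO...", "...")

	// The callback is invoked once per item; returning false stops the pagination.
	count := 0
	cs.Paginate(&egoscale.VirtualMachine{}, func(item interface{}, err error) bool {
		if err != nil {
			log.Printf("listing error: %s", err)
			return false // stop paginating on error
		}
		count++
		return true // keep going to the next item/page
	})
	log.Printf("%d Compute instances", count)
}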
|
||||
|
||||
// PaginateWithContext runs the ListCommand as long as the ctx is valid
|
||||
func (client *Client) PaginateWithContext(ctx context.Context, g Listable, callback IterateItemFunc) {
|
||||
func (c *Client) PaginateWithContext(ctx context.Context, g Listable, callback IterateItemFunc) {
|
||||
req, err := g.ListRequest()
|
||||
if err != nil {
|
||||
callback(nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
pageSize := client.PageSize
|
||||
pageSize := c.PageSize
|
||||
|
||||
page := 1
|
||||
|
||||
for {
|
||||
req.SetPage(page)
|
||||
req.SetPageSize(pageSize)
|
||||
resp, err := client.RequestWithContext(ctx, req)
|
||||
resp, err := c.RequestWithContext(ctx, req)
|
||||
if err != nil {
|
||||
// in case of 431, the response is knowingly empty
|
||||
if errResponse, ok := err.(*ErrorResponse); ok && page == 1 && errResponse.ErrorCode == ParamError {
|
||||
|
@ -322,7 +369,7 @@ func (client *Client) PaginateWithContext(ctx context.Context, g Listable, callb
|
|||
}
|
||||
|
||||
// APIName returns the name of the given command
|
||||
func (client *Client) APIName(command Command) string {
|
||||
func (c *Client) APIName(command Command) string {
|
||||
// This is due to a limitation of Go<=1.7
|
||||
_, ok := command.(*AuthorizeSecurityGroupEgress)
|
||||
_, okPtr := command.(AuthorizeSecurityGroupEgress)
|
||||
|
@ -338,7 +385,7 @@ func (client *Client) APIName(command Command) string {
|
|||
}
|
||||
|
||||
// APIDescription returns the description of the given command
|
||||
func (client *Client) APIDescription(command Command) string {
|
||||
func (c *Client) APIDescription(command Command) string {
|
||||
info, err := info(command)
|
||||
if err != nil {
|
||||
return "*missing description*"
|
||||
|
@ -347,7 +394,7 @@ func (client *Client) APIDescription(command Command) string {
|
|||
}
|
||||
|
||||
// Response returns the response structure of the given command
|
||||
func (client *Client) Response(command Command) interface{} {
|
||||
func (c *Client) Response(command Command) interface{} {
|
||||
switch c := command.(type) {
|
||||
case AsyncCommand:
|
||||
return c.AsyncResponse()
|
||||
|
@ -357,35 +404,54 @@ func (client *Client) Response(command Command) interface{} {
|
|||
}
|
||||
|
||||
// TraceOn activates the HTTP tracer
|
||||
func (client *Client) TraceOn() {
|
||||
if _, ok := client.HTTPClient.Transport.(*traceTransport); !ok {
|
||||
client.HTTPClient.Transport = &traceTransport{
|
||||
transport: client.HTTPClient.Transport,
|
||||
logger: client.Logger,
|
||||
func (c *Client) TraceOn() {
|
||||
if _, ok := c.HTTPClient.Transport.(*traceTransport); !ok {
|
||||
c.HTTPClient.Transport = &traceTransport{
|
||||
next: c.HTTPClient.Transport,
|
||||
logger: c.Logger,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TraceOff deactivates the HTTP tracer
|
||||
func (client *Client) TraceOff() {
|
||||
if rt, ok := client.HTTPClient.Transport.(*traceTransport); ok {
|
||||
client.HTTPClient.Transport = rt.transport
|
||||
func (c *Client) TraceOff() {
|
||||
if rt, ok := c.HTTPClient.Transport.(*traceTransport); ok {
|
||||
c.HTTPClient.Transport = rt.next
|
||||
}
|
||||
}
|
||||
|
||||
// traceTransport contains the original HTTP transport to enable it to be reverted
|
||||
// defaultTransport is the default HTTP client transport.
|
||||
type defaultTransport struct {
|
||||
next http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip executes a single HTTP transaction while augmenting requests with custom headers.
|
||||
func (t *defaultTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
req.Header.Add("User-Agent", UserAgent)
|
||||
|
||||
resp, err := t.next.RoundTrip(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// traceTransport is a client HTTP middleware that dumps HTTP requests and responses content to a logger.
|
||||
type traceTransport struct {
|
||||
transport http.RoundTripper
|
||||
logger *log.Logger
|
||||
next http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip executes a single HTTP transaction
|
||||
func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
req.Header.Add("User-Agent", UserAgent)
|
||||
|
||||
if dump, err := httputil.DumpRequest(req, true); err == nil {
|
||||
t.logger.Printf("%s", dump)
|
||||
}
|
||||
|
||||
resp, err := t.transport.RoundTrip(req)
|
||||
resp, err := t.next.RoundTrip(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -4,6 +4,42 @@ package egoscale
|
|||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[CloudRuntimeException-4250]
|
||||
_ = x[ExecutionException-4260]
|
||||
_ = x[HypervisorVersionChangedException-4265]
|
||||
_ = x[CloudException-4275]
|
||||
_ = x[AccountLimitException-4280]
|
||||
_ = x[AgentUnavailableException-4285]
|
||||
_ = x[CloudAuthenticationException-4290]
|
||||
_ = x[ConcurrentOperationException-4300]
|
||||
_ = x[ConflictingNetworkSettingsException-4305]
|
||||
_ = x[DiscoveredWithErrorException-4310]
|
||||
_ = x[HAStateException-4315]
|
||||
_ = x[InsufficientAddressCapacityException-4320]
|
||||
_ = x[InsufficientCapacityException-4325]
|
||||
_ = x[InsufficientNetworkCapacityException-4330]
|
||||
_ = x[InsufficientServerCapacityException-4335]
|
||||
_ = x[InsufficientStorageCapacityException-4340]
|
||||
_ = x[InternalErrorException-4345]
|
||||
_ = x[InvalidParameterValueException-4350]
|
||||
_ = x[ManagementServerException-4355]
|
||||
_ = x[NetworkRuleConflictException-4360]
|
||||
_ = x[PermissionDeniedException-4365]
|
||||
_ = x[ResourceAllocationException-4370]
|
||||
_ = x[ResourceInUseException-4375]
|
||||
_ = x[ResourceUnavailableException-4380]
|
||||
_ = x[StorageUnavailableException-4385]
|
||||
_ = x[UnsupportedServiceException-4390]
|
||||
_ = x[VirtualMachineMigrationException-4395]
|
||||
_ = x[AsyncCommandQueued-4540]
|
||||
_ = x[RequestLimitException-4545]
|
||||
_ = x[ServerAPIException-9999]
|
||||
}
|
||||
|
||||
const _CSErrorCode_name = "CloudRuntimeExceptionExecutionExceptionHypervisorVersionChangedExceptionCloudExceptionAccountLimitExceptionAgentUnavailableExceptionCloudAuthenticationExceptionConcurrentOperationExceptionConflictingNetworkSettingsExceptionDiscoveredWithErrorExceptionHAStateExceptionInsufficientAddressCapacityExceptionInsufficientCapacityExceptionInsufficientNetworkCapacityExceptionInsufficientServerCapacityExceptionInsufficientStorageCapacityExceptionInternalErrorExceptionInvalidParameterValueExceptionManagementServerExceptionNetworkRuleConflictExceptionPermissionDeniedExceptionResourceAllocationExceptionResourceInUseExceptionResourceUnavailableExceptionStorageUnavailableExceptionUnsupportedServiceExceptionVirtualMachineMigrationExceptionAsyncCommandQueuedRequestLimitExceptionServerAPIException"
|
||||
|
||||
var _CSErrorCode_map = map[CSErrorCode]string{
|
||||
|
|
|
@@ -63,7 +63,7 @@ Debugging and traces

As this library is mostly an HTTP client, you can reuse all the existing tools around it.

	cs := egoscale.NewClient("https://api.exoscale.com/compute", "EXO...", "...")
	cs := egoscale.NewClient("https://api.exoscale.com/v1", "EXO...", "...")
	// sets a logger on stderr
	cs.Logger = log.New(os.Stderr, "prefix", log.LstdFlags)
	// activates the HTTP traces

@@ -76,7 +76,7 @@ APIs

All the available APIs on the server and provided by the API Discovery plugin.

	cs := egoscale.NewClient("https://api.exoscale.com/compute", "EXO...", "...")
	cs := egoscale.NewClient("https://api.exoscale.com/v1", "EXO...", "...")

	resp, err := cs.Request(&egoscale.ListAPIs{})
	if err != nil {
@@ -0,0 +1,15 @@
package egoscale

import "errors"

// ErrNotFound represents an error indicating a non-existent resource.
var ErrNotFound = errors.New("resource not found")

// ErrTooManyFound represents an error indicating multiple results found for a single resource.
var ErrTooManyFound = errors.New("multiple resources found")

// ErrInvalidRequest represents an error indicating that the caller's request is invalid.
var ErrInvalidRequest = errors.New("invalid request")

// ErrAPIError represents an error indicating an API-side issue.
var ErrAPIError = errors.New("API error")
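
These sentinel values are what the reworked Get and Do paths return: ErrNotFound for empty results and 404s, ErrTooManyFound for ambiguous lookups, ErrInvalidRequest/ErrAPIError wrapping 4xx/5xx API V2 messages. Callers can therefore branch with errors.Is; a small sketch, with placeholder credentials and Zone used only as an example looked-up resource:

package main

import (
	"errors"
	"log"

	"github.com/exoscale/egoscale"
)

func main() {
	// Placeholder endpoint/credentials; replace with real values.
	cs := egoscale.NewClient("https://api.exoscale.com/v1", "EXO...", "...")

	// Get now reports missing/ambiguous lookups via sentinel errors.
	res, err := cs.Get(&egoscale.Zone{Name: "ch-gva-2"})
	switch {
	case errors.Is(err, egoscale.ErrNotFound):
		log.Println("no such zone")
	case errors.Is(err, egoscale.ErrTooManyFound):
		log.Println("ambiguous zone name")
	case err != nil:
		log.Fatal(err)
	default:
		log.Printf("zone ID: %s", res.(*egoscale.Zone).ID)
	}
}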
@ -4,15 +4,37 @@ package egoscale
|
|||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[Unauthorized-401]
|
||||
_ = x[NotFound-404]
|
||||
_ = x[MethodNotAllowed-405]
|
||||
_ = x[UnsupportedActionError-422]
|
||||
_ = x[APILimitExceeded-429]
|
||||
_ = x[MalformedParameterError-430]
|
||||
_ = x[ParamError-431]
|
||||
_ = x[InternalError-530]
|
||||
_ = x[AccountError-531]
|
||||
_ = x[AccountResourceLimitError-532]
|
||||
_ = x[InsufficientCapacityError-533]
|
||||
_ = x[ResourceUnavailableError-534]
|
||||
_ = x[ResourceAllocationError-535]
|
||||
_ = x[ResourceInUseError-536]
|
||||
_ = x[NetworkRuleConflictError-537]
|
||||
}
|
||||
|
||||
const (
|
||||
_ErrorCode_name_0 = "Unauthorized"
|
||||
_ErrorCode_name_1 = "MethodNotAllowed"
|
||||
_ErrorCode_name_1 = "NotFoundMethodNotAllowed"
|
||||
_ErrorCode_name_2 = "UnsupportedActionError"
|
||||
_ErrorCode_name_3 = "APILimitExceededMalformedParameterErrorParamError"
|
||||
_ErrorCode_name_4 = "InternalErrorAccountErrorAccountResourceLimitErrorInsufficientCapacityErrorResourceUnavailableErrorResourceAllocationErrorResourceInUseErrorNetworkRuleConflictError"
|
||||
)
|
||||
|
||||
var (
|
||||
_ErrorCode_index_1 = [...]uint8{0, 8, 24}
|
||||
_ErrorCode_index_3 = [...]uint8{0, 16, 39, 49}
|
||||
_ErrorCode_index_4 = [...]uint8{0, 13, 25, 50, 75, 99, 122, 140, 164}
|
||||
)
|
||||
|
@ -21,8 +43,9 @@ func (i ErrorCode) String() string {
|
|||
switch {
|
||||
case i == 401:
|
||||
return _ErrorCode_name_0
|
||||
case i == 405:
|
||||
return _ErrorCode_name_1
|
||||
case 404 <= i && i <= 405:
|
||||
i -= 404
|
||||
return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]]
|
||||
case i == 422:
|
||||
return _ErrorCode_name_2
|
||||
case 429 <= i && i <= 431:
|
||||
|
|
|
@@ -1,3 +1,13 @@
module github.com/exoscale/egoscale

require github.com/gofrs/uuid v3.2.0+incompatible
require (
	github.com/deepmap/oapi-codegen v1.3.11
	github.com/gofrs/uuid v3.2.0+incompatible
	github.com/jarcoal/httpmock v1.0.6
	github.com/pkg/errors v0.9.1
	github.com/stretchr/objx v0.3.0 // indirect
	github.com/stretchr/testify v1.7.0
	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

go 1.14
@ -1,2 +1,82 @@
|
|||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/deepmap/oapi-codegen v1.3.11 h1:Nd3tDQfqgquLmCzyRONHzs5SJEwPPoQcFZxT8MKt1Hs=
|
||||
github.com/deepmap/oapi-codegen v1.3.11/go.mod h1:suMvK7+rKlx3+tpa8ByptmvoXbAV70wERKTOGH3hLp0=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/getkin/kin-openapi v0.13.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
|
||||
github.com/jarcoal/httpmock v1.0.6 h1:e81vOSexXU3mJuJ4l//geOmKIt+Vkxerk1feQBC8D0g=
|
||||
github.com/jarcoal/httpmock v1.0.6/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/labstack/echo/v4 v4.1.11 h1:z0BZoArY4FqdpUEl+wlHp4hnr/oSR6MTmQmv8OHSoww=
|
||||
github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
|
||||
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As=
|
||||
github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4=
github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191112182307-2180aed22343 h1:00ohfJ4K98s3m6BGUoBd8nyfp4Yl0GoIKvw5abItTjI=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777 h1:wejkGHRTr38uaKRqECZlsCsJ1/TGxIyFbH32x5zUdu4=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@ -0,0 +1,92 @@
|
|||
package egoscale
|
||||
|
||||
// APIKeyType holds the type of the API key
|
||||
type APIKeyType string
|
||||
|
||||
const (
|
||||
// APIKeyTypeUnrestricted is unrestricted
|
||||
APIKeyTypeUnrestricted APIKeyType = "unrestricted"
|
||||
// APIKeyTypeRestricted is restricted
|
||||
APIKeyTypeRestricted APIKeyType = "restricted"
|
||||
)
|
||||
|
||||
// APIKey represents an API key
|
||||
type APIKey struct {
|
||||
Name string `json:"name"`
|
||||
Key string `json:"key"`
|
||||
Secret string `json:"secret,omitempty"`
|
||||
Operations []string `json:"operations,omitempty"`
|
||||
Resources []string `json:"resources,omitempty"`
|
||||
Type APIKeyType `json:"type"`
|
||||
}
|
||||
|
||||
// CreateAPIKey represents an API key creation
|
||||
type CreateAPIKey struct {
|
||||
Name string `json:"name"`
|
||||
Operations string `json:"operations,omitempty"`
|
||||
Resources string `json:"resources,omitempty"`
|
||||
_ bool `name:"createApiKey" description:"Create an API key."`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (CreateAPIKey) Response() interface{} {
|
||||
return new(APIKey)
|
||||
}
|
||||
|
||||
// ListAPIKeys represents a search for API keys
|
||||
type ListAPIKeys struct {
|
||||
_ bool `name:"listApiKeys" description:"List API keys."`
|
||||
}
|
||||
|
||||
// ListAPIKeysResponse represents a list of API keys
|
||||
type ListAPIKeysResponse struct {
|
||||
Count int `json:"count"`
|
||||
APIKeys []APIKey `json:"apikey"`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (ListAPIKeys) Response() interface{} {
|
||||
return new(ListAPIKeysResponse)
|
||||
}
|
||||
|
||||
// ListAPIKeyOperations represents a search for operations for the current API key
|
||||
type ListAPIKeyOperations struct {
|
||||
_ bool `name:"listApiKeyOperations" description:"List operations allowed for the current API key."`
|
||||
}
|
||||
|
||||
// ListAPIKeyOperationsResponse represents a list of operations for the current API key
|
||||
type ListAPIKeyOperationsResponse struct {
|
||||
Operations []string `json:"operations"`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (ListAPIKeyOperations) Response() interface{} {
|
||||
return new(ListAPIKeyOperationsResponse)
|
||||
}
|
||||
|
||||
// GetAPIKey gets an API key
|
||||
type GetAPIKey struct {
|
||||
Key string `json:"key"`
|
||||
_ bool `name:"getApiKey" description:"Get an API key."`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (GetAPIKey) Response() interface{} {
|
||||
return new(APIKey)
|
||||
}
|
||||
|
||||
// RevokeAPIKey represents a revocation of an API key
|
||||
type RevokeAPIKey struct {
|
||||
Key string `json:"key"`
|
||||
_ bool `name:"revokeApiKey" description:"Revoke an API key."`
|
||||
}
|
||||
|
||||
// RevokeAPIKeyResponse represents the response to an API key revocation
|
||||
type RevokeAPIKeyResponse struct {
|
||||
Success bool `json:"success"`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (RevokeAPIKey) Response() interface{} {
|
||||
return new(RevokeAPIKeyResponse)
|
||||
}
|
|
@ -0,0 +1,170 @@
|
|||
package egoscale
|
||||
|
||||
// InstancePoolState represents the state of an Instance Pool.
|
||||
type InstancePoolState string
|
||||
|
||||
const (
|
||||
// InstancePoolCreating creating state.
|
||||
InstancePoolCreating InstancePoolState = "creating"
|
||||
// InstancePoolRunning running state.
|
||||
InstancePoolRunning InstancePoolState = "running"
|
||||
// InstancePoolDestroying destroying state.
|
||||
InstancePoolDestroying InstancePoolState = "destroying"
|
||||
// InstancePoolScalingUp scaling up state.
|
||||
InstancePoolScalingUp InstancePoolState = "scaling-up"
|
||||
// InstancePoolScalingDown scaling down state.
|
||||
InstancePoolScalingDown InstancePoolState = "scaling-down"
|
||||
)
|
||||
|
||||
// InstancePool represents an Instance Pool.
|
||||
type InstancePool struct {
|
||||
ID *UUID `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
ServiceOfferingID *UUID `json:"serviceofferingid"`
|
||||
TemplateID *UUID `json:"templateid"`
|
||||
ZoneID *UUID `json:"zoneid"`
|
||||
AntiAffinityGroupIDs []UUID `json:"affinitygroupids"`
|
||||
SecurityGroupIDs []UUID `json:"securitygroupids"`
|
||||
NetworkIDs []UUID `json:"networkids"`
|
||||
IPv6 bool `json:"ipv6"`
|
||||
KeyPair string `json:"keypair"`
|
||||
UserData string `json:"userdata"`
|
||||
Size int `json:"size"`
|
||||
RootDiskSize int `json:"rootdisksize"`
|
||||
State InstancePoolState `json:"state"`
|
||||
VirtualMachines []VirtualMachine `json:"virtualmachines"`
|
||||
}
|
||||
|
||||
// CreateInstancePool represents an Instance Pool creation API request.
|
||||
type CreateInstancePool struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
ServiceOfferingID *UUID `json:"serviceofferingid"`
|
||||
TemplateID *UUID `json:"templateid"`
|
||||
ZoneID *UUID `json:"zoneid"`
|
||||
AntiAffinityGroupIDs []UUID `json:"affinitygroupids,omitempty"`
|
||||
SecurityGroupIDs []UUID `json:"securitygroupids,omitempty"`
|
||||
NetworkIDs []UUID `json:"networkids,omitempty"`
|
||||
IPv6 bool `json:"ipv6,omitempty"`
|
||||
KeyPair string `json:"keypair,omitempty"`
|
||||
UserData string `json:"userdata,omitempty"`
|
||||
Size int `json:"size"`
|
||||
RootDiskSize int `json:"rootdisksize,omitempty"`
|
||||
_ bool `name:"createInstancePool" description:"Create an Instance Pool"`
|
||||
}
|
||||
|
||||
// CreateInstancePoolResponse represents an Instance Pool creation API response.
|
||||
type CreateInstancePoolResponse struct {
|
||||
ID *UUID `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
ServiceOfferingID *UUID `json:"serviceofferingid"`
|
||||
TemplateID *UUID `json:"templateid"`
|
||||
ZoneID *UUID `json:"zoneid"`
|
||||
AntiAffinityGroupIDs []UUID `json:"affinitygroupids"`
|
||||
SecurityGroupIDs []UUID `json:"securitygroupids"`
|
||||
NetworkIDs []UUID `json:"networkids"`
|
||||
IPv6 bool `json:"ipv6"`
|
||||
KeyPair string `json:"keypair"`
|
||||
UserData string `json:"userdata"`
|
||||
Size int64 `json:"size"`
|
||||
RootDiskSize int `json:"rootdisksize"`
|
||||
State InstancePoolState `json:"state"`
|
||||
}
|
||||
|
||||
// Response returns an empty structure to unmarshal an Instance Pool creation API response into.
|
||||
func (CreateInstancePool) Response() interface{} {
|
||||
return new(CreateInstancePoolResponse)
|
||||
}
|
||||
|
||||
// UpdateInstancePool represents an Instance Pool update API request.
|
||||
type UpdateInstancePool struct {
|
||||
ID *UUID `json:"id"`
|
||||
ZoneID *UUID `json:"zoneid"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
TemplateID *UUID `json:"templateid,omitempty"`
|
||||
RootDiskSize int `json:"rootdisksize,omitempty"`
|
||||
UserData string `json:"userdata,omitempty"`
|
||||
IPv6 bool `json:"ipv6,omitempty"`
|
||||
_ bool `name:"updateInstancePool" description:"Update an Instance Pool"`
|
||||
}
|
||||
|
||||
// Response returns an empty structure to unmarshal an Instance Pool update API response into.
|
||||
func (UpdateInstancePool) Response() interface{} {
|
||||
return new(BooleanResponse)
|
||||
}
|
||||
|
||||
// ScaleInstancePool represents an Instance Pool scaling API request.
|
||||
type ScaleInstancePool struct {
|
||||
ID *UUID `json:"id"`
|
||||
ZoneID *UUID `json:"zoneid"`
|
||||
Size int `json:"size"`
|
||||
_ bool `name:"scaleInstancePool" description:"Scale an Instance Pool"`
|
||||
}
|
||||
|
||||
// Response returns an empty structure to unmarshal an Instance Pool scaling API response into.
|
||||
func (ScaleInstancePool) Response() interface{} {
|
||||
return new(BooleanResponse)
|
||||
}
|
||||
|
||||
// DestroyInstancePool represents an Instance Pool destruction API request.
|
||||
type DestroyInstancePool struct {
|
||||
ID *UUID `json:"id"`
|
||||
ZoneID *UUID `json:"zoneid"`
|
||||
_ bool `name:"destroyInstancePool" description:"Destroy an Instance Pool"`
|
||||
}
|
||||
|
||||
// Response returns an empty structure to unmarshal an Instance Pool destruction API response into.
|
||||
func (DestroyInstancePool) Response() interface{} {
|
||||
return new(BooleanResponse)
|
||||
}
|
||||
|
||||
// GetInstancePool retrieves an Instance Pool's details.
|
||||
type GetInstancePool struct {
|
||||
ID *UUID `json:"id"`
|
||||
ZoneID *UUID `json:"zoneid"`
|
||||
_ bool `name:"getInstancePool" description:"Get an Instance Pool"`
|
||||
}
|
||||
|
||||
// GetInstancePoolResponse represents a get Instance Pool API response.
|
||||
type GetInstancePoolResponse struct {
|
||||
Count int
|
||||
InstancePools []InstancePool `json:"instancepool"`
|
||||
}
|
||||
|
||||
// Response returns an empty structure to unmarshal an Instance Pool get API response into.
|
||||
func (GetInstancePool) Response() interface{} {
|
||||
return new(GetInstancePoolResponse)
|
||||
}
|
||||
|
||||
// ListInstancePools represents a list Instance Pool API request.
|
||||
type ListInstancePools struct {
|
||||
ZoneID *UUID `json:"zoneid"`
|
||||
_ bool `name:"listInstancePools" description:"List Instance Pools"`
|
||||
}
|
||||
|
||||
// ListInstancePoolsResponse represents a list Instance Pool API response.
|
||||
type ListInstancePoolsResponse struct {
|
||||
Count int
|
||||
InstancePools []InstancePool `json:"instancepool"`
|
||||
}
|
||||
|
||||
// Response returns an empty structure to unmarshal an Instance Pool list API response into.
|
||||
func (ListInstancePools) Response() interface{} {
|
||||
return new(ListInstancePoolsResponse)
|
||||
}
|
||||
|
||||
// EvictInstancePoolMembers represents an Instance Pool members eviction API request.
|
||||
type EvictInstancePoolMembers struct {
|
||||
ID *UUID `json:"id"`
|
||||
ZoneID *UUID `json:"zoneid"`
|
||||
MemberIDs []UUID `json:"memberids"`
|
||||
_ bool `name:"evictInstancePoolMembers" description:"Evict some Instance Pool members"`
|
||||
}
|
||||
|
||||
// Response returns an empty structure to unmarshal an Instance Pool members eviction API response into.
|
||||
func (EvictInstancePoolMembers) Response() interface{} {
|
||||
return new(BooleanResponse)
|
||||
}
|
|
@ -50,7 +50,7 @@ type ListISOs struct {
|
|||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
ShowRemoved *bool `json:"showremoved,omitempty" doc:"Show removed ISOs as well"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs)"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs). Note: multiple tags are OR'ed, not AND'ed."`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"The ID of the zone"`
|
||||
}
|
||||
|
||||
|
|
|
@ -4,6 +4,15 @@ package egoscale
|
|||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[Pending-0]
|
||||
_ = x[Success-1]
|
||||
_ = x[Failure-2]
|
||||
}
|
||||
|
||||
const _JobStatusType_name = "PendingSuccessFailure"
|
||||
|
||||
var _JobStatusType_index = [...]uint8{0, 7, 14, 21}
|
||||
|
|
|
@ -1,91 +0,0 @@
|
|||
package egoscale
|
||||
|
||||
// NetworkOffering corresponds to the Compute Offerings
|
||||
type NetworkOffering struct {
|
||||
Availability string `json:"availability,omitempty" doc:"availability of the network offering"`
|
||||
ConserveMode bool `json:"conservemode,omitempty" doc:"true if network offering is ip conserve mode enabled"`
|
||||
Created string `json:"created,omitempty" doc:"the date this network offering was created"`
|
||||
Details map[string]string `json:"details,omitempty" doc:"additional key/value details tied with network offering"`
|
||||
DisplayText string `json:"displaytext,omitempty" doc:"an alternate display text of the network offering."`
|
||||
EgressDefaultPolicy bool `json:"egressdefaultpolicy,omitempty" doc:"true if guest network default egress policy is allow; false if default egress policy is deny"`
|
||||
GuestIPType string `json:"guestiptype,omitempty" doc:"guest type of the network offering, can be Shared or Isolated"`
|
||||
ID *UUID `json:"id,omitempty" doc:"the id of the network offering"`
|
||||
IsDefault bool `json:"isdefault,omitempty" doc:"true if network offering is default, false otherwise"`
|
||||
IsPersistent bool `json:"ispersistent,omitempty" doc:"true if network offering supports persistent networks, false otherwise"`
|
||||
MaxConnections int `json:"maxconnections,omitempty" doc:"maximum number of concurrents connections to be handled by lb"`
|
||||
Name string `json:"name,omitempty" doc:"the name of the network offering"`
|
||||
NetworkRate int `json:"networkrate,omitempty" doc:"data transfer rate in megabits per second allowed."`
|
||||
Service []Service `json:"service,omitempty" doc:"the list of supported services"`
|
||||
ServiceOfferingID *UUID `json:"serviceofferingid,omitempty" doc:"the ID of the service offering used by virtual router provider"`
|
||||
SpecifyIPRanges bool `json:"specifyipranges,omitempty" doc:"true if network offering supports specifying ip ranges, false otherwise"`
|
||||
SpecifyVlan bool `json:"specifyvlan,omitempty" doc:"true if network offering supports vlans, false otherwise"`
|
||||
State string `json:"state,omitempty" doc:"state of the network offering. Can be Disabled/Enabled/Inactive"`
|
||||
SupportsStrechedL2Subnet bool `json:"supportsstrechedl2subnet,omitempty" doc:"true if network offering supports network that span multiple zones"`
|
||||
Tags string `json:"tags,omitempty" doc:"the tags for the network offering"`
|
||||
TrafficType string `json:"traffictype,omitempty" doc:"the traffic type for the network offering, supported types are Public, Management, Control, Guest, Vlan or Storage."`
|
||||
}
|
||||
|
||||
// ListRequest builds the ListNetworkOfferings request
|
||||
//
|
||||
// This doesn't take into account the IsDefault flag as the default value is true.
|
||||
func (no NetworkOffering) ListRequest() (ListCommand, error) {
|
||||
req := &ListNetworkOfferings{
|
||||
Availability: no.Availability,
|
||||
ID: no.ID,
|
||||
Name: no.Name,
|
||||
State: no.State,
|
||||
TrafficType: no.TrafficType,
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
//go:generate go run generate/main.go -interface=Listable ListNetworkOfferings
|
||||
|
||||
// ListNetworkOfferings represents a query for network offerings
|
||||
type ListNetworkOfferings struct {
|
||||
Availability string `json:"availability,omitempty" doc:"the availability of network offering. Default value is Required"`
|
||||
DisplayText string `json:"displaytext,omitempty" doc:"list network offerings by display text"`
|
||||
GuestIPType string `json:"guestiptype,omitempty" doc:"list network offerings by guest type: Shared or Isolated"`
|
||||
ID *UUID `json:"id,omitempty" doc:"list network offerings by id"`
|
||||
IsDefault *bool `json:"isdefault,omitempty" doc:"true if need to list only default network offerings. Default value is false"`
|
||||
IsTagged *bool `json:"istagged,omitempty" doc:"true if offering has tags specified"`
|
||||
Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
|
||||
Name string `json:"name,omitempty" doc:"list network offerings by name"`
|
||||
NetworkID *UUID `json:"networkid,omitempty" doc:"the ID of the network. Pass this in if you want to see the available network offering that a network can be changed to."`
|
||||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
SourceNATSupported *bool `json:"sourcenatsupported,omitempty" doc:"true if need to list only netwok offerings where source nat is supported, false otherwise"`
|
||||
SpecifyIPRanges *bool `json:"specifyipranges,omitempty" doc:"true if need to list only network offerings which support specifying ip ranges"`
|
||||
SpecifyVlan *bool `json:"specifyvlan,omitempty" doc:"the tags for the network offering."`
|
||||
State string `json:"state,omitempty" doc:"list network offerings by state"`
|
||||
SupportedServices []Service `json:"supportedservices,omitempty" doc:"list network offerings supporting certain services"`
|
||||
Tags string `json:"tags,omitempty" doc:"list network offerings by tags"`
|
||||
TrafficType string `json:"traffictype,omitempty" doc:"list by traffic type"`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"list network offerings available for network creation in specific zone"`
|
||||
_ bool `name:"listNetworkOfferings" description:"Lists all available network offerings."`
|
||||
}
|
||||
|
||||
// ListNetworkOfferingsResponse represents a list of service offerings
|
||||
type ListNetworkOfferingsResponse struct {
|
||||
Count int `json:"count"`
|
||||
NetworkOffering []NetworkOffering `json:"networkoffering"`
|
||||
}
|
||||
|
||||
// UpdateNetworkOffering represents a modification of a network offering
|
||||
type UpdateNetworkOffering struct {
|
||||
Availability string `json:"availability,omitempty" doc:"the availability of network offering. Default value is Required for Guest Virtual network offering; Optional for Guest Direct network offering"`
|
||||
DisplayText string `json:"displaytext,omitempty" doc:"the display text of the network offering"`
|
||||
ID *UUID `json:"id,omitempty" doc:"the id of the network offering"`
|
||||
KeepAliveEnabled *bool `json:"keepaliveenabled,omitempty" doc:"if true keepalive will be turned on in the loadbalancer. At the time of writing this has only an effect on haproxy; the mode http and httpclose options are unset in the haproxy conf file."`
|
||||
MaxConnections int `json:"maxconnections,omitempty" doc:"maximum number of concurrent connections supported by the network offering"`
|
||||
Name string `json:"name,omitempty" doc:"the name of the network offering"`
|
||||
SortKey int `json:"sortkey,omitempty" doc:"sort key of the network offering, integer"`
|
||||
State string `json:"state,omitempty" doc:"update state for the network offering"`
|
||||
_ bool `name:"updateNetworkOffering" description:"Updates a network offering."`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (UpdateNetworkOffering) Response() interface{} {
|
||||
return new(NetworkOffering)
|
||||
}
|
|
@ -1,43 +0,0 @@
|
|||
// code generated; DO NOT EDIT.
|
||||
|
||||
package egoscale
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (ListNetworkOfferings) Response() interface{} {
|
||||
return new(ListNetworkOfferingsResponse)
|
||||
}
|
||||
|
||||
// ListRequest returns itself
|
||||
func (ls *ListNetworkOfferings) ListRequest() (ListCommand, error) {
|
||||
if ls == nil {
|
||||
return nil, fmt.Errorf("%T cannot be nil", ls)
|
||||
}
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
// SetPage sets the current page
|
||||
func (ls *ListNetworkOfferings) SetPage(page int) {
|
||||
ls.Page = page
|
||||
}
|
||||
|
||||
// SetPageSize sets the page size
|
||||
func (ls *ListNetworkOfferings) SetPageSize(pageSize int) {
|
||||
ls.PageSize = pageSize
|
||||
}
|
||||
|
||||
// Each triggers the callback for each valid answer, or for any non-404 error
|
||||
func (ListNetworkOfferings) Each(resp interface{}, callback IterateItemFunc) {
|
||||
items, ok := resp.(*ListNetworkOfferingsResponse)
|
||||
if !ok {
|
||||
callback(nil, fmt.Errorf("wrong type, ListNetworkOfferingsResponse was expected, got %T", resp))
|
||||
return
|
||||
}
|
||||
|
||||
for i := range items.NetworkOffering {
|
||||
if !callback(&items.NetworkOffering[i], nil) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
|
@ -30,11 +30,6 @@ type Network struct {
|
|||
Netmask net.IP `json:"netmask,omitempty" doc:"the network's netmask"`
|
||||
NetworkCIDR *CIDR `json:"networkcidr,omitempty" doc:"the network CIDR of the guest network configured with IP reservation. It is the summation of CIDR and RESERVED_IP_RANGE"`
|
||||
NetworkDomain string `json:"networkdomain,omitempty" doc:"the network domain"`
|
||||
NetworkOfferingAvailability string `json:"networkofferingavailability,omitempty" doc:"availability of the network offering the network is created from"`
|
||||
NetworkOfferingConserveMode bool `json:"networkofferingconservemode,omitempty" doc:"true if network offering is ip conserve mode enabled"`
|
||||
NetworkOfferingDisplayText string `json:"networkofferingdisplaytext,omitempty" doc:"display text of the network offering the network is created from"`
|
||||
NetworkOfferingID *UUID `json:"networkofferingid,omitempty" doc:"network offering id the network is created from"`
|
||||
NetworkOfferingName string `json:"networkofferingname,omitempty" doc:"name of the network offering the network is created from"`
|
||||
PhysicalNetworkID *UUID `json:"physicalnetworkid,omitempty" doc:"the physical network id"`
|
||||
Related string `json:"related,omitempty" doc:"related to what other network configuration"`
|
||||
ReservedIPRange string `json:"reservediprange,omitempty" doc:"the network's IP range not to be used by CloudStack guest VMs and can be used for non CloudStack purposes"`
|
||||
|
@ -48,7 +43,7 @@ type Network struct {
|
|||
Tags []ResourceTag `json:"tags,omitempty" doc:"the list of resource tags associated with network"`
|
||||
TrafficType string `json:"traffictype,omitempty" doc:"the traffic type of the network"`
|
||||
Type string `json:"type,omitempty" doc:"the type of the network"`
|
||||
Vlan string `json:"vlan,omitemtpy" doc:"The vlan of the network. This parameter is visible to ROOT admins only"`
|
||||
Vlan string `json:"vlan,omitempty" doc:"The vlan of the network. This parameter is visible to ROOT admins only"`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"zone id of the network"`
|
||||
ZoneName string `json:"zonename,omitempty" doc:"the name of the zone the network belongs to"`
|
||||
ZonesNetworkSpans []Zone `json:"zonesnetworkspans,omitempty" doc:"If a network is enabled for 'streched l2 subnet' then represents zones on which network currently spans"`
|
||||
|
@ -56,7 +51,6 @@ type Network struct {
|
|||
|
||||
// ListRequest builds the ListNetworks request
|
||||
func (network Network) ListRequest() (ListCommand, error) {
|
||||
//TODO add tags support
|
||||
req := &ListNetworks{
|
||||
ID: network.ID,
|
||||
Keyword: network.Name, // this is a hack as listNetworks doesn't support to search by name.
|
||||
|
@ -117,7 +111,6 @@ type CreateNetwork struct {
|
|||
Name string `json:"name,omitempty" doc:"the name of the network"` // This field is required but might be empty
|
||||
Netmask net.IP `json:"netmask,omitempty" doc:"the netmask of the network. Required for managed networks."`
|
||||
NetworkDomain string `json:"networkdomain,omitempty" doc:"network domain"`
|
||||
NetworkOfferingID *UUID `json:"networkofferingid" doc:"the network offering id"`
|
||||
PhysicalNetworkID *UUID `json:"physicalnetworkid,omitempty" doc:"the Physical Network ID the network belongs to"`
|
||||
StartIP net.IP `json:"startip,omitempty" doc:"the beginning IP address in the network IP range. Required for managed networks."`
|
||||
StartIpv6 net.IP `json:"startipv6,omitempty" doc:"the beginning IPv6 address in the IPv6 network range"`
|
||||
|
@ -153,7 +146,6 @@ type UpdateNetwork struct {
|
|||
Name string `json:"name,omitempty" doc:"the new name for the network"`
|
||||
Netmask net.IP `json:"netmask,omitempty" doc:"the netmask of the network. Required for managed networks."`
|
||||
NetworkDomain string `json:"networkdomain,omitempty" doc:"network domain"`
|
||||
NetworkOfferingID *UUID `json:"networkofferingid,omitempty" doc:"network offering ID"`
|
||||
StartIP net.IP `json:"startip,omitempty" doc:"the beginning IP address in the network IP range. Required for managed networks."`
|
||||
}
|
||||
|
||||
|
@ -215,7 +207,7 @@ type ListNetworks struct {
|
|||
RestartRequired *bool `json:"restartrequired,omitempty" doc:"List networks by restartRequired"`
|
||||
SpecifyIPRanges *bool `json:"specifyipranges,omitempty" doc:"True if need to list only networks which support specifying ip ranges"`
|
||||
SupportedServices []Service `json:"supportedservices,omitempty" doc:"List networks supporting certain services"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs)"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs). Note: multiple tags are OR'ed, not AND'ed."`
|
||||
TrafficType string `json:"traffictype,omitempty" doc:"Type of the traffic"`
|
||||
Type string `json:"type,omitempty" doc:"The type of the network. Supported values are: Isolated and Shared"`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"The Zone ID of the network"`
|
||||
|
|
|
@ -103,7 +103,7 @@ func (RemoveIPFromNic) AsyncResponse() interface{} {
|
|||
|
||||
// ActivateIP6 (Async) activates the IP6 on the given NIC
|
||||
//
|
||||
// Exoscale specific API: https://community.exoscale.ch/api/compute/#activateip6_GET
|
||||
// Exoscale specific API: https://community.exoscale.com/api/compute/#activateip6_GET
|
||||
type ActivateIP6 struct {
|
||||
NicID *UUID `json:"nicid" doc:"the ID of the nic to which you want to assign the IPv6"`
|
||||
_ bool `name:"activateIp6" description:"Activate the IPv6 on the VM's nic"`
|
||||
|
|
|
@ -4,6 +4,26 @@ package egoscale
|
|||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[A-0]
|
||||
_ = x[AAAA-1]
|
||||
_ = x[ALIAS-2]
|
||||
_ = x[CNAME-3]
|
||||
_ = x[HINFO-4]
|
||||
_ = x[MX-5]
|
||||
_ = x[NAPTR-6]
|
||||
_ = x[NS-7]
|
||||
_ = x[POOL-8]
|
||||
_ = x[SPF-9]
|
||||
_ = x[SRV-10]
|
||||
_ = x[SSHFP-11]
|
||||
_ = x[TXT-12]
|
||||
_ = x[URL-13]
|
||||
}
|
||||
|
||||
const _Record_name = "AAAAAALIASCNAMEHINFOMXNAPTRNSPOOLSPFSRVSSHFPTXTURL"
|
||||
|
||||
var _Record_index = [...]uint8{0, 1, 5, 10, 15, 20, 22, 27, 29, 33, 36, 39, 44, 47, 50}
|
||||
|
|
|
@ -345,7 +345,6 @@ func (client *Client) request(ctx context.Context, command Command) (json.RawMes
|
|||
return nil, err
|
||||
}
|
||||
request = request.WithContext(ctx)
|
||||
request.Header.Add("User-Agent", UserAgent)
|
||||
|
||||
if method == "POST" {
|
||||
request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
|
|
@ -64,6 +64,8 @@ type ErrorCode int
|
|||
const (
|
||||
// Unauthorized represents ... (TODO)
|
||||
Unauthorized ErrorCode = 401
|
||||
// NotFound represents ... (TODO)
|
||||
NotFound ErrorCode = 404
|
||||
// MethodNotAllowed represents ... (TODO)
|
||||
MethodNotAllowed ErrorCode = 405
|
||||
// UnsupportedActionError represents ... (TODO)
|
||||
|
|
|
@ -41,7 +41,7 @@ func (req RunstatusValidationErrorResponse) Error() string {
|
|||
}
|
||||
return fmt.Sprintf("Runstatus error: %s", strings.Join(errs, "; "))
|
||||
}
|
||||
return fmt.Sprintf("Runstatus error")
|
||||
return "Runstatus error"
|
||||
}
|
||||
|
||||
func (client *Client) runstatusRequest(ctx context.Context, uri string, structParam interface{}, method string) (json.RawMessage, error) {
|
||||
|
@ -82,7 +82,6 @@ func (client *Client) runstatusRequest(ctx context.Context, uri string, structPa
|
|||
|
||||
hdr.Add("Authorization", fmt.Sprintf("Exoscale-HMAC-SHA256 %s:%s", client.APIKey, signature))
|
||||
hdr.Add("Exoscale-Date", time)
|
||||
hdr.Add("User-Agent", UserAgent)
|
||||
hdr.Add("Accept", "application/json")
|
||||
if params != "" {
|
||||
hdr.Add("Content-Type", "application/json")
|
||||
|
|
|
@ -3,6 +3,7 @@ package egoscale
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
@ -67,7 +68,7 @@ func (client *Client) GetRunstatusIncident(ctx context.Context, incident Runstat
|
|||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("%#v not found", incident)
|
||||
return nil, errors.New("incident not found")
|
||||
}
|
||||
|
||||
func (client *Client) getRunstatusIncident(ctx context.Context, incidentURL string) (*RunstatusIncident, error) {
|
||||
|
|
|
@ -3,6 +3,7 @@ package egoscale
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
|
@ -97,7 +98,7 @@ func (client *Client) GetRunstatusMaintenance(ctx context.Context, maintenance R
|
|||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("%#v not found", maintenance)
|
||||
return nil, errors.New("maintenance not found")
|
||||
}
|
||||
|
||||
func (client *Client) getRunstatusMaintenance(ctx context.Context, maintenanceURL string) (*RunstatusMaintenance, error) {
|
||||
|
|
|
@ -3,6 +3,7 @@ package egoscale
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
@ -99,7 +100,7 @@ func (client *Client) GetRunstatusPage(ctx context.Context, page RunstatusPage)
|
|||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("%#v not found", page)
|
||||
return nil, errors.New("page not found")
|
||||
}
|
||||
|
||||
func (client *Client) getRunstatusPage(ctx context.Context, pageURL string) (*RunstatusPage, error) {
|
||||
|
|
|
@ -3,6 +3,7 @@ package egoscale
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
|
@ -122,7 +123,7 @@ func (client *Client) GetRunstatusService(ctx context.Context, service Runstatus
|
|||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("%#v not found", service)
|
||||
return nil, errors.New("service not found")
|
||||
}
|
||||
|
||||
func (client *Client) getRunstatusService(ctx context.Context, serviceURL string) (*RunstatusService, error) {
|
||||
|
|
|
@ -12,7 +12,9 @@ import (
|
|||
)
|
||||
|
||||
func csQuotePlus(s string) string {
|
||||
s = strings.Replace(s, "+", "%20", -1)
|
||||
s = strings.ReplaceAll(s, "+", "%20")
|
||||
// This line is used to safeguard the "*" when producing the signature
|
||||
s = strings.ReplaceAll(s, "%2A", "*")
|
||||
return s
|
||||
}
|
||||
|
||||
|
|
|
@ -77,7 +77,6 @@ func (ss Snapshot) ListRequest() (ListCommand, error) {
|
|||
VolumeID: ss.VolumeID,
|
||||
SnapshotType: ss.SnapshotType,
|
||||
ZoneID: ss.ZoneID,
|
||||
// TODO: tags
|
||||
}
|
||||
|
||||
return req, nil
|
||||
|
@ -94,7 +93,7 @@ type ListSnapshots struct {
|
|||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
SnapshotType string `json:"snapshottype,omitempty" doc:"valid values are MANUAL or RECURRING."`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs)"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs). Note: multiple tags are OR'ed, not AND'ed."`
|
||||
VolumeID *UUID `json:"volumeid,omitempty" doc:"the ID of the disk volume"`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"list snapshots by zone id"`
|
||||
_ bool `name:"listSnapshots" description:"Lists all available snapshots for the account."`
|
||||
|
@ -137,3 +136,25 @@ func (RevertSnapshot) Response() interface{} {
|
|||
func (RevertSnapshot) AsyncResponse() interface{} {
|
||||
return new(BooleanResponse)
|
||||
}
|
||||
|
||||
// ExportSnapshot (Async) exports a volume snapshot
|
||||
type ExportSnapshot struct {
|
||||
ID *UUID `json:"id" doc:"The ID of the snapshot"`
|
||||
_ bool `name:"exportSnapshot" description:"Exports an instant snapshot of a volume."`
|
||||
}
|
||||
|
||||
// ExportSnapshotResponse represents the response of a snapshot export operation
|
||||
type ExportSnapshotResponse struct {
|
||||
PresignedURL string `json:"presignedurl"`
|
||||
MD5sum string `json:"md5sum"`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (ExportSnapshot) Response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
// AsyncResponse returns the struct to unmarshal the async job
|
||||
func (ExportSnapshot) AsyncResponse() interface{} {
|
||||
return new(ExportSnapshotResponse)
|
||||
}
|
||||
|
|
|
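As an illustration only (not part of the diff), the new ExportSnapshot command would be issued like any other asynchronous egoscale v1 command. This sketch assumes the v1 Client.RequestWithContext helper resolves async commands to their AsyncResponse type; the endpoint, credentials and snapshot ID below are placeholders.

package main

import (
    "context"
    "fmt"

    "github.com/exoscale/egoscale"
)

func main() {
    // Placeholder endpoint and credentials.
    client := egoscale.NewClient("https://api.exoscale.com/v1", "EXOxxxx", "secret")

    // Placeholder snapshot ID; a real one would come from a previous listSnapshots call.
    snapshotID := egoscale.MustParseUUID("00000000-0000-0000-0000-000000000000")

    // Assumption: RequestWithContext polls the async job and returns the AsyncResponse value.
    resp, err := client.RequestWithContext(context.Background(), &egoscale.ExportSnapshot{ID: snapshotID})
    if err != nil {
        panic(err)
    }

    export := resp.(*egoscale.ExportSnapshotResponse)
    fmt.Println(export.PresignedURL, export.MD5sum)
}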
@ -0,0 +1,25 @@
|
|||
package egoscale
|
||||
|
||||
// BucketUsage represents the usage (in bytes) for a bucket
|
||||
type BucketUsage struct {
|
||||
Created string `json:"created"`
|
||||
Name string `json:"name"`
|
||||
Region string `json:"region"`
|
||||
Usage int64 `json:"usage"`
|
||||
}
|
||||
|
||||
// ListBucketsUsage represents a listBucketsUsage API request
|
||||
type ListBucketsUsage struct {
|
||||
_ bool `name:"listBucketsUsage" description:"List"`
|
||||
}
|
||||
|
||||
// ListBucketsUsageResponse represents a listBucketsUsage API response
|
||||
type ListBucketsUsageResponse struct {
|
||||
Count int `json:"count"`
|
||||
BucketsUsage []BucketUsage `json:"bucketsusage"`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (ListBucketsUsage) Response() interface{} {
|
||||
return new(ListBucketsUsageResponse)
|
||||
}
|
|
@ -5,6 +5,7 @@ type Template struct {
|
|||
Account string `json:"account,omitempty" doc:"the account name to which the template belongs"`
|
||||
AccountID *UUID `json:"accountid,omitempty" doc:"the account id to which the template belongs"`
|
||||
Bootable bool `json:"bootable,omitempty" doc:"true if the ISO is bootable, false otherwise"`
|
||||
BootMode string `json:"bootmode" doc:"the template boot mode (legacy/uefi)"`
|
||||
Checksum string `json:"checksum,omitempty" doc:"checksum of the template"`
|
||||
Created string `json:"created,omitempty" doc:"the date this template was created"`
|
||||
CrossZones bool `json:"crossZones,omitempty" doc:"true if the template is managed across all Zones, false otherwise"`
|
||||
|
@ -70,14 +71,14 @@ func (template Template) ListRequest() (ListCommand, error) {
|
|||
|
||||
// ListTemplates represents a template query filter
|
||||
type ListTemplates struct {
|
||||
TemplateFilter string `json:"templatefilter" doc:"Possible values are \"featured\", \"self\", \"selfexecutable\",\"sharedexecutable\",\"executable\", and \"community\". * featured : templates that have been marked as featured and public. * self : templates that have been registered or created by the calling user. * selfexecutable : same as self, but only returns templates that can be used to deploy a new VM. * sharedexecutable : templates ready to be deployed that have been granted to the calling user by another user. * executable : templates that are owned by the calling user, or public templates, that can be used to deploy a VM. * community : templates that have been marked as public but not featured."`
|
||||
TemplateFilter string `json:"templatefilter,omitempty" doc:"Possible values are \"featured\", \"self\", \"selfexecutable\",\"sharedexecutable\",\"executable\", and \"community\". * featured : templates that have been marked as featured and public. * self : templates that have been registered or created by the calling user. * selfexecutable : same as self, but only returns templates that can be used to deploy a new VM. * sharedexecutable : templates ready to be deployed that have been granted to the calling user by another user. * executable : templates that are owned by the calling user, or public templates, that can be used to deploy a VM. * community : templates that have been marked as public but not featured."`
|
||||
ID *UUID `json:"id,omitempty" doc:"the template ID"`
|
||||
Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
|
||||
Name string `json:"name,omitempty" doc:"the template name"`
|
||||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
ShowRemoved *bool `json:"showremoved,omitempty" doc:"Show removed templates as well"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs)"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs). Note: multiple tags are OR'ed, not AND'ed."`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"list templates by zoneid"`
|
||||
_ bool `name:"listTemplates" description:"List all public, private, and privileged templates."`
|
||||
}
|
||||
|
@ -141,9 +142,10 @@ func (DeleteTemplate) AsyncResponse() interface{} {
|
|||
// RegisterCustomTemplate registers a new template
|
||||
type RegisterCustomTemplate struct {
|
||||
_ bool `name:"registerCustomTemplate" description:"Register a new template."`
|
||||
BootMode string `json:"bootmode" doc:"the template boot mode (legacy/uefi)"`
|
||||
Checksum string `json:"checksum" doc:"the MD5 checksum value of this template"`
|
||||
Details map[string]string `json:"details,omitempty" doc:"Template details in key/value pairs"`
|
||||
Displaytext string `json:"displaytext" doc:"the display text of the template"`
|
||||
Displaytext string `json:"displaytext,omitempty" doc:"the display text of the template"`
|
||||
Name string `json:"name" doc:"the name of the template"`
|
||||
PasswordEnabled *bool `json:"passwordenabled,omitempty" doc:"true if the template supports the password reset feature; default is false"`
|
||||
SSHKeyEnabled *bool `json:"sshkeyenabled,omitempty" doc:"true if the template supports the sshkey upload feature; default is false"`
|
||||
|
|
|
@@ -0,0 +1,3 @@
// Package api implements low-level primitives for interacting with the
// Exoscale API.
package api
@@ -0,0 +1,14 @@
package api

import "errors"

var (
    // ErrNotFound represents an error indicating a non-existent resource.
    ErrNotFound = errors.New("resource not found")

    // ErrInvalidRequest represents an error indicating that the caller's request is invalid.
    ErrInvalidRequest = errors.New("invalid request")

    // ErrAPIError represents an error indicating an API-side issue.
    ErrAPIError = errors.New("API error")
)
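For illustration (not part of the diff): callers are expected to match these sentinel errors with errors.Is, since the middleware below wraps them with %w. The classify helper and the wrapped error are hypothetical.

package main

import (
    "errors"
    "fmt"

    "github.com/exoscale/egoscale/v2/api"
)

// classify maps an error returned by an API call onto a human-readable category.
func classify(err error) string {
    switch {
    case err == nil:
        return "ok"
    case errors.Is(err, api.ErrNotFound):
        return "resource not found"
    case errors.Is(err, api.ErrInvalidRequest):
        return "invalid request"
    case errors.Is(err, api.ErrAPIError):
        return "server-side API error"
    default:
        return "other error"
    }
}

func main() {
    // Simulate an error as produced by the error-handler middleware.
    fmt.Println(classify(fmt.Errorf("%w: bad zone", api.ErrInvalidRequest)))
}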
@@ -0,0 +1,67 @@
package api

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
)

type Middleware interface {
    http.RoundTripper
}

// ErrorHandlerMiddleware is an Exoscale API HTTP client middleware that
// returns concrete Go errors according to API response errors.
type ErrorHandlerMiddleware struct {
    next http.RoundTripper
}

func NewAPIErrorHandlerMiddleware(next http.RoundTripper) Middleware {
    if next == nil {
        next = http.DefaultTransport
    }

    return &ErrorHandlerMiddleware{next: next}
}

func (m *ErrorHandlerMiddleware) RoundTrip(req *http.Request) (*http.Response, error) {
    resp, err := m.next.RoundTrip(req)
    if err != nil {
        // If the request returned a Go error don't bother analyzing the response
        // body, as there probably won't be any (e.g. connection timeout/refused).
        return resp, err
    }

    if resp.StatusCode >= 400 && resp.StatusCode <= 599 {
        var res struct {
            Message string `json:"message"`
        }

        data, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return nil, fmt.Errorf("error reading response body: %s", err)
        }

        if json.Valid(data) {
            if err = json.Unmarshal(data, &res); err != nil {
                return nil, fmt.Errorf("error unmarshaling response: %s", err)
            }
        } else {
            res.Message = string(data)
        }

        switch {
        case resp.StatusCode == http.StatusNotFound:
            return nil, ErrNotFound

        case resp.StatusCode >= 400 && resp.StatusCode < 500:
            return nil, fmt.Errorf("%w: %s", ErrInvalidRequest, res.Message)

        case resp.StatusCode >= 500:
            return nil, fmt.Errorf("%w: %s", ErrAPIError, res.Message)
        }
    }

    return resp, err
}
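A minimal sketch (not part of the diff) of how this middleware chains onto an http.Client transport, mirroring what NewClient does further down; the URL is a placeholder.

package main

import (
    "errors"
    "fmt"
    "net/http"

    "github.com/exoscale/egoscale/v2/api"
)

func main() {
    // Wrap the default transport so non-2xx API responses surface as the
    // package's sentinel errors instead of plain HTTP responses.
    hc := &http.Client{Transport: api.NewAPIErrorHandlerMiddleware(nil)}

    // Placeholder URL; any endpoint answering 404 would do.
    _, err := hc.Get("https://example.com/does-not-exist")

    fmt.Println(errors.Is(err, api.ErrNotFound)) // true when the server replied 404
}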
@@ -0,0 +1,67 @@
package api

import (
    "context"
    "fmt"
)

const (
    EndpointURL = "https://api.exoscale.com/"
    Prefix      = "v2.alpha"
)

const defaultReqEndpointEnv = "api"

// ReqEndpoint represents an Exoscale API request endpoint.
type ReqEndpoint struct {
    env  string
    zone string
}

// NewReqEndpoint returns a new Exoscale API request endpoint from an environment and zone.
func NewReqEndpoint(env, zone string) ReqEndpoint {
    re := ReqEndpoint{
        env:  env,
        zone: zone,
    }

    if re.env == "" {
        re.env = defaultReqEndpointEnv
    }

    return re
}

// Env returns the Exoscale API endpoint environment.
func (r *ReqEndpoint) Env() string {
    return r.env
}

// Zone returns the Exoscale API endpoint zone.
func (r *ReqEndpoint) Zone() string {
    return r.zone
}

// Host returns the Exoscale API endpoint host FQDN.
func (r *ReqEndpoint) Host() string {
    return fmt.Sprintf("%s-%s.exoscale.com", r.env, r.zone)
}

// WithEndpoint returns an augmented context instance containing the Exoscale endpoint to send
// the request to.
func WithEndpoint(ctx context.Context, endpoint ReqEndpoint) context.Context {
    return context.WithValue(ctx, ReqEndpoint{}, endpoint)
}

// WithZone is a shorthand for WithEndpoint where only the endpoint zone has to be specified.
// If a request endpoint is already set in the specified context instance, the value currently
// set for the environment will be reused.
func WithZone(ctx context.Context, zone string) context.Context {
    var env string

    if v, ok := ctx.Value(ReqEndpoint{}).(ReqEndpoint); ok {
        env = v.Env()
    }

    return WithEndpoint(ctx, NewReqEndpoint(env, zone))
}
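A short sketch (not part of the diff) of how a per-request zone is attached to a context and how the resulting host is derived; "ch-gva-2" is just an example zone name.

package main

import (
    "context"
    "fmt"

    "github.com/exoscale/egoscale/v2/api"
)

func main() {
    // Attach a target zone to the request context; the environment defaults
    // to "api", so the derived host is "api-ch-gva-2.exoscale.com".
    ctx := api.WithZone(context.Background(), "ch-gva-2")

    if v, ok := ctx.Value(api.ReqEndpoint{}).(api.ReqEndpoint); ok {
        fmt.Println(v.Host())
    }
}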
@@ -0,0 +1,127 @@
package api

import (
    "bytes"
    "context"
    "crypto/hmac"
    "crypto/sha256"
    "encoding/base64"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "sort"
    "strings"
    "time"
)

// SecurityProviderExoscale represents an Exoscale public API security
// provider.
type SecurityProviderExoscale struct {
    // ReqExpire represents the request expiration duration.
    ReqExpire time.Duration

    apiKey    string
    apiSecret string
}

// NewSecurityProvider returns a new Exoscale public API security
// provider to sign API requests using the specified API key/secret.
func NewSecurityProvider(apiKey, apiSecret string) (*SecurityProviderExoscale, error) {
    if apiKey == "" {
        return nil, errors.New("missing API key")
    }

    if apiSecret == "" {
        return nil, errors.New("missing API secret")
    }

    return &SecurityProviderExoscale{
        ReqExpire: 10 * time.Minute,

        apiKey:    apiKey,
        apiSecret: apiSecret,
    }, nil
}

// Intercept is an HTTP middleware that intercepts and signs client requests
// before sending them to the API endpoint.
func (s *SecurityProviderExoscale) Intercept(_ context.Context, req *http.Request) error {
    return s.signRequest(req, time.Now().UTC().Add(s.ReqExpire))
}

func (s *SecurityProviderExoscale) signRequest(req *http.Request, expiration time.Time) error {
    var (
        sigParts    []string
        headerParts []string
    )

    // Request method/URL path
    sigParts = append(sigParts, fmt.Sprintf("%s %s", req.Method, req.URL.Path))
    headerParts = append(headerParts, "EXO2-HMAC-SHA256 credential="+s.apiKey)

    // Request body if present
    body := ""
    if req.Body != nil {
        data, err := ioutil.ReadAll(req.Body)
        if err != nil {
            return err
        }
        err = req.Body.Close()
        if err != nil {
            return err
        }
        body = string(data)
        req.Body = ioutil.NopCloser(bytes.NewReader(data))
    }
    sigParts = append(sigParts, body)

    // Request query string parameters
    // Important: this is order-sensitive, we have to sort parameters alphabetically to ensure signed
    // values match the names listed in the "signed-query-args=" signature pragma.
    signedParams, paramsValues := extractRequestParameters(req)
    sigParts = append(sigParts, paramsValues)
    if len(signedParams) > 0 {
        headerParts = append(headerParts, "signed-query-args="+strings.Join(signedParams, ";"))
    }

    // Request headers -- none at the moment
    // Note: the same order-sensitive caution for query string parameters applies to headers.
    sigParts = append(sigParts, "")

    // Request expiration date (UNIX timestamp, no line return)
    sigParts = append(sigParts, fmt.Sprint(expiration.Unix()))
    headerParts = append(headerParts, "expires="+fmt.Sprint(expiration.Unix()))

    h := hmac.New(sha256.New, []byte(s.apiSecret))
    if _, err := h.Write([]byte(strings.Join(sigParts, "\n"))); err != nil {
        return err
    }
    headerParts = append(headerParts, "signature="+base64.StdEncoding.EncodeToString(h.Sum(nil)))

    req.Header.Set("Authorization", strings.Join(headerParts, ","))

    return nil
}

// extractRequestParameters returns the list of request URL parameter names
// and a string concatenating the values of the parameters.
func extractRequestParameters(req *http.Request) ([]string, string) {
    var (
        names  []string
        values string
    )

    for param, values := range req.URL.Query() {
        // Keep only parameters that hold exactly 1 value (i.e. no empty or multi-valued parameters)
        if len(values) == 1 {
            names = append(names, param)
        }
    }
    sort.Strings(names)

    for _, param := range names {
        values += req.URL.Query().Get(param)
    }

    return names, values
}
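A hedged sketch (not part of the diff) of signing a request directly with the security provider; the credentials and the request URL are placeholders, and real callers go through the generated client instead.

package main

import (
    "context"
    "fmt"
    "net/http"

    "github.com/exoscale/egoscale/v2/api"
)

func main() {
    // Placeholder credentials for illustration only.
    sp, err := api.NewSecurityProvider("EXOxxxx", "secret")
    if err != nil {
        panic(err)
    }

    // Placeholder URL shaped like a v2.alpha endpoint.
    req, err := http.NewRequest(http.MethodGet, "https://api-ch-gva-2.exoscale.com/v2.alpha/load-balancer", nil)
    if err != nil {
        panic(err)
    }

    // Intercept computes the EXO2-HMAC-SHA256 signature and sets the Authorization header.
    if err := sp.Intercept(context.Background(), req); err != nil {
        panic(err)
    }

    fmt.Println(req.Header.Get("Authorization"))
}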
@@ -0,0 +1,133 @@
package v2

import (
    "context"
    "errors"
    "fmt"
    "net/http"
    "net/url"
    "time"

    "github.com/exoscale/egoscale/v2/api"
    papi "github.com/exoscale/egoscale/v2/internal/public-api"
)

const defaultTimeout = 60 * time.Second

// ClientOpt represents a function setting an Exoscale API client option.
type ClientOpt func(*Client) error

// ClientOptWithAPIEndpoint returns a ClientOpt overriding the default Exoscale API endpoint.
func ClientOptWithAPIEndpoint(v string) ClientOpt {
    return func(c *Client) error {
        endpointURL, err := url.Parse(v)
        if err != nil {
            return fmt.Errorf("failed to parse URL: %s", err)
        }

        endpointURL = endpointURL.ResolveReference(&url.URL{Path: api.Prefix})
        c.apiEndpoint = endpointURL.String()

        return nil
    }
}

// ClientOptWithTimeout returns a ClientOpt overriding the default client timeout.
func ClientOptWithTimeout(v time.Duration) ClientOpt {
    return func(c *Client) error {
        c.timeout = v

        if v <= 0 {
            return errors.New("timeout value must be greater than 0")
        }

        return nil
    }
}

// ClientOptWithHTTPClient returns a ClientOpt overriding the default http.Client.
// Note: the Exoscale API client will chain additional middleware
// (http.RoundTripper) on the HTTP client internally, which can alter the HTTP
// requests and responses. If you don't want any other middleware than the ones
// currently set to your HTTP client, you should duplicate it and pass a copy
// instead.
func ClientOptWithHTTPClient(v *http.Client) ClientOpt {
    return func(c *Client) error {
        c.httpClient = v

        return nil
    }
}

// Client represents an Exoscale API client.
type Client struct {
    apiKey      string
    apiSecret   string
    apiEndpoint string
    timeout     time.Duration
    httpClient  *http.Client

    *papi.ClientWithResponses
}

// NewClient returns a new Exoscale API client, or an error if one couldn't be initialized.
func NewClient(apiKey, apiSecret string, opts ...ClientOpt) (*Client, error) {
    client := Client{
        apiKey:      apiKey,
        apiSecret:   apiSecret,
        apiEndpoint: api.EndpointURL,
        httpClient:  http.DefaultClient,
        timeout:     defaultTimeout,
    }

    if client.apiKey == "" || client.apiSecret == "" {
        return nil, fmt.Errorf("%w: missing or incomplete API credentials", ErrClientConfig)
    }

    for _, opt := range opts {
        if err := opt(&client); err != nil {
            return nil, fmt.Errorf("%w: %s", ErrClientConfig, err)
        }
    }

    apiSecurityProvider, err := api.NewSecurityProvider(client.apiKey, client.apiSecret)
    if err != nil {
        return nil, fmt.Errorf("unable to initialize API security provider: %s", err)
    }

    apiURL, err := url.Parse(client.apiEndpoint)
    if err != nil {
        return nil, fmt.Errorf("unable to initialize API client: %s", err)
    }
    apiURL = apiURL.ResolveReference(&url.URL{Path: api.Prefix})

    client.httpClient.Transport = api.NewAPIErrorHandlerMiddleware(client.httpClient.Transport)

    papiOpts := []papi.ClientOption{
        papi.WithHTTPClient(client.httpClient),
        papi.WithRequestEditorFn(
            papi.MultiRequestsEditor(
                apiSecurityProvider.Intercept,
                setEndpointFromContext,
            ),
        ),
    }

    if client.ClientWithResponses, err = papi.NewClientWithResponses(apiURL.String(), papiOpts...); err != nil {
        return nil, fmt.Errorf("unable to initialize API client: %s", err)
    }

    return &client, nil
}

// setEndpointFromContext is an HTTP client request interceptor that overrides the "Host" header
// with information from a request endpoint optionally set in the context instance. If none is
// found, the request is left untouched.
func setEndpointFromContext(ctx context.Context, req *http.Request) error {
    if v, ok := ctx.Value(api.ReqEndpoint{}).(api.ReqEndpoint); ok {
        req.Host = v.Host()
        req.URL.Host = v.Host()
    }

    return nil
}
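A hedged sketch (not part of the diff) of constructing the v2 client with an option; the credentials are placeholders and the "exov2" import alias is chosen here only because the package name is "v2".

package main

import (
    "fmt"
    "time"

    exov2 "github.com/exoscale/egoscale/v2"
)

func main() {
    // Placeholder credentials; real values come from the Exoscale console.
    client, err := exov2.NewClient(
        "EXOxxxx",
        "secret",
        exov2.ClientOptWithTimeout(2*time.Minute),
    )
    if err != nil {
        // A missing key/secret or a bad option surfaces as exov2.ErrClientConfig.
        fmt.Println("client configuration error:", err)
        return
    }

    _ = client // the generated ClientWithResponses methods are promoted onto client
}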
@@ -0,0 +1,5 @@
package v2

import "errors"

var ErrClientConfig = errors.New("client configuration error")
vendor/github.com/exoscale/egoscale/v2/internal/public-api/async.go (new file, 123 lines, generated, vendored)
@@ -0,0 +1,123 @@
package publicapi

import (
    "context"
    "errors"
    "fmt"
    "net/http"
    "time"

    v2 "github.com/exoscale/egoscale/v2/api"
)

const (
    operationStatePending = "pending"
    operationStateSuccess = "success"
    operationStateFailure = "failure"
    operationStateTimeout = "timeout"

    defaultPollingInterval = 3 * time.Second
)

// PollFunc represents a function invoked periodically in a polling loop. It returns a boolean flag
// true if the job is completed or false if polling must continue, and any error that occurred
// during the polling (which interrupts the polling regardless of the boolean flag value).
// Upon successful completion, an interface describing an opaque operation can be returned to the
// caller, which will have to perform type assertion depending on the PollFunc implementation.
type PollFunc func(ctx context.Context) (bool, interface{}, error)

// Poller represents a poller instance.
type Poller struct {
    interval time.Duration
    timeout  time.Duration
}

// NewPoller returns a Poller instance.
func NewPoller() *Poller {
    return &Poller{
        interval: defaultPollingInterval,
    }
}

// WithInterval sets the interval at which the polling function will be executed (default: 3s).
func (p *Poller) WithInterval(interval time.Duration) *Poller {
    if interval > 0 {
        p.interval = interval
    }

    return p
}

// WithTimeout sets the time out value after which the polling routine will be cancelled
// (default: no time out).
func (p *Poller) WithTimeout(timeout time.Duration) *Poller {
    if timeout > 0 {
        p.timeout = timeout
    }

    return p
}

// Poll starts the polling routine, executing the provided polling function at the configured
// polling interval. Upon successful polling, an opaque operation is returned to the caller, whose
// actual type has to be asserted depending on the PollFunc executed.
func (p *Poller) Poll(ctx context.Context, pf PollFunc) (interface{}, error) {
    if p.timeout > 0 {
        ctxWithTimeout, cancel := context.WithTimeout(ctx, p.timeout)
        defer cancel()
        ctx = ctxWithTimeout
    }

    ticker := time.NewTicker(p.interval)
    defer ticker.Stop()

    for {
        select {
        case <-ticker.C:
            done, res, err := pf(ctx)
            if err != nil {
                return nil, err
            }
            if !done {
                continue
            }

            return res, nil

        case <-ctx.Done():
            return nil, ctx.Err()
        }
    }
}

// OperationPoller returns a PollFunc function which queries the state of the specified job.
// Upon successful job completion, the type of the interface{} returned by the PollFunc is a
// pointer to a Resource object (*Resource).
func (c *ClientWithResponses) OperationPoller(zone string, jobID string) PollFunc {
    return func(ctx context.Context) (bool, interface{}, error) {
        resp, err := c.GetOperationWithResponse(v2.WithZone(ctx, zone), jobID)
        if err != nil {
            return true, nil, err
        }
        if resp.StatusCode() != http.StatusOK {
            return true, nil, fmt.Errorf("unexpected response from API: %s", resp.Status())
        }

        switch *resp.JSON200.State {
        case operationStatePending:
            return false, nil, nil

        case operationStateSuccess:
            return true, resp.JSON200.Reference, nil

        case operationStateFailure:
            return true, nil, errors.New("job failed")

        case operationStateTimeout:
            return true, nil, errors.New("job timed out")

        default:
            return true, nil, fmt.Errorf("unknown job state: %s", *resp.JSON200.State)
        }
    }
}
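A hedged sketch (not part of the diff) of driving the Poller with a hand-rolled PollFunc; the counter-based "job" is made up, and since this package is internal the snippet could only live inside the egoscale module. Real callers would pass the OperationPoller defined above instead.

package publicapi

import (
    "context"
    "fmt"
    "time"
)

// pollExample is a hypothetical illustration: it polls a toy PollFunc that
// reports completion on its third invocation and hands back an opaque result.
func pollExample() error {
    attempts := 0

    pf := func(ctx context.Context) (bool, interface{}, error) {
        attempts++
        if attempts < 3 {
            return false, nil, nil // not done yet, keep polling
        }
        return true, "done", nil // completed with an opaque result
    }

    res, err := NewPoller().
        WithInterval(100 * time.Millisecond).
        WithTimeout(5 * time.Second).
        Poll(context.Background(), pf)
    if err != nil {
        return err
    }

    fmt.Println(res.(string)) // prints "done"
    return nil
}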
vendor/github.com/exoscale/egoscale/v2/internal/public-api/loadbalancer.go (new file, 70 lines, generated, vendored)
@ -0,0 +1,70 @@
package publicapi

import (
	"encoding/json"
	"time"
)

// UnmarshalJSON unmarshals a LoadBalancer structure via a temporary structure whose CreatedAt
// field is a string, so that the original timestamp (ISO 8601) can be parsed into a time.Time
// value, since json.Unmarshal() only supports the RFC 3339 format.
func (lb *LoadBalancer) UnmarshalJSON(data []byte) error {
	raw := struct {
		CreatedAt   *string                `json:"created-at,omitempty"`
		Description *string                `json:"description,omitempty"`
		Id          *string                `json:"id,omitempty"` // nolint:golint
		Ip          *string                `json:"ip,omitempty"` // nolint:golint
		Name        *string                `json:"name,omitempty"`
		Services    *[]LoadBalancerService `json:"services,omitempty"`
		State       *string                `json:"state,omitempty"`
	}{}

	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	if raw.CreatedAt != nil {
		createdAt, err := time.Parse(iso8601Format, *raw.CreatedAt)
		if err != nil {
			return err
		}
		lb.CreatedAt = &createdAt
	}

	lb.Description = raw.Description
	lb.Id = raw.Id
	lb.Ip = raw.Ip
	lb.Name = raw.Name
	lb.Services = raw.Services
	lb.State = raw.State

	return nil
}

// MarshalJSON returns the JSON encoding of a LoadBalancer structure after having formatted the
// CreatedAt field using the original timestamp format (ISO 8601), since time.MarshalJSON() only
// supports the RFC 3339 format.
func (lb *LoadBalancer) MarshalJSON() ([]byte, error) {
	raw := struct {
		CreatedAt   *string                `json:"created-at,omitempty"`
		Description *string                `json:"description,omitempty"`
		Id          *string                `json:"id,omitempty"` // nolint:golint
		Ip          *string                `json:"ip,omitempty"` // nolint:golint
		Name        *string                `json:"name,omitempty"`
		Services    *[]LoadBalancerService `json:"services,omitempty"`
		State       *string                `json:"state,omitempty"`
	}{}

	if lb.CreatedAt != nil {
		createdAt := lb.CreatedAt.Format(iso8601Format)
		raw.CreatedAt = &createdAt
	}

	raw.Description = lb.Description
	raw.Id = lb.Id
	raw.Ip = lb.Ip
	raw.Name = lb.Name
	raw.Services = lb.Services
	raw.State = lb.State

	return json.Marshal(raw)
}
28 vendor/github.com/exoscale/egoscale/v2/internal/public-api/mock.go generated vendored Normal file
@ -0,0 +1,28 @@
package publicapi

import (
	"net/http"

	"github.com/jarcoal/httpmock"
	"github.com/stretchr/testify/mock"
)

type MockClient struct {
	mock.Mock
	*httpmock.MockTransport
	ClientWithResponsesInterface
}

func NewMockClient() *MockClient {
	var c MockClient

	c.MockTransport = httpmock.NewMockTransport()

	return &c
}

func (c *MockClient) Do(req *http.Request) (*http.Response, error) {
	hc := http.Client{Transport: c.MockTransport}

	return hc.Do(req)
}
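// Illustrative usage sketch (not part of the vendored file; the URL and body
// are placeholders): register a canned responder on the mock transport, then
// issue a request through the mock client.
//
//	mc := NewMockClient()
//	mc.RegisterResponder("GET", "https://api.example.net/v2/zone",
//		httpmock.NewStringResponder(200, `{"zones": []}`))
//
//	req, _ := http.NewRequest("GET", "https://api.example.net/v2/zone", nil)
//	resp, err := mc.Do(req)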
9554 vendor/github.com/exoscale/egoscale/v2/internal/public-api/public-api.gen.go generated vendored Normal file
File diff suppressed because it is too large
24 vendor/github.com/exoscale/egoscale/v2/internal/public-api/public-api.go generated vendored Normal file
@ -0,0 +1,24 @@
// Package publicapi is an internal package containing code generated from the
// Exoscale API OpenAPI specs, as well as helpers and transition types exposed
// in the public-facing package.
package publicapi

//go:generate oapi-codegen -generate types,client -package publicapi -o public-api.gen.go ../../../public-api.json

// OptionalString returns the dereferenced string value of v if not nil, otherwise an empty string.
func OptionalString(v *string) string {
	if v != nil {
		return *v
	}

	return ""
}

// OptionalInt64 returns the dereferenced int64 value of v if not nil, otherwise 0.
func OptionalInt64(v *int64) int64 {
	if v != nil {
		return *v
	}

	return 0
}
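// Illustrative usage sketch (not part of the vendored file; "tmpl" is a
// placeholder for any API value with optional pointer fields): these helpers
// avoid explicit nil checks when reading optional fields.
//
//	name := OptionalString(tmpl.Name) // "" when tmpl.Name is nil
//	size := OptionalInt64(tmpl.Size)  // 0 when tmpl.Size is nil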
20 vendor/github.com/exoscale/egoscale/v2/internal/public-api/request.go generated vendored Normal file
@ -0,0 +1,20 @@
package publicapi

import (
	"context"
	"net/http"
)

// MultiRequestsEditor returns an oapi-codegen-compatible RequestEditorFn that executes the
// provided RequestEditorFn functions sequentially.
func MultiRequestsEditor(fns ...RequestEditorFn) RequestEditorFn {
	return func(ctx context.Context, req *http.Request) error {
		for _, fn := range fns {
			if err := fn(ctx, req); err != nil {
				return err
			}
		}

		return nil
	}
}
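// Illustrative usage sketch (not part of the vendored file; header values are
// placeholders): combine several request editors, e.g. to set authentication
// and tracing headers on every API call.
//
//	editor := MultiRequestsEditor(
//		func(ctx context.Context, req *http.Request) error {
//			req.Header.Set("Authorization", "...")
//			return nil
//		},
//		func(ctx context.Context, req *http.Request) error {
//			req.Header.Set("X-Request-ID", "...")
//			return nil
//		},
//	)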
86 vendor/github.com/exoscale/egoscale/v2/internal/public-api/sks_cluster.go generated vendored Normal file
@ -0,0 +1,86 @@
|
|||
package publicapi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UnmarshalJSON unmarshals a SksCluster structure via a temporary structure whose CreatedAt
// field is a string, so that the original timestamp (ISO 8601) can be parsed into a time.Time
// value, since json.Unmarshal() only supports the RFC 3339 format.
|
||||
func (c *SksCluster) UnmarshalJSON(data []byte) error {
|
||||
raw := struct {
|
||||
Addons *[]string `json:"addons,omitempty"`
|
||||
Cni *string `json:"cni,omitempty"`
|
||||
CreatedAt *string `json:"created-at,omitempty"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
Endpoint *string `json:"endpoint,omitempty"`
|
||||
Id *string `json:"id,omitempty"` // nolint:golint
|
||||
Level *string `json:"level,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
Nodepools *[]SksNodepool `json:"nodepools,omitempty"`
|
||||
State *string `json:"state,omitempty"`
|
||||
Version *string `json:"version,omitempty"`
|
||||
}{}
|
||||
|
||||
if err := json.Unmarshal(data, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if raw.CreatedAt != nil {
|
||||
createdAt, err := time.Parse(iso8601Format, *raw.CreatedAt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.CreatedAt = &createdAt
|
||||
}
|
||||
|
||||
c.Addons = raw.Addons
|
||||
c.Cni = raw.Cni
|
||||
c.Description = raw.Description
|
||||
c.Endpoint = raw.Endpoint
|
||||
c.Id = raw.Id
|
||||
c.Level = raw.Level
|
||||
c.Name = raw.Name
|
||||
c.Nodepools = raw.Nodepools
|
||||
c.State = raw.State
|
||||
c.Version = raw.Version
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON returns the JSON encoding of a SksCluster structure after having formatted the
// CreatedAt field using the original timestamp format (ISO 8601), since time.MarshalJSON() only
// supports the RFC 3339 format.
|
||||
func (c *SksCluster) MarshalJSON() ([]byte, error) {
|
||||
raw := struct {
|
||||
Addons *[]string `json:"addons,omitempty"`
|
||||
Cni *string `json:"cni,omitempty"`
|
||||
CreatedAt *string `json:"created-at,omitempty"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
Endpoint *string `json:"endpoint,omitempty"`
|
||||
Id *string `json:"id,omitempty"` // nolint:golint
|
||||
Level *string `json:"level,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
Nodepools *[]SksNodepool `json:"nodepools,omitempty"`
|
||||
State *string `json:"state,omitempty"`
|
||||
Version *string `json:"version,omitempty"`
|
||||
}{}
|
||||
|
||||
if c.CreatedAt != nil {
|
||||
createdAt := c.CreatedAt.Format(iso8601Format)
|
||||
raw.CreatedAt = &createdAt
|
||||
}
|
||||
|
||||
raw.Addons = c.Addons
|
||||
raw.Cni = c.Cni
|
||||
raw.Description = c.Description
|
||||
raw.Endpoint = c.Endpoint
|
||||
raw.Id = c.Id
|
||||
raw.Level = c.Level
|
||||
raw.Name = c.Name
|
||||
raw.Nodepools = c.Nodepools
|
||||
raw.State = c.State
|
||||
raw.Version = c.Version
|
||||
|
||||
return json.Marshal(raw)
|
||||
}
94 vendor/github.com/exoscale/egoscale/v2/internal/public-api/sks_nodepool.go generated vendored Normal file
@ -0,0 +1,94 @@
|
|||
package publicapi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UnmarshalJSON unmarshals a SksNodepool structure via a temporary structure whose CreatedAt
// field is a string, so that the original timestamp (ISO 8601) can be parsed into a time.Time
// value, since json.Unmarshal() only supports the RFC 3339 format.
|
||||
func (n *SksNodepool) UnmarshalJSON(data []byte) error {
|
||||
raw := struct {
|
||||
AntiAffinityGroups *[]AntiAffinityGroup `json:"anti-affinity-groups,omitempty"`
|
||||
CreatedAt *string `json:"created-at,omitempty"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
DiskSize *int64 `json:"disk-size,omitempty"`
|
||||
Id *string `json:"id,omitempty"` // nolint:golint
|
||||
InstancePool *InstancePool `json:"instance-pool,omitempty"`
|
||||
InstanceType *InstanceType `json:"instance-type,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
SecurityGroups *[]SecurityGroup `json:"security-groups,omitempty"`
|
||||
Size *int64 `json:"size,omitempty"`
|
||||
State *string `json:"state,omitempty"`
|
||||
Template *Template `json:"template,omitempty"`
|
||||
Version *string `json:"version,omitempty"`
|
||||
}{}
|
||||
|
||||
if err := json.Unmarshal(data, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if raw.CreatedAt != nil {
|
||||
createdAt, err := time.Parse(iso8601Format, *raw.CreatedAt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.CreatedAt = &createdAt
|
||||
}
|
||||
|
||||
n.AntiAffinityGroups = raw.AntiAffinityGroups
|
||||
n.Description = raw.Description
|
||||
n.DiskSize = raw.DiskSize
|
||||
n.Id = raw.Id
|
||||
n.InstancePool = raw.InstancePool
|
||||
n.InstanceType = raw.InstanceType
|
||||
n.Name = raw.Name
|
||||
n.SecurityGroups = raw.SecurityGroups
|
||||
n.Size = raw.Size
|
||||
n.State = raw.State
|
||||
n.Template = raw.Template
|
||||
n.Version = raw.Version
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON returns the JSON encoding of a SksNodepool structure after having formatted the
// CreatedAt field using the original timestamp format (ISO 8601), since time.MarshalJSON() only
// supports the RFC 3339 format.
|
||||
func (n *SksNodepool) MarshalJSON() ([]byte, error) {
|
||||
raw := struct {
|
||||
AntiAffinityGroups *[]AntiAffinityGroup `json:"anti-affinity-groups,omitempty"`
|
||||
CreatedAt *string `json:"created-at,omitempty"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
DiskSize *int64 `json:"disk-size,omitempty"`
|
||||
Id *string `json:"id,omitempty"` // nolint:golint
|
||||
InstancePool *InstancePool `json:"instance-pool,omitempty"`
|
||||
InstanceType *InstanceType `json:"instance-type,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
SecurityGroups *[]SecurityGroup `json:"security-groups,omitempty"`
|
||||
Size *int64 `json:"size,omitempty"`
|
||||
State *string `json:"state,omitempty"`
|
||||
Template *Template `json:"template,omitempty"`
|
||||
Version *string `json:"version,omitempty"`
|
||||
}{}
|
||||
|
||||
if n.CreatedAt != nil {
|
||||
createdAt := n.CreatedAt.Format(iso8601Format)
|
||||
raw.CreatedAt = &createdAt
|
||||
}
|
||||
|
||||
raw.AntiAffinityGroups = n.AntiAffinityGroups
|
||||
raw.Description = n.Description
|
||||
raw.DiskSize = n.DiskSize
|
||||
raw.Id = n.Id
|
||||
raw.InstancePool = n.InstancePool
|
||||
raw.InstanceType = n.InstanceType
|
||||
raw.Name = n.Name
|
||||
raw.SecurityGroups = n.SecurityGroups
|
||||
raw.Size = n.Size
|
||||
raw.State = n.State
|
||||
raw.Template = n.Template
|
||||
raw.Version = n.Version
|
||||
|
||||
return json.Marshal(raw)
|
||||
}
102 vendor/github.com/exoscale/egoscale/v2/internal/public-api/template.go generated vendored Normal file
@ -0,0 +1,102 @@
|
|||
package publicapi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UnmarshalJSON unmarshals a Template structure via a temporary structure whose CreatedAt
// field is a string, so that the original timestamp (ISO 8601) can be parsed into a time.Time
// value, since json.Unmarshal() only supports the RFC 3339 format.
|
||||
func (t *Template) UnmarshalJSON(data []byte) error {
|
||||
raw := struct {
|
||||
CreatedAt *string `json:"created-at,omitempty"`
|
||||
BootMode *string `json:"boot-mode,omitempty"`
|
||||
Build *string `json:"build,omitempty"`
|
||||
Checksum *string `json:"checksum,omitempty"`
|
||||
DefaultUser *string `json:"default-user,omitempty"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
Family *string `json:"family,omitempty"`
|
||||
Id *string `json:"id,omitempty"` // nolint:golint
|
||||
Name *string `json:"name,omitempty"`
|
||||
PasswordEnabled *bool `json:"password-enabled,omitempty"`
|
||||
Size *int64 `json:"size,omitempty"`
|
||||
SshKeyEnabled *bool `json:"ssh-key-enabled,omitempty"` // nolint:golint
|
||||
Url *string `json:"url,omitempty"` // nolint:golint
|
||||
Version *string `json:"version,omitempty"`
|
||||
Visibility *string `json:"visibility,omitempty"`
|
||||
}{}
|
||||
|
||||
if err := json.Unmarshal(data, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if raw.CreatedAt != nil {
|
||||
createdAt, err := time.Parse(iso8601Format, *raw.CreatedAt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.CreatedAt = &createdAt
|
||||
}
|
||||
|
||||
t.BootMode = raw.BootMode
|
||||
t.Build = raw.Build
|
||||
t.Checksum = raw.Checksum
|
||||
t.DefaultUser = raw.DefaultUser
|
||||
t.Description = raw.Description
|
||||
t.Family = raw.Family
|
||||
t.Id = raw.Id
|
||||
t.Name = raw.Name
|
||||
t.PasswordEnabled = raw.PasswordEnabled
|
||||
t.Size = raw.Size
|
||||
t.SshKeyEnabled = raw.SshKeyEnabled
|
||||
t.Url = raw.Url
|
||||
t.Version = raw.Version
|
||||
t.Visibility = raw.Visibility
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON returns the JSON encoding of a Template structure after having formatted the
// CreatedAt field using the original timestamp format (ISO 8601), since time.MarshalJSON() only
// supports the RFC 3339 format.
|
||||
func (t *Template) MarshalJSON() ([]byte, error) {
|
||||
raw := struct {
|
||||
CreatedAt *string `json:"created-at,omitempty"`
|
||||
BootMode *string `json:"boot-mode,omitempty"`
|
||||
Build *string `json:"build,omitempty"`
|
||||
Checksum *string `json:"checksum,omitempty"`
|
||||
DefaultUser *string `json:"default-user,omitempty"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
Family *string `json:"family,omitempty"`
|
||||
Id *string `json:"id,omitempty"` // nolint:golint
|
||||
Name *string `json:"name,omitempty"`
|
||||
PasswordEnabled *bool `json:"password-enabled,omitempty"`
|
||||
Size *int64 `json:"size,omitempty"`
|
||||
SshKeyEnabled *bool `json:"ssh-key-enabled,omitempty"` // nolint:golint
|
||||
Url *string `json:"url,omitempty"` // nolint:golint
|
||||
Version *string `json:"version,omitempty"`
|
||||
Visibility *string `json:"visibility,omitempty"`
|
||||
}{}
|
||||
|
||||
if t.CreatedAt != nil {
|
||||
createdAt := t.CreatedAt.Format(iso8601Format)
|
||||
raw.CreatedAt = &createdAt
|
||||
}
|
||||
|
||||
raw.BootMode = t.BootMode
|
||||
raw.Build = t.Build
|
||||
raw.Checksum = t.Checksum
|
||||
raw.DefaultUser = t.DefaultUser
|
||||
raw.Description = t.Description
|
||||
raw.Family = t.Family
|
||||
raw.Id = t.Id
|
||||
raw.Name = t.Name
|
||||
raw.PasswordEnabled = t.PasswordEnabled
|
||||
raw.Size = t.Size
|
||||
raw.SshKeyEnabled = t.SshKeyEnabled
|
||||
raw.Url = t.Url
|
||||
raw.Version = t.Version
|
||||
raw.Visibility = t.Visibility
|
||||
|
||||
return json.Marshal(raw)
|
||||
}
|
|
@ -0,0 +1,3 @@
package publicapi

const iso8601Format = "2006-01-02T15:04:05Z"
|
@ -0,0 +1,382 @@
|
|||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
apiv2 "github.com/exoscale/egoscale/v2/api"
|
||||
papi "github.com/exoscale/egoscale/v2/internal/public-api"
|
||||
)
|
||||
|
||||
// NetworkLoadBalancerServerStatus represents a Network Load Balancer service target server status.
|
||||
type NetworkLoadBalancerServerStatus struct {
|
||||
InstanceIP net.IP
|
||||
Status string
|
||||
}
|
||||
|
||||
func nlbServerStatusFromAPI(st *papi.LoadBalancerServerStatus) *NetworkLoadBalancerServerStatus {
|
||||
return &NetworkLoadBalancerServerStatus{
|
||||
InstanceIP: net.ParseIP(papi.OptionalString(st.PublicIp)),
|
||||
Status: papi.OptionalString(st.Status),
|
||||
}
|
||||
}
|
||||
|
||||
// NetworkLoadBalancerServiceHealthcheck represents a Network Load Balancer service healthcheck.
|
||||
type NetworkLoadBalancerServiceHealthcheck struct {
|
||||
Mode string
|
||||
Port uint16
|
||||
Interval time.Duration
|
||||
Timeout time.Duration
|
||||
Retries int64
|
||||
URI string
|
||||
TLSSNI string
|
||||
}
|
||||
|
||||
// NetworkLoadBalancerService represents a Network Load Balancer service.
|
||||
type NetworkLoadBalancerService struct {
|
||||
ID string
|
||||
Name string
|
||||
Description string
|
||||
InstancePoolID string
|
||||
Protocol string
|
||||
Port uint16
|
||||
TargetPort uint16
|
||||
Strategy string
|
||||
Healthcheck NetworkLoadBalancerServiceHealthcheck
|
||||
State string
|
||||
HealthcheckStatus []*NetworkLoadBalancerServerStatus
|
||||
}
|
||||
|
||||
func nlbServiceFromAPI(svc *papi.LoadBalancerService) *NetworkLoadBalancerService {
|
||||
return &NetworkLoadBalancerService{
|
||||
ID: papi.OptionalString(svc.Id),
|
||||
Name: papi.OptionalString(svc.Name),
|
||||
Description: papi.OptionalString(svc.Description),
|
||||
InstancePoolID: papi.OptionalString(svc.InstancePool.Id),
|
||||
Protocol: papi.OptionalString(svc.Protocol),
|
||||
Port: uint16(papi.OptionalInt64(svc.Port)),
|
||||
TargetPort: uint16(papi.OptionalInt64(svc.TargetPort)),
|
||||
Strategy: papi.OptionalString(svc.Strategy),
|
||||
Healthcheck: NetworkLoadBalancerServiceHealthcheck{
|
||||
Mode: svc.Healthcheck.Mode,
|
||||
Port: uint16(svc.Healthcheck.Port),
|
||||
Interval: time.Duration(papi.OptionalInt64(svc.Healthcheck.Interval)) * time.Second,
|
||||
Timeout: time.Duration(papi.OptionalInt64(svc.Healthcheck.Timeout)) * time.Second,
|
||||
Retries: papi.OptionalInt64(svc.Healthcheck.Retries),
|
||||
URI: papi.OptionalString(svc.Healthcheck.Uri),
|
||||
TLSSNI: papi.OptionalString(svc.Healthcheck.TlsSni),
|
||||
},
|
||||
HealthcheckStatus: func() []*NetworkLoadBalancerServerStatus {
|
||||
statuses := make([]*NetworkLoadBalancerServerStatus, 0)
|
||||
|
||||
if svc.HealthcheckStatus != nil {
|
||||
for _, st := range *svc.HealthcheckStatus {
|
||||
st := st
|
||||
statuses = append(statuses, nlbServerStatusFromAPI(&st))
|
||||
}
|
||||
}
|
||||
|
||||
return statuses
|
||||
}(),
|
||||
State: papi.OptionalString(svc.State),
|
||||
}
|
||||
}
|
||||
|
||||
// NetworkLoadBalancer represents a Network Load Balancer instance.
|
||||
type NetworkLoadBalancer struct {
|
||||
ID string
|
||||
Name string
|
||||
Description string
|
||||
CreatedAt time.Time
|
||||
IPAddress net.IP
|
||||
Services []*NetworkLoadBalancerService
|
||||
State string
|
||||
|
||||
c *Client
|
||||
zone string
|
||||
}
|
||||
|
||||
func nlbFromAPI(nlb *papi.LoadBalancer) *NetworkLoadBalancer {
|
||||
return &NetworkLoadBalancer{
|
||||
ID: papi.OptionalString(nlb.Id),
|
||||
Name: papi.OptionalString(nlb.Name),
|
||||
Description: papi.OptionalString(nlb.Description),
|
||||
CreatedAt: *nlb.CreatedAt,
|
||||
IPAddress: net.ParseIP(papi.OptionalString(nlb.Ip)),
|
||||
State: papi.OptionalString(nlb.State),
|
||||
Services: func() []*NetworkLoadBalancerService {
|
||||
services := make([]*NetworkLoadBalancerService, 0)
|
||||
|
||||
if nlb.Services != nil {
|
||||
for _, svc := range *nlb.Services {
|
||||
svc := svc
|
||||
services = append(services, nlbServiceFromAPI(&svc))
|
||||
}
|
||||
}
|
||||
|
||||
return services
|
||||
}(),
|
||||
}
|
||||
}
|
||||
|
||||
// AddService adds a service to the Network Load Balancer instance.
|
||||
func (nlb *NetworkLoadBalancer) AddService(ctx context.Context,
|
||||
svc *NetworkLoadBalancerService) (*NetworkLoadBalancerService, error) {
|
||||
var (
|
||||
port = int64(svc.Port)
|
||||
targetPort = int64(svc.TargetPort)
|
||||
healthcheckPort = int64(svc.Healthcheck.Port)
|
||||
healthcheckInterval = int64(svc.Healthcheck.Interval.Seconds())
|
||||
healthcheckTimeout = int64(svc.Healthcheck.Timeout.Seconds())
|
||||
)
|
||||
|
||||
// The API doesn't return the NLB service created directly, so in order to return a
|
||||
// *NetworkLoadBalancerService corresponding to the new service we have to manually
|
||||
// compare the list of services on the NLB instance before and after the service
|
||||
// creation, and identify the service that wasn't there before.
|
||||
// Note: in case of multiple service creations in parallel, this technique is subject
// to a race condition, as we could return an unrelated service. To mitigate this, we
// also compare the name of the new service to the name specified in the svc
// parameter.
|
||||
services := make(map[string]struct{})
|
||||
for _, svc := range nlb.Services {
|
||||
services[svc.ID] = struct{}{}
|
||||
}
|
||||
|
||||
resp, err := nlb.c.AddServiceToLoadBalancerWithResponse(
|
||||
apiv2.WithZone(ctx, nlb.zone),
|
||||
nlb.ID,
|
||||
papi.AddServiceToLoadBalancerJSONRequestBody{
|
||||
Name: svc.Name,
|
||||
Description: &svc.Description,
|
||||
Healthcheck: papi.Healthcheck{
|
||||
Mode: svc.Healthcheck.Mode,
|
||||
Port: healthcheckPort,
|
||||
Interval: &healthcheckInterval,
|
||||
Timeout: &healthcheckTimeout,
|
||||
Retries: &svc.Healthcheck.Retries,
|
||||
Uri: func() *string {
|
||||
if strings.HasPrefix(svc.Healthcheck.Mode, "http") {
|
||||
return &svc.Healthcheck.URI
|
||||
}
|
||||
return nil
|
||||
}(),
|
||||
TlsSni: func() *string {
|
||||
if svc.Healthcheck.Mode == "https" && svc.Healthcheck.TLSSNI != "" {
|
||||
return &svc.Healthcheck.TLSSNI
|
||||
}
|
||||
return nil
|
||||
}(),
|
||||
},
|
||||
InstancePool: papi.InstancePool{Id: &svc.InstancePoolID},
|
||||
Port: port,
|
||||
TargetPort: targetPort,
|
||||
Protocol: svc.Protocol,
|
||||
Strategy: svc.Strategy,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := papi.NewPoller().
|
||||
WithTimeout(nlb.c.timeout).
|
||||
Poll(ctx, nlb.c.OperationPoller(nlb.zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nlbUpdated, err := nlb.c.GetNetworkLoadBalancer(ctx, nlb.zone, *res.(*papi.Reference).Id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Look for an unknown service: if we find one we hope it's the one we've just created.
|
||||
for _, s := range nlbUpdated.Services {
|
||||
if _, ok := services[s.ID]; !ok && s.Name == svc.Name {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("unable to identify the service created")
|
||||
}
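// Illustrative usage sketch (not part of the vendored file; IDs and values are
// placeholders): add a TCP service backed by an Instance Pool, with an HTTPS
// healthcheck, to an existing NLB instance.
//
//	svc, err := nlb.AddService(ctx, &NetworkLoadBalancerService{
//		Name:           "web",
//		InstancePoolID: "11111111-2222-3333-4444-555555555555",
//		Protocol:       "tcp",
//		Port:           443,
//		TargetPort:     8443,
//		Strategy:       "round-robin",
//		Healthcheck: NetworkLoadBalancerServiceHealthcheck{
//			Mode:     "https",
//			Port:     8443,
//			Interval: 10 * time.Second,
//			Timeout:  5 * time.Second,
//			Retries:  2,
//			URI:      "/healthz",
//			TLSSNI:   "example.net",
//		},
//	})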
|
||||
|
||||
// UpdateService updates the specified Network Load Balancer service.
|
||||
func (nlb *NetworkLoadBalancer) UpdateService(ctx context.Context, svc *NetworkLoadBalancerService) error {
|
||||
var (
|
||||
port = int64(svc.Port)
|
||||
targetPort = int64(svc.TargetPort)
|
||||
healthcheckPort = int64(svc.Healthcheck.Port)
|
||||
healthcheckInterval = int64(svc.Healthcheck.Interval.Seconds())
|
||||
healthcheckTimeout = int64(svc.Healthcheck.Timeout.Seconds())
|
||||
)
|
||||
|
||||
resp, err := nlb.c.UpdateLoadBalancerServiceWithResponse(
|
||||
apiv2.WithZone(ctx, nlb.zone),
|
||||
nlb.ID,
|
||||
svc.ID,
|
||||
papi.UpdateLoadBalancerServiceJSONRequestBody{
|
||||
Name: &svc.Name,
|
||||
Description: &svc.Description,
|
||||
Port: &port,
|
||||
TargetPort: &targetPort,
|
||||
Protocol: &svc.Protocol,
|
||||
Strategy: &svc.Strategy,
|
||||
Healthcheck: &papi.Healthcheck{
|
||||
Mode: svc.Healthcheck.Mode,
|
||||
Port: healthcheckPort,
|
||||
Interval: &healthcheckInterval,
|
||||
Timeout: &healthcheckTimeout,
|
||||
Retries: &svc.Healthcheck.Retries,
|
||||
Uri: func() *string {
|
||||
if strings.HasPrefix(svc.Healthcheck.Mode, "http") {
|
||||
return &svc.Healthcheck.URI
|
||||
}
|
||||
return nil
|
||||
}(),
|
||||
TlsSni: func() *string {
|
||||
if svc.Healthcheck.Mode == "https" && svc.Healthcheck.TLSSNI != "" {
|
||||
return &svc.Healthcheck.TLSSNI
|
||||
}
|
||||
return nil
|
||||
}(),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(nlb.c.timeout).
|
||||
Poll(ctx, nlb.c.OperationPoller(nlb.zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteService deletes the specified service from the Network Load Balancer instance.
|
||||
func (nlb *NetworkLoadBalancer) DeleteService(ctx context.Context, svc *NetworkLoadBalancerService) error {
|
||||
resp, err := nlb.c.DeleteLoadBalancerServiceWithResponse(
|
||||
apiv2.WithZone(ctx, nlb.zone),
|
||||
nlb.ID,
|
||||
svc.ID,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(nlb.c.timeout).
|
||||
Poll(ctx, nlb.c.OperationPoller(nlb.zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateNetworkLoadBalancer creates a Network Load Balancer instance in the specified zone.
|
||||
func (c *Client) CreateNetworkLoadBalancer(ctx context.Context, zone string,
|
||||
nlb *NetworkLoadBalancer) (*NetworkLoadBalancer, error) {
|
||||
resp, err := c.CreateLoadBalancerWithResponse(
|
||||
apiv2.WithZone(ctx, zone),
|
||||
papi.CreateLoadBalancerJSONRequestBody{
|
||||
Name: nlb.Name,
|
||||
Description: &nlb.Description,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := papi.NewPoller().
|
||||
WithTimeout(c.timeout).
|
||||
Poll(ctx, c.OperationPoller(zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.GetNetworkLoadBalancer(ctx, zone, *res.(*papi.Reference).Id)
|
||||
}
|
||||
|
||||
// ListNetworkLoadBalancers returns the list of existing Network Load Balancers in the
|
||||
// specified zone.
|
||||
func (c *Client) ListNetworkLoadBalancers(ctx context.Context, zone string) ([]*NetworkLoadBalancer, error) {
|
||||
list := make([]*NetworkLoadBalancer, 0)
|
||||
|
||||
resp, err := c.ListLoadBalancersWithResponse(apiv2.WithZone(ctx, zone))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.JSON200.LoadBalancers != nil {
|
||||
for i := range *resp.JSON200.LoadBalancers {
|
||||
nlb := nlbFromAPI(&(*resp.JSON200.LoadBalancers)[i])
|
||||
nlb.c = c
|
||||
nlb.zone = zone
|
||||
|
||||
list = append(list, nlb)
|
||||
}
|
||||
}
|
||||
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// GetNetworkLoadBalancer returns the Network Load Balancer instance corresponding to the
|
||||
// specified ID in the specified zone.
|
||||
func (c *Client) GetNetworkLoadBalancer(ctx context.Context, zone, id string) (*NetworkLoadBalancer, error) {
|
||||
resp, err := c.GetLoadBalancerWithResponse(apiv2.WithZone(ctx, zone), id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nlb := nlbFromAPI(resp.JSON200)
|
||||
nlb.c = c
|
||||
nlb.zone = zone
|
||||
|
||||
return nlb, nil
|
||||
}
|
||||
|
||||
// UpdateNetworkLoadBalancer updates the specified Network Load Balancer instance in the specified zone.
|
||||
func (c *Client) UpdateNetworkLoadBalancer(ctx context.Context, zone string, // nolint:dupl
|
||||
nlb *NetworkLoadBalancer) (*NetworkLoadBalancer, error) {
|
||||
resp, err := c.UpdateLoadBalancerWithResponse(
|
||||
apiv2.WithZone(ctx, zone),
|
||||
nlb.ID,
|
||||
papi.UpdateLoadBalancerJSONRequestBody{
|
||||
Name: &nlb.Name,
|
||||
Description: &nlb.Description,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := papi.NewPoller().
|
||||
WithTimeout(c.timeout).
|
||||
Poll(ctx, c.OperationPoller(zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.GetNetworkLoadBalancer(ctx, zone, *res.(*papi.Reference).Id)
|
||||
}
|
||||
|
||||
// DeleteNetworkLoadBalancer deletes the specified Network Load Balancer instance in the specified zone.
|
||||
func (c *Client) DeleteNetworkLoadBalancer(ctx context.Context, zone, id string) error {
|
||||
resp, err := c.DeleteLoadBalancerWithResponse(apiv2.WithZone(ctx, zone), id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(c.timeout).
|
||||
Poll(ctx, c.OperationPoller(zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,442 @@
|
|||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
apiv2 "github.com/exoscale/egoscale/v2/api"
|
||||
papi "github.com/exoscale/egoscale/v2/internal/public-api"
|
||||
)
|
||||
|
||||
// SKSNodepool represents a SKS Nodepool.
|
||||
type SKSNodepool struct {
|
||||
ID string
|
||||
Name string
|
||||
Description string
|
||||
CreatedAt time.Time
|
||||
InstancePoolID string
|
||||
InstanceTypeID string
|
||||
TemplateID string
|
||||
DiskSize int64
|
||||
AntiAffinityGroupIDs []string
|
||||
SecurityGroupIDs []string
|
||||
Version string
|
||||
Size int64
|
||||
State string
|
||||
}
|
||||
|
||||
func sksNodepoolFromAPI(n *papi.SksNodepool) *SKSNodepool {
|
||||
return &SKSNodepool{
|
||||
ID: papi.OptionalString(n.Id),
|
||||
Name: papi.OptionalString(n.Name),
|
||||
Description: papi.OptionalString(n.Description),
|
||||
CreatedAt: *n.CreatedAt,
|
||||
InstancePoolID: papi.OptionalString(n.InstancePool.Id),
|
||||
InstanceTypeID: papi.OptionalString(n.InstanceType.Id),
|
||||
TemplateID: papi.OptionalString(n.Template.Id),
|
||||
DiskSize: papi.OptionalInt64(n.DiskSize),
|
||||
AntiAffinityGroupIDs: func() []string {
|
||||
aags := make([]string, 0)
|
||||
|
||||
if n.AntiAffinityGroups != nil {
|
||||
for _, aag := range *n.AntiAffinityGroups {
|
||||
aag := aag
|
||||
aags = append(aags, *aag.Id)
|
||||
}
|
||||
}
|
||||
|
||||
return aags
|
||||
}(),
|
||||
SecurityGroupIDs: func() []string {
|
||||
sgs := make([]string, 0)
|
||||
|
||||
if n.SecurityGroups != nil {
|
||||
for _, sg := range *n.SecurityGroups {
|
||||
sg := sg
|
||||
sgs = append(sgs, *sg.Id)
|
||||
}
|
||||
}
|
||||
|
||||
return sgs
|
||||
}(),
|
||||
Version: papi.OptionalString(n.Version),
|
||||
Size: papi.OptionalInt64(n.Size),
|
||||
State: papi.OptionalString(n.State),
|
||||
}
|
||||
}
|
||||
|
||||
// SKSCluster represents a SKS cluster.
|
||||
type SKSCluster struct {
|
||||
ID string
|
||||
Name string
|
||||
Description string
|
||||
CreatedAt time.Time
|
||||
Endpoint string
|
||||
Nodepools []*SKSNodepool
|
||||
Version string
|
||||
ServiceLevel string
|
||||
CNI string
|
||||
AddOns []string
|
||||
State string
|
||||
|
||||
c *Client
|
||||
zone string
|
||||
}
|
||||
|
||||
func sksClusterFromAPI(c *papi.SksCluster) *SKSCluster {
|
||||
return &SKSCluster{
|
||||
ID: papi.OptionalString(c.Id),
|
||||
Name: papi.OptionalString(c.Name),
|
||||
Description: papi.OptionalString(c.Description),
|
||||
CreatedAt: *c.CreatedAt,
|
||||
Endpoint: papi.OptionalString(c.Endpoint),
|
||||
Nodepools: func() []*SKSNodepool {
|
||||
nodepools := make([]*SKSNodepool, 0)
|
||||
|
||||
if c.Nodepools != nil {
|
||||
for _, n := range *c.Nodepools {
|
||||
n := n
|
||||
nodepools = append(nodepools, sksNodepoolFromAPI(&n))
|
||||
}
|
||||
}
|
||||
|
||||
return nodepools
|
||||
}(),
|
||||
Version: papi.OptionalString(c.Version),
|
||||
ServiceLevel: papi.OptionalString(c.Level),
|
||||
CNI: papi.OptionalString(c.Cni),
|
||||
AddOns: func() []string {
|
||||
addOns := make([]string, 0)
|
||||
if c.Addons != nil {
|
||||
addOns = append(addOns, *c.Addons...)
|
||||
}
|
||||
return addOns
|
||||
}(),
|
||||
State: papi.OptionalString(c.State),
|
||||
}
|
||||
}
|
||||
|
||||
// RequestKubeconfig returns base64-encoded kubeconfig content for the specified user name,
// optionally associated with the specified groups, valid for the duration d (the API default
// TTL applies if d is not specified).
// For more information: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/
|
||||
func (c *SKSCluster) RequestKubeconfig(ctx context.Context, user string, groups []string,
|
||||
d time.Duration) (string, error) {
|
||||
if user == "" {
|
||||
return "", errors.New("user not specified")
|
||||
}
|
||||
|
||||
resp, err := c.c.GenerateSksClusterKubeconfigWithResponse(
|
||||
apiv2.WithZone(ctx, c.zone),
|
||||
c.ID,
|
||||
papi.GenerateSksClusterKubeconfigJSONRequestBody{
|
||||
User: &user,
|
||||
Groups: &groups,
|
||||
Ttl: func() *int64 {
|
||||
ttl := int64(d.Seconds())
|
||||
if ttl > 0 {
|
||||
return &ttl
|
||||
}
|
||||
return nil
|
||||
}(),
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return papi.OptionalString(resp.JSON200.Kubeconfig), nil
|
||||
}
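// Illustrative usage sketch (not part of the vendored file; the user name,
// groups and TTL are placeholders): the returned kubeconfig is base64-encoded,
// so callers typically decode it before writing it to disk.
//
//	b64, err := cluster.RequestKubeconfig(ctx, "admin", []string{"system:masters"}, time.Hour)
//	if err != nil {
//		return err
//	}
//	kubeconfig, err := base64.StdEncoding.DecodeString(b64)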
|
||||
|
||||
// AddNodepool adds a Nodepool to the SKS cluster.
|
||||
func (c *SKSCluster) AddNodepool(ctx context.Context, np *SKSNodepool) (*SKSNodepool, error) {
|
||||
resp, err := c.c.CreateSksNodepoolWithResponse(
|
||||
apiv2.WithZone(ctx, c.zone),
|
||||
c.ID,
|
||||
papi.CreateSksNodepoolJSONRequestBody{
|
||||
Description: &np.Description,
|
||||
DiskSize: np.DiskSize,
|
||||
InstanceType: papi.InstanceType{Id: &np.InstanceTypeID},
|
||||
Name: np.Name,
|
||||
AntiAffinityGroups: func() *[]papi.AntiAffinityGroup {
|
||||
aags := make([]papi.AntiAffinityGroup, len(np.AntiAffinityGroupIDs))
|
||||
for i, aagID := range np.AntiAffinityGroupIDs {
|
||||
aagID := aagID
|
||||
aags[i] = papi.AntiAffinityGroup{Id: &aagID}
|
||||
}
|
||||
return &aags
|
||||
}(),
|
||||
SecurityGroups: func() *[]papi.SecurityGroup {
|
||||
sgs := make([]papi.SecurityGroup, len(np.SecurityGroupIDs))
|
||||
for i, sgID := range np.SecurityGroupIDs {
|
||||
sgID := sgID
|
||||
sgs[i] = papi.SecurityGroup{Id: &sgID}
|
||||
}
|
||||
return &sgs
|
||||
}(),
|
||||
Size: np.Size,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := papi.NewPoller().
|
||||
WithTimeout(c.c.timeout).
|
||||
Poll(ctx, c.c.OperationPoller(c.zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodepoolRes, err := c.c.GetSksNodepoolWithResponse(ctx, c.ID, *res.(*papi.Reference).Id)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to retrieve Nodepool: %s", err)
|
||||
}
|
||||
|
||||
return sksNodepoolFromAPI(nodepoolRes.JSON200), nil
|
||||
}
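// Illustrative usage sketch (not part of the vendored file; IDs and sizes are
// placeholders): add a Nodepool of three nodes to an existing SKS cluster.
//
//	np, err := cluster.AddNodepool(ctx, &SKSNodepool{
//		Name:           "default",
//		InstanceTypeID: "21624abb-764e-4def-81d7-9fc54b5957fb",
//		DiskSize:       50,
//		Size:           3,
//	})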
|
||||
|
||||
// UpdateNodepool updates the specified SKS cluster Nodepool.
|
||||
func (c *SKSCluster) UpdateNodepool(ctx context.Context, np *SKSNodepool) error {
|
||||
resp, err := c.c.UpdateSksNodepoolWithResponse(
|
||||
apiv2.WithZone(ctx, c.zone),
|
||||
c.ID,
|
||||
np.ID,
|
||||
papi.UpdateSksNodepoolJSONRequestBody{
|
||||
Name: &np.Name,
|
||||
Description: &np.Description,
|
||||
InstanceType: &papi.InstanceType{Id: &np.InstanceTypeID},
|
||||
DiskSize: &np.DiskSize,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(c.c.timeout).
|
||||
Poll(ctx, c.c.OperationPoller(c.zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ScaleNodepool scales the SKS cluster Nodepool to the specified number of Kubernetes Nodes.
|
||||
func (c *SKSCluster) ScaleNodepool(ctx context.Context, np *SKSNodepool, nodes int64) error {
|
||||
resp, err := c.c.ScaleSksNodepoolWithResponse(
|
||||
apiv2.WithZone(ctx, c.zone),
|
||||
c.ID,
|
||||
np.ID,
|
||||
papi.ScaleSksNodepoolJSONRequestBody{Size: nodes},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(c.c.timeout).
|
||||
Poll(ctx, c.c.OperationPoller(c.zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EvictNodepoolMembers evicts the specified members (identified by their Compute instance ID) from the
|
||||
// SKS cluster Nodepool.
|
||||
func (c *SKSCluster) EvictNodepoolMembers(ctx context.Context, np *SKSNodepool, members []string) error {
|
||||
resp, err := c.c.EvictSksNodepoolMembersWithResponse(
|
||||
apiv2.WithZone(ctx, c.zone),
|
||||
c.ID,
|
||||
np.ID,
|
||||
papi.EvictSksNodepoolMembersJSONRequestBody{Instances: &members},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(c.c.timeout).
|
||||
Poll(ctx, c.c.OperationPoller(c.zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteNodepool deletes the specified Nodepool from the SKS cluster.
|
||||
func (c *SKSCluster) DeleteNodepool(ctx context.Context, np *SKSNodepool) error {
|
||||
resp, err := c.c.DeleteSksNodepoolWithResponse(
|
||||
apiv2.WithZone(ctx, c.zone),
|
||||
c.ID,
|
||||
np.ID,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(c.c.timeout).
|
||||
Poll(ctx, c.c.OperationPoller(c.zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateSKSCluster creates a SKS cluster in the specified zone.
|
||||
func (c *Client) CreateSKSCluster(ctx context.Context, zone string, cluster *SKSCluster) (*SKSCluster, error) {
|
||||
resp, err := c.CreateSksClusterWithResponse(
|
||||
apiv2.WithZone(ctx, zone),
|
||||
papi.CreateSksClusterJSONRequestBody{
|
||||
Name: cluster.Name,
|
||||
Description: &cluster.Description,
|
||||
Version: cluster.Version,
|
||||
Level: cluster.ServiceLevel,
|
||||
Cni: func() *string {
|
||||
var cni *string
|
||||
if cluster.CNI != "" {
|
||||
cni = &cluster.CNI
|
||||
}
|
||||
return cni
|
||||
}(),
|
||||
Addons: func() *[]string {
|
||||
var addOns *[]string
|
||||
if len(cluster.AddOns) > 0 {
|
||||
addOns = &cluster.AddOns
|
||||
}
|
||||
return addOns
|
||||
}(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := papi.NewPoller().
|
||||
WithTimeout(c.timeout).
|
||||
Poll(ctx, c.OperationPoller(zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.GetSKSCluster(ctx, zone, *res.(*papi.Reference).Id)
|
||||
}
|
||||
|
||||
// ListSKSClusters returns the list of existing SKS clusters in the specified zone.
|
||||
func (c *Client) ListSKSClusters(ctx context.Context, zone string) ([]*SKSCluster, error) {
|
||||
list := make([]*SKSCluster, 0)
|
||||
|
||||
resp, err := c.ListSksClustersWithResponse(apiv2.WithZone(ctx, zone))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.JSON200.SksClusters != nil {
|
||||
for i := range *resp.JSON200.SksClusters {
|
||||
cluster := sksClusterFromAPI(&(*resp.JSON200.SksClusters)[i])
|
||||
cluster.c = c
|
||||
cluster.zone = zone
|
||||
|
||||
list = append(list, cluster)
|
||||
}
|
||||
}
|
||||
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// ListSKSClusterVersions returns the list of Kubernetes versions supported during SKS cluster creation.
|
||||
func (c *Client) ListSKSClusterVersions(ctx context.Context) ([]string, error) {
|
||||
list := make([]string, 0)
|
||||
|
||||
resp, err := c.ListSksClusterVersionsWithResponse(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.JSON200.SksClusterVersions != nil {
|
||||
for i := range *resp.JSON200.SksClusterVersions {
|
||||
version := &(*resp.JSON200.SksClusterVersions)[i]
|
||||
list = append(list, *version)
|
||||
}
|
||||
}
|
||||
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// GetSKSCluster returns the SKS cluster corresponding to the specified ID in the specified zone.
|
||||
func (c *Client) GetSKSCluster(ctx context.Context, zone, id string) (*SKSCluster, error) {
|
||||
resp, err := c.GetSksClusterWithResponse(apiv2.WithZone(ctx, zone), id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cluster := sksClusterFromAPI(resp.JSON200)
|
||||
cluster.c = c
|
||||
cluster.zone = zone
|
||||
|
||||
return cluster, nil
|
||||
}
|
||||
|
||||
// UpdateSKSCluster updates the specified SKS cluster in the specified zone.
|
||||
func (c *Client) UpdateSKSCluster(ctx context.Context, zone string, cluster *SKSCluster) error {
|
||||
resp, err := c.UpdateSksClusterWithResponse(
|
||||
apiv2.WithZone(ctx, zone),
|
||||
cluster.ID,
|
||||
papi.UpdateSksClusterJSONRequestBody{
|
||||
Name: &cluster.Name,
|
||||
Description: &cluster.Description,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(c.timeout).
|
||||
Poll(ctx, c.OperationPoller(zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpgradeSKSCluster upgrades the SKS cluster corresponding to the specified ID in the specified zone to the
|
||||
// requested Kubernetes version.
|
||||
func (c *Client) UpgradeSKSCluster(ctx context.Context, zone, id, version string) error {
|
||||
resp, err := c.UpgradeSksClusterWithResponse(
|
||||
apiv2.WithZone(ctx, zone),
|
||||
id,
|
||||
papi.UpgradeSksClusterJSONRequestBody{Version: version})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(c.timeout).
|
||||
Poll(ctx, c.OperationPoller(zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteSKSCluster deletes the specified SKS cluster in the specified zone.
|
||||
func (c *Client) DeleteSKSCluster(ctx context.Context, zone, id string) error {
|
||||
resp, err := c.DeleteSksClusterWithResponse(apiv2.WithZone(ctx, zone), id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = papi.NewPoller().
|
||||
WithTimeout(c.timeout).
|
||||
Poll(ctx, c.OperationPoller(zone, *resp.JSON200.Id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,3 @@
package v2

var testZone = "ch-gva-2"
|
@ -0,0 +1,3 @@
// Package v2 is the new Exoscale client API binding.
// Reference: https://openapi-v2.exoscale.com/
package v2
|
@ -0,0 +1,24 @@
package v2

import (
	"context"
)

// ListZones returns the list of Exoscale zones.
func (c *Client) ListZones(ctx context.Context) ([]string, error) {
	list := make([]string, 0)

	resp, err := c.ListZonesWithResponse(ctx)
	if err != nil {
		return nil, err
	}

	if resp.JSON200.Zones != nil {
		for i := range *resp.JSON200.Zones {
			zone := &(*resp.JSON200.Zones)[i]
			list = append(list, *zone.Name)
		}
	}

	return list, nil
}
|
@ -1,4 +1,4 @@
 package egoscale
 
 // Version of the library
-const Version = "0.18.1"
+const Version = "0.43.1"
|
@ -73,6 +73,8 @@ type VirtualMachine struct {
|
|||
IsoID *UUID `json:"isoid,omitempty" doc:"the ID of the ISO attached to the virtual machine"`
|
||||
IsoName string `json:"isoname,omitempty" doc:"the name of the ISO attached to the virtual machine"`
|
||||
KeyPair string `json:"keypair,omitempty" doc:"ssh key-pair"`
|
||||
Manager string `json:"manager,omitempty" doc:"type of virtual machine manager"`
|
||||
ManagerID *UUID `json:"managerid,omitempty" doc:"ID of the virtual machine manager"`
|
||||
Memory int `json:"memory,omitempty" doc:"the memory allocated for the virtual machine"`
|
||||
Name string `json:"name,omitempty" doc:"the name of the virtual machine"`
|
||||
NetworkKbsRead int64 `json:"networkkbsread,omitempty" doc:"the incoming network traffic on the vm"`
|
||||
|
@ -124,6 +126,7 @@ func (vm VirtualMachine) ListRequest() (ListCommand, error) {
|
|||
req := &ListVirtualMachines{
|
||||
GroupID: vm.GroupID,
|
||||
ID: vm.ID,
|
||||
ManagerID: vm.ManagerID,
|
||||
Name: vm.Name,
|
||||
State: vm.State,
|
||||
TemplateID: vm.TemplateID,
|
||||
|
@ -223,8 +226,6 @@ type PCIDevice struct {
|
|||
}
|
||||
|
||||
// Password represents an encrypted password
|
||||
//
|
||||
// TODO: method to decrypt it, https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=34014652
|
||||
type Password struct {
|
||||
EncryptedPassword string `json:"encryptedpassword"`
|
||||
}
|
||||
|
@ -424,6 +425,23 @@ func (UpdateVirtualMachine) Response() interface{} {
|
|||
return new(VirtualMachine)
|
||||
}
|
||||
|
||||
// UpdateVirtualMachineSecurityGroups represents the update of the virtual machine security group membership
|
||||
type UpdateVirtualMachineSecurityGroups struct {
|
||||
ID *UUID `json:"id" doc:"The ID of the virtual machine"`
|
||||
SecurityGroupIDs []UUID `json:"securitygroupids,omitempty" doc:"list of security group ids to be applied on the virtual machine."`
|
||||
_ bool `name:"updateVirtualMachineSecurityGroups" description:"Updates a virtual machine Security Group membership'."`
|
||||
}
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (UpdateVirtualMachineSecurityGroups) Response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
// AsyncResponse returns the struct to unmarshal the async job
|
||||
func (UpdateVirtualMachineSecurityGroups) AsyncResponse() interface{} {
|
||||
return new(VirtualMachine)
|
||||
}
|
||||
|
||||
// ExpungeVirtualMachine represents the annihilation of a VM
|
||||
type ExpungeVirtualMachine struct {
|
||||
ID *UUID `json:"id" doc:"The ID of the virtual machine"`
|
||||
|
@ -514,13 +532,14 @@ type ListVirtualMachines struct {
|
|||
IPAddress net.IP `json:"ipaddress,omitempty" doc:"an IP address to filter the result"`
|
||||
IsoID *UUID `json:"isoid,omitempty" doc:"list vms by iso"`
|
||||
Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
|
||||
ManagerID *UUID `json:"managerid,omitempty" doc:"list by manager id"`
|
||||
Name string `json:"name,omitempty" doc:"name of the virtual machine"`
|
||||
NetworkID *UUID `json:"networkid,omitempty" doc:"list by network id"`
|
||||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
ServiceOfferindID *UUID `json:"serviceofferingid,omitempty" doc:"list by the service offering"`
|
||||
State string `json:"state,omitempty" doc:"state of the virtual machine"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs)"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs). Note: multiple tags are OR'ed, not AND'ed."`
|
||||
TemplateID *UUID `json:"templateid,omitempty" doc:"list vms by template"`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"the availability zone ID"`
|
||||
_ bool `name:"listVirtualMachines" description:"List the virtual machines owned by the account."`
|
||||
|
|
|
@ -62,6 +62,7 @@ func (Volume) ResourceType() string {
|
|||
// ListRequest builds the ListVolumes request
|
||||
func (vol Volume) ListRequest() (ListCommand, error) {
|
||||
req := &ListVolumes{
|
||||
ID: vol.ID,
|
||||
Name: vol.Name,
|
||||
Type: vol.Type,
|
||||
VirtualMachineID: vol.VirtualMachineID,
|
||||
|
@ -99,7 +100,7 @@ type ListVolumes struct {
|
|||
Name string `json:"name,omitempty" doc:"The name of the disk volume"`
|
||||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs)"`
|
||||
Tags []ResourceTag `json:"tags,omitempty" doc:"List resources by tags (key/value pairs). Note: multiple tags are OR'ed, not AND'ed."`
|
||||
Type string `json:"type,omitempty" doc:"The type of disk volume"`
|
||||
VirtualMachineID *UUID `json:"virtualmachineid,omitempty" doc:"The ID of the virtual machine"`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"The ID of the availability zone"`
|
||||
|
|
|
@ -5,8 +5,6 @@ import (
|
|||
)
|
||||
|
||||
// Zone represents a data center
|
||||
//
|
||||
// TODO: represent correctly the capacity field.
|
||||
type Zone struct {
|
||||
AllocationState string `json:"allocationstate,omitempty" doc:"the allocation state of the cluster"`
|
||||
Description string `json:"description,omitempty" doc:"Zone description"`
|
||||
|
|
|
@ -0,0 +1,373 @@
|
|||
Mozilla Public License Version 2.0
|
||||
==================================
|
||||
|
||||
1. Definitions
|
||||
--------------
|
||||
|
||||
1.1. "Contributor"
|
||||
means each individual or legal entity that creates, contributes to
|
||||
the creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
means the combination of the Contributions of others (if any) used
|
||||
by a Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
means Source Code Form to which the initial Contributor has attached
|
||||
the notice in Exhibit A, the Executable Form of such Source Code
|
||||
Form, and Modifications of such Source Code Form, in each case
|
||||
including portions thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
(a) that the initial Contributor has attached the notice described
|
||||
in Exhibit B to the Covered Software; or
|
||||
|
||||
(b) that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the
|
||||
terms of a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
means a work that combines Covered Software with other material, in
|
||||
a separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
means having the right to grant, to the maximum extent possible,
|
||||
whether at the time of the initial grant or subsequently, any and
|
||||
all of the rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
means any of the following:
|
||||
|
||||
(a) any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered
|
||||
Software; or
|
||||
|
||||
(b) any new file in Source Code Form that contains any Covered
|
||||
Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the
|
||||
License, by the making, using, selling, offering for sale, having
|
||||
made, import, or transfer of either its Contributions or its
|
||||
Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
means either the GNU General Public License, Version 2.0, the GNU
|
||||
Lesser General Public License, Version 2.1, the GNU Affero General
|
||||
Public License, Version 3.0, or any later versions of those
|
||||
licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that
|
||||
controls, is controlled by, or is under common control with You. For
|
||||
purposes of this definition, "control" means (a) the power, direct
|
||||
or indirect, to cause the direction or management of such entity,
|
||||
whether by contract or otherwise, or (b) ownership of more than
|
||||
fifty percent (50%) of the outstanding shares or beneficial
|
||||
ownership of such entity.
|
||||
|
||||
2. License Grants and Conditions
|
||||
--------------------------------
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
(a) under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
(b) under Patent Claims of such Contributor to make, use, sell, offer
|
||||
for sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
(a) for any code that a Contributor has removed from Covered Software;
|
||||
or
|
||||
|
||||
(b) for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
(c) under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights
|
||||
to grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
|
||||
in Section 2.1.
|
||||
|
||||
3. Responsibilities
|
||||
-------------------
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
(a) such Covered Software must also be made available in Source Code
|
||||
Form, as described in Section 3.1, and You must inform recipients of
|
||||
the Executable Form how they can obtain a copy of such Source Code
|
||||
Form by reasonable means in a timely manner, at a charge no more
|
||||
than the cost of distribution to the recipient; and
|
||||
|
||||
(b) You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter
|
||||
the recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty,
|
||||
or limitations of liability) contained within the Source Code Form of
|
||||
the Covered Software, except that You may alter any license notices to
|
||||
the extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
---------------------------------------------------
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this
|
||||
License with respect to some or all of the Covered Software due to
|
||||
statute, judicial order, or regulation then You must: (a) comply with
|
||||
the terms of this License to the maximum extent possible; and (b)
|
||||
describe the limitations and the code they affect. Such description must
|
||||
be placed in a text file included with all distributions of the Covered
|
||||
Software under this License. Except to the extent prohibited by statute
|
||||
or regulation, such description must be sufficiently detailed for a
|
||||
recipient of ordinary skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
--------------
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically
|
||||
if You fail to comply with any of its terms. However, if You become
|
||||
compliant, then the rights granted under this License from a particular
|
||||
Contributor are reinstated (a) provisionally, unless and until such
|
||||
Contributor explicitly and finally terminates Your grants, and (b) on an
|
||||
ongoing basis, if such Contributor fails to notify You of the
|
||||
non-compliance by some reasonable means prior to 60 days after You have
|
||||
come back into compliance. Moreover, Your grants from a particular
|
||||
Contributor are reinstated on an ongoing basis if such Contributor
|
||||
notifies You of the non-compliance by some reasonable means, this is the
|
||||
first time You have received notice of non-compliance with this License
|
||||
from such Contributor, and You become compliant prior to 30 days after
|
||||
Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
|
||||
end user license agreements (excluding distributors and resellers) which
|
||||
have been validly granted by You or Your distributors under this License
|
||||
prior to termination shall survive termination.
|
||||
|
||||
************************************************************************
|
||||
* *
|
||||
* 6. Disclaimer of Warranty *
|
||||
* ------------------------- *
|
||||
* *
|
||||
* Covered Software is provided under this License on an "as is" *
|
||||
* basis, without warranty of any kind, either expressed, implied, or *
|
||||
* statutory, including, without limitation, warranties that the *
|
||||
* Covered Software is free of defects, merchantable, fit for a *
|
||||
* particular purpose or non-infringing. The entire risk as to the *
|
||||
* quality and performance of the Covered Software is with You. *
|
||||
* Should any Covered Software prove defective in any respect, You *
|
||||
* (not any Contributor) assume the cost of any necessary servicing, *
|
||||
* repair, or correction. This disclaimer of warranty constitutes an *
|
||||
* essential part of this License. No use of any Covered Software is *
|
||||
* authorized under this License except under this disclaimer. *
|
||||
* *
|
||||
************************************************************************
|
||||
|
||||
************************************************************************
|
||||
* *
|
||||
* 7. Limitation of Liability *
|
||||
* -------------------------- *
|
||||
* *
|
||||
* Under no circumstances and under no legal theory, whether tort *
|
||||
* (including negligence), contract, or otherwise, shall any *
|
||||
* Contributor, or anyone who distributes Covered Software as *
|
||||
* permitted above, be liable to You for any direct, indirect, *
|
||||
* special, incidental, or consequential damages of any character *
|
||||
* including, without limitation, damages for lost profits, loss of *
|
||||
* goodwill, work stoppage, computer failure or malfunction, or any *
|
||||
* and all other commercial damages or losses, even if such party *
|
||||
* shall have been informed of the possibility of such damages. This *
|
||||
* limitation of liability shall not apply to liability for death or *
|
||||
* personal injury resulting from such party's negligence to the *
|
||||
* extent applicable law prohibits such limitation. Some *
|
||||
* jurisdictions do not allow the exclusion or limitation of *
|
||||
* incidental or consequential damages, so this exclusion and *
|
||||
* limitation may not apply to You. *
|
||||
* *
|
||||
************************************************************************
|
||||
|
||||
8. Litigation
|
||||
-------------
|
||||
|
||||
Any litigation relating to this License may be brought only in the
|
||||
courts of a jurisdiction where the defendant maintains its principal
|
||||
place of business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions.
|
||||
Nothing in this Section shall prevent a party's ability to bring
|
||||
cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
----------------
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides
|
||||
that the language of a contract shall be construed against the drafter
|
||||
shall not be used to construe this License against a Contributor.
|
||||
|
||||
10. Versions of the License
|
||||
---------------------------
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses
|
||||
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
-------------------------------------------
|
||||
|
||||
This Source Code Form is subject to the terms of the Mozilla Public
|
||||
License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular
|
||||
file, then You may include the notice in a location (such as a LICENSE
|
||||
file in a relevant directory) where a recipient would be likely to look
|
||||
for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
---------------------------------------------------------
|
||||
|
||||
This Source Code Form is "Incompatible With Secondary Licenses", as
|
||||
defined by the Mozilla Public License, v. 2.0.
|
47
vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/artifact.go
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
|||
package exoscaleimport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/exoscale/egoscale"
|
||||
)
|
||||
|
||||
const BuilderId = "packer.post-processor.exoscale-import"
|
||||
|
||||
type Artifact struct {
|
||||
template egoscale.Template
|
||||
exo *egoscale.Client
|
||||
}
|
||||
|
||||
func (a *Artifact) BuilderId() string {
|
||||
return BuilderId
|
||||
}
|
||||
|
||||
func (a *Artifact) Id() string {
|
||||
return a.template.ID.String()
|
||||
}
|
||||
|
||||
func (a *Artifact) Files() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Artifact) String() string {
|
||||
return fmt.Sprintf("%s @ %s (%s)",
|
||||
a.template.Name,
|
||||
a.template.ZoneName,
|
||||
a.template.ID.String())
|
||||
}
|
||||
|
||||
func (a *Artifact) State(name string) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Artifact) Destroy() error {
|
||||
_, err := a.exo.RequestWithContext(context.Background(), &egoscale.DeleteTemplate{ID: a.template.ID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to delete template: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
93
vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/config.go
generated
vendored
Normal file
|
@ -0,0 +1,93 @@
|
|||
//go:generate mapstructure-to-hcl2 -type Config
|
||||
|
||||
package exoscaleimport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
pkrconfig "github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultAPIEndpoint = "https://api.exoscale.com/v1"
|
||||
defaultTemplateBootMode = "legacy"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
SOSEndpoint string `mapstructure:"sos_endpoint"`
|
||||
APIEndpoint string `mapstructure:"api_endpoint"`
|
||||
APIKey string `mapstructure:"api_key"`
|
||||
APISecret string `mapstructure:"api_secret"`
|
||||
ImageBucket string `mapstructure:"image_bucket"`
|
||||
TemplateZone string `mapstructure:"template_zone"`
|
||||
TemplateName string `mapstructure:"template_name"`
|
||||
TemplateDescription string `mapstructure:"template_description"`
|
||||
TemplateUsername string `mapstructure:"template_username"`
|
||||
TemplateBootMode string `mapstructure:"template_boot_mode"`
|
||||
TemplateDisablePassword bool `mapstructure:"template_disable_password"`
|
||||
TemplateDisableSSHKey bool `mapstructure:"template_disable_sshkey"`
|
||||
SkipClean bool `mapstructure:"skip_clean"`
|
||||
|
||||
ctx interpolate.Context
|
||||
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
}
|
||||
|
||||
func NewConfig(raws ...interface{}) (*Config, error) {
|
||||
var config Config
|
||||
|
||||
err := pkrconfig.Decode(&config, &pkrconfig.DecodeOpts{
|
||||
PluginType: BuilderId,
|
||||
Interpolate: true,
|
||||
InterpolateContext: &config.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{},
|
||||
},
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requiredArgs := map[string]*string{
|
||||
"api_key": &config.APIKey,
|
||||
"api_secret": &config.APISecret,
|
||||
"image_bucket": &config.ImageBucket,
|
||||
"template_zone": &config.TemplateZone,
|
||||
"template_name": &config.TemplateName,
|
||||
}
|
||||
|
||||
errs := new(packer.MultiError)
|
||||
for k, v := range requiredArgs {
|
||||
if *v == "" {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("%s must be set", k))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs.Errors) > 0 {
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
if config.APIEndpoint == "" {
|
||||
config.APIEndpoint = defaultAPIEndpoint
|
||||
}
|
||||
|
||||
if config.TemplateBootMode == "" {
|
||||
config.TemplateBootMode = defaultTemplateBootMode
|
||||
}
|
||||
|
||||
if config.SOSEndpoint == "" {
|
||||
config.SOSEndpoint = "https://sos-" + config.TemplateZone + ".exo.io"
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
// ConfigSpec returns HCL object spec
|
||||
func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec {
|
||||
return p.config.FlatMapstructure().HCL2Spec()
|
||||
}
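
The decoder above requires only five keys and derives the rest. The following is a minimal sketch, not part of the vendored plugin, showing the defaulting from a caller's point of view; the credential, bucket, and zone values are placeholders.

```go
// Minimal sketch (not upstream code): decode a raw settings map and observe
// the defaults applied by NewConfig. All values below are placeholders.
package exoscaleimport

import "fmt"

func ExampleNewConfig() {
	cfg, err := NewConfig(map[string]interface{}{
		"api_key":       "EXO-placeholder-key",
		"api_secret":    "placeholder-secret",
		"image_bucket":  "my-templates",
		"template_zone": "ch-gva-2",
		"template_name": "my-template",
	})
	if err != nil {
		fmt.Println("config error:", err)
		return
	}

	fmt.Println(cfg.APIEndpoint)      // https://api.exoscale.com/v1 (default)
	fmt.Println(cfg.TemplateBootMode) // legacy (default)
	fmt.Println(cfg.SOSEndpoint)      // https://sos-ch-gva-2.exo.io (derived from template_zone)
}
```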
|
|
@ -1,5 +1,4 @@
|
|||
// Code generated by "mapstructure-to-hcl2 -type Config"; DO NOT EDIT.
|
||||
|
||||
package exoscaleimport
|
||||
|
||||
import (
|
||||
|
@ -10,26 +9,27 @@ import (
|
|||
// FlatConfig is an auto-generated flat version of Config.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatConfig struct {
|
||||
PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
|
||||
PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
|
||||
PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
|
||||
PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
|
||||
PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
|
||||
PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
|
||||
PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
|
||||
PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
|
||||
SkipClean *bool `mapstructure:"skip_clean" cty:"skip_clean" hcl:"skip_clean"`
|
||||
SOSEndpoint *string `mapstructure:"sos_endpoint" cty:"sos_endpoint" hcl:"sos_endpoint"`
|
||||
APIEndpoint *string `mapstructure:"api_endpoint" cty:"api_endpoint" hcl:"api_endpoint"`
|
||||
APIKey *string `mapstructure:"api_key" cty:"api_key" hcl:"api_key"`
|
||||
APISecret *string `mapstructure:"api_secret" cty:"api_secret" hcl:"api_secret"`
|
||||
ImageBucket *string `mapstructure:"image_bucket" cty:"image_bucket" hcl:"image_bucket"`
|
||||
TemplateZone *string `mapstructure:"template_zone" cty:"template_zone" hcl:"template_zone"`
|
||||
TemplateName *string `mapstructure:"template_name" cty:"template_name" hcl:"template_name"`
|
||||
TemplateDescription *string `mapstructure:"template_description" cty:"template_description" hcl:"template_description"`
|
||||
TemplateUsername *string `mapstructure:"template_username" cty:"template_username" hcl:"template_username"`
|
||||
TemplateDisablePassword *bool `mapstructure:"template_disable_password" cty:"template_disable_password" hcl:"template_disable_password"`
|
||||
TemplateDisableSSHKey *bool `mapstructure:"template_disable_sshkey" cty:"template_disable_sshkey" hcl:"template_disable_sshkey"`
|
||||
PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name"`
|
||||
PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type"`
|
||||
PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version"`
|
||||
PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug"`
|
||||
PackerForce *bool `mapstructure:"packer_force" cty:"packer_force"`
|
||||
PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error"`
|
||||
PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables"`
|
||||
PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables"`
|
||||
SkipClean *bool `mapstructure:"skip_clean" cty:"skip_clean"`
|
||||
SOSEndpoint *string `mapstructure:"sos_endpoint" cty:"sos_endpoint"`
|
||||
APIEndpoint *string `mapstructure:"api_endpoint" cty:"api_endpoint"`
|
||||
APIKey *string `mapstructure:"api_key" cty:"api_key"`
|
||||
APISecret *string `mapstructure:"api_secret" cty:"api_secret"`
|
||||
ImageBucket *string `mapstructure:"image_bucket" cty:"image_bucket"`
|
||||
TemplateZone *string `mapstructure:"template_zone" cty:"template_zone"`
|
||||
TemplateName *string `mapstructure:"template_name" cty:"template_name"`
|
||||
TemplateDescription *string `mapstructure:"template_description" cty:"template_description"`
|
||||
TemplateUsername *string `mapstructure:"template_username" cty:"template_username"`
|
||||
TemplateBootMode *string `mapstructure:"template_boot_mode" cty:"template_boot_mode"`
|
||||
TemplateDisablePassword *bool `mapstructure:"template_disable_password" cty:"template_disable_password"`
|
||||
TemplateDisableSSHKey *bool `mapstructure:"template_disable_sshkey" cty:"template_disable_sshkey"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatConfig.
|
||||
|
@ -62,6 +62,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
|
|||
"template_name": &hcldec.AttrSpec{Name: "template_name", Type: cty.String, Required: false},
|
||||
"template_description": &hcldec.AttrSpec{Name: "template_description", Type: cty.String, Required: false},
|
||||
"template_username": &hcldec.AttrSpec{Name: "template_username", Type: cty.String, Required: false},
|
||||
"template_boot_mode": &hcldec.AttrSpec{Name: "template_boot_mode", Type: cty.String, Required: false},
|
||||
"template_disable_password": &hcldec.AttrSpec{Name: "template_disable_password", Type: cty.Bool, Required: false},
|
||||
"template_disable_sshkey": &hcldec.AttrSpec{Name: "template_disable_sshkey", Type: cty.Bool, Required: false},
|
||||
}
|
92
vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/post-processor.go
generated
vendored
Normal file
|
@ -0,0 +1,92 @@
|
|||
package exoscaleimport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/exoscale/egoscale"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/version"
|
||||
)
|
||||
|
||||
const (
|
||||
qemuBuilderID = "transcend.qemu"
|
||||
fileBuilderID = "packer.file"
|
||||
artificeBuilderID = "packer.post-processor.artifice"
|
||||
)
|
||||
|
||||
func init() {
|
||||
egoscale.UserAgent = fmt.Sprintf("Exoscale-Packer-Post-Processor/%s %s",
|
||||
version.SDKVersion.FormattedVersion(), egoscale.UserAgent)
|
||||
}
|
||||
|
||||
type PostProcessor struct {
|
||||
config *Config
|
||||
runner multistep.Runner
|
||||
exo *egoscale.Client
|
||||
}
|
||||
|
||||
func (p *PostProcessor) Configure(raws ...interface{}) error {
|
||||
config, err := NewConfig(raws...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.config = config
|
||||
|
||||
packer.LogSecretFilter.Set(p.config.APIKey, p.config.APISecret)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PostProcessor) PostProcess(ctx context.Context, ui packer.Ui, a packer.Artifact) (packer.Artifact, bool, bool, error) {
|
||||
switch a.BuilderId() {
|
||||
case qemuBuilderID, fileBuilderID, artificeBuilderID:
|
||||
break
|
||||
default:
|
||||
err := fmt.Errorf("unsupported artifact type %q: this post-processor only imports "+
|
||||
"artifacts from QEMU/file builders and Artifice post-processor", a.BuilderId())
|
||||
return nil, false, false, err
|
||||
}
|
||||
|
||||
p.exo = egoscale.NewClient(p.config.APIEndpoint, p.config.APIKey, p.config.APISecret)
|
||||
|
||||
state := new(multistep.BasicStateBag)
|
||||
state.Put("config", p.config)
|
||||
state.Put("exo", p.exo)
|
||||
state.Put("ui", ui)
|
||||
state.Put("artifact", a)
|
||||
|
||||
steps := []multistep.Step{
|
||||
new(stepUploadImage),
|
||||
new(stepRegisterTemplate),
|
||||
new(stepDeleteImage),
|
||||
}
|
||||
|
||||
p.runner = commonsteps.NewRunnerWithPauseFn(steps, p.config.PackerConfig, ui, state)
|
||||
p.runner.Run(ctx, state)
|
||||
|
||||
if rawErr, ok := state.GetOk("error"); ok {
|
||||
return nil, false, false, rawErr.(error)
|
||||
}
|
||||
|
||||
if _, ok := state.GetOk(multistep.StateCancelled); ok {
|
||||
return nil, false, false, errors.New("post-processing cancelled")
|
||||
}
|
||||
|
||||
if _, ok := state.GetOk(multistep.StateHalted); ok {
|
||||
return nil, false, false, errors.New("post-processing halted")
|
||||
}
|
||||
|
||||
v, ok := state.GetOk("template")
|
||||
if !ok {
|
||||
return nil, false, false, errors.New("unable to find template in state")
|
||||
}
|
||||
|
||||
return &Artifact{
|
||||
template: v.(egoscale.Template),
|
||||
exo: p.exo,
|
||||
}, false, false, nil
|
||||
}
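
Reading ahead to the step implementations later in this diff, the steps communicate only through the state bag created above. Below is a small runnable sketch, not upstream code, of that hand-off; the URL and checksum values are placeholders.

```go
// Sketch of the state-bag hand-off used by the runner above: stepUploadImage
// stores "image_url"/"image_checksum", stepRegisterTemplate consumes them and
// stores "template", which PostProcess finally wraps in an Artifact.
package main

import (
	"fmt"

	"github.com/hashicorp/packer-plugin-sdk/multistep"
)

func main() {
	state := new(multistep.BasicStateBag)

	// What stepUploadImage does on success (placeholder values).
	state.Put("image_url", "https://sos-ch-gva-2.exo.io/my-templates/image.qcow2")
	state.Put("image_checksum", "d41d8cd98f00b204e9800998ecf8427e")

	// What stepRegisterTemplate reads back before registering the template.
	url := state.Get("image_url").(string)
	sum := state.Get("image_checksum").(string)
	fmt.Println(url, sum)
}
```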
|
55
vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_delete_image.go
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
|||
package exoscaleimport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
type stepDeleteImage struct{}
|
||||
|
||||
func (s *stepDeleteImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
var (
|
||||
ui = state.Get("ui").(packer.Ui)
|
||||
config = state.Get("config").(*Config)
|
||||
artifact = state.Get("artifact").(packer.Artifact)
|
||||
|
||||
imageFile = artifact.Files()[0]
|
||||
bucketFile = filepath.Base(imageFile)
|
||||
)
|
||||
|
||||
if config.SkipClean {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
ui.Say("Deleting uploaded template image")
|
||||
|
||||
sess, err := session.NewSessionWithOptions(session.Options{Config: aws.Config{
|
||||
Region: aws.String(config.TemplateZone),
|
||||
Endpoint: aws.String(config.SOSEndpoint),
|
||||
Credentials: credentials.NewStaticCredentials(config.APIKey, config.APISecret, "")}})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("unable to initialize session: %v", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
svc := s3.New(sess)
|
||||
if _, err := svc.DeleteObject(&s3.DeleteObjectInput{
|
||||
Bucket: aws.String(config.ImageBucket),
|
||||
Key: aws.String(bucketFile),
|
||||
}); err != nil {
|
||||
ui.Error(fmt.Sprintf("unable to delete template image: %v", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *stepDeleteImage) Cleanup(state multistep.StateBag) {}
|
62
vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_register_template.go
generated
vendored
Normal file
|
@ -0,0 +1,62 @@
|
|||
package exoscaleimport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/exoscale/egoscale"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
type stepRegisterTemplate struct{}
|
||||
|
||||
func (s *stepRegisterTemplate) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
var (
|
||||
exo = state.Get("exo").(*egoscale.Client)
|
||||
ui = state.Get("ui").(packer.Ui)
|
||||
config = state.Get("config").(*Config)
|
||||
imageURL = state.Get("image_url").(string)
|
||||
imageChecksum = state.Get("image_checksum").(string)
|
||||
|
||||
passwordEnabled = !config.TemplateDisablePassword
|
||||
sshkeyEnabled = !config.TemplateDisableSSHKey
|
||||
)
|
||||
|
||||
ui.Say("Registering Compute instance template")
|
||||
|
||||
resp, err := exo.GetWithContext(ctx, &egoscale.ListZones{Name: config.TemplateZone})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("unable to list zones: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
zone := resp.(*egoscale.Zone)
|
||||
|
||||
resp, err = exo.RequestWithContext(ctx, &egoscale.RegisterCustomTemplate{
|
||||
Name: config.TemplateName,
|
||||
Displaytext: config.TemplateDescription,
|
||||
BootMode: config.TemplateBootMode,
|
||||
URL: imageURL,
|
||||
Checksum: imageChecksum,
|
||||
PasswordEnabled: &passwordEnabled,
|
||||
SSHKeyEnabled: &sshkeyEnabled,
|
||||
Details: func() map[string]string {
|
||||
if config.TemplateUsername != "" {
|
||||
return map[string]string{"username": config.TemplateUsername}
|
||||
}
|
||||
return nil
|
||||
}(),
|
||||
ZoneID: zone.ID,
|
||||
})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("unable to export Compute instance snapshot: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
templates := resp.(*[]egoscale.Template)
|
||||
|
||||
state.Put("template", (*templates)[0])
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *stepRegisterTemplate) Cleanup(state multistep.StateBag) {}
|
89
vendor/github.com/exoscale/packer-plugin-exoscale/post-processor/exoscale-import/step_upload_image.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
|||
package exoscaleimport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
type stepUploadImage struct{}
|
||||
|
||||
func (s *stepUploadImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
var (
|
||||
ui = state.Get("ui").(packer.Ui)
|
||||
config = state.Get("config").(*Config)
|
||||
artifact = state.Get("artifact").(packer.Artifact)
|
||||
|
||||
imageFile = artifact.Files()[0]
|
||||
bucketFile = filepath.Base(imageFile)
|
||||
)
|
||||
|
||||
ui.Say("Uploading template image")
|
||||
|
||||
f, err := os.Open(imageFile)
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprint(err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
fileInfo, err := f.Stat()
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprint(err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// For tracking image file upload progress
|
||||
pf := ui.TrackProgress(imageFile, 0, fileInfo.Size(), f)
|
||||
defer pf.Close()
|
||||
|
||||
hash := md5.New()
|
||||
if _, err := io.Copy(hash, f); err != nil {
|
||||
ui.Error(fmt.Sprintf("unable to compute template file checksum: %v", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
if _, err := f.Seek(0, 0); err != nil {
|
||||
ui.Error(fmt.Sprintf("unable to compute template file checksum: %v", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
sess, err := session.NewSessionWithOptions(session.Options{Config: aws.Config{
|
||||
Region: aws.String(config.TemplateZone),
|
||||
Endpoint: aws.String(config.SOSEndpoint),
|
||||
Credentials: credentials.NewStaticCredentials(config.APIKey, config.APISecret, "")}})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("unable to initialize session: %v", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
uploader := s3manager.NewUploader(sess)
|
||||
output, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
|
||||
Body: pf,
|
||||
Bucket: aws.String(config.ImageBucket),
|
||||
Key: aws.String(bucketFile),
|
||||
ContentMD5: aws.String(base64.StdEncoding.EncodeToString(hash.Sum(nil))),
|
||||
ACL: aws.String("public-read"),
|
||||
})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("unable to upload template image: %v", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
state.Put("image_url", output.Location)
|
||||
state.Put("image_checksum", fmt.Sprintf("%x", hash.Sum(nil)))
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *stepUploadImage) Cleanup(state multistep.StateBag) {}
|
|
@ -6,13 +6,12 @@ go:
|
|||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
- tip
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
script:
|
||||
|
|
|
@ -12,7 +12,6 @@ and parsing of UUIDs in different formats.
|
|||
|
||||
This package supports the following UUID versions:
|
||||
* Version 1, based on timestamp and MAC address (RFC-4122)
|
||||
* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
|
||||
* Version 3, based on MD5 hashing of a named value (RFC-4122)
|
||||
* Version 4, based on random numbers (RFC-4122)
|
||||
* Version 5, based on SHA-1 hashing of a named value (RFC-4122)
|
||||
|
|
|
@ -114,7 +114,7 @@ func (u *UUID) UnmarshalText(text []byte) error {
|
|||
case 41, 45:
|
||||
return u.decodeURN(text)
|
||||
default:
|
||||
return fmt.Errorf("uuid: incorrect UUID length: %s", text)
|
||||
return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -122,7 +122,7 @@ func (u *UUID) UnmarshalText(text []byte) error {
|
|||
// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
|
||||
func (u *UUID) decodeCanonical(t []byte) error {
|
||||
if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
|
||||
return fmt.Errorf("uuid: incorrect UUID format %s", t)
|
||||
return fmt.Errorf("uuid: incorrect UUID format in string %q", t)
|
||||
}
|
||||
|
||||
src := t
|
||||
|
@ -160,7 +160,7 @@ func (u *UUID) decodeBraced(t []byte) error {
|
|||
l := len(t)
|
||||
|
||||
if t[0] != '{' || t[l-1] != '}' {
|
||||
return fmt.Errorf("uuid: incorrect UUID format %s", t)
|
||||
return fmt.Errorf("uuid: incorrect UUID format in string %q", t)
|
||||
}
|
||||
|
||||
return u.decodePlain(t[1 : l-1])
|
||||
|
@ -175,7 +175,7 @@ func (u *UUID) decodeURN(t []byte) error {
|
|||
urnUUIDPrefix := t[:9]
|
||||
|
||||
if !bytes.Equal(urnUUIDPrefix, urnPrefix) {
|
||||
return fmt.Errorf("uuid: incorrect UUID format: %s", t)
|
||||
return fmt.Errorf("uuid: incorrect UUID format in string %q", t)
|
||||
}
|
||||
|
||||
return u.decodePlain(t[9:total])
|
||||
|
@ -191,7 +191,7 @@ func (u *UUID) decodePlain(t []byte) error {
|
|||
case 36:
|
||||
return u.decodeCanonical(t)
|
||||
default:
|
||||
return fmt.Errorf("uuid: incorrect UUID length: %s", t)
|
||||
return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -30,7 +30,6 @@ import (
|
|||
"hash"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
@ -47,21 +46,11 @@ type HWAddrFunc func() (net.HardwareAddr, error)
|
|||
// DefaultGenerator is the default UUID Generator used by this package.
|
||||
var DefaultGenerator Generator = NewGen()
|
||||
|
||||
var (
|
||||
posixUID = uint32(os.Getuid())
|
||||
posixGID = uint32(os.Getgid())
|
||||
)
|
||||
|
||||
// NewV1 returns a UUID based on the current timestamp and MAC address.
|
||||
func NewV1() (UUID, error) {
|
||||
return DefaultGenerator.NewV1()
|
||||
}
|
||||
|
||||
// NewV2 returns a DCE Security UUID based on the POSIX UID/GID.
|
||||
func NewV2(domain byte) (UUID, error) {
|
||||
return DefaultGenerator.NewV2(domain)
|
||||
}
|
||||
|
||||
// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
|
||||
func NewV3(ns UUID, name string) UUID {
|
||||
return DefaultGenerator.NewV3(ns, name)
|
||||
|
@ -80,7 +69,6 @@ func NewV5(ns UUID, name string) UUID {
|
|||
// Generator provides an interface for generating UUIDs.
|
||||
type Generator interface {
|
||||
NewV1() (UUID, error)
|
||||
NewV2(domain byte) (UUID, error)
|
||||
NewV3(ns UUID, name string) UUID
|
||||
NewV4() (UUID, error)
|
||||
NewV5(ns UUID, name string) UUID
|
||||
|
@ -164,28 +152,6 @@ func (g *Gen) NewV1() (UUID, error) {
|
|||
return u, nil
|
||||
}
|
||||
|
||||
// NewV2 returns a DCE Security UUID based on the POSIX UID/GID.
|
||||
func (g *Gen) NewV2(domain byte) (UUID, error) {
|
||||
u, err := g.NewV1()
|
||||
if err != nil {
|
||||
return Nil, err
|
||||
}
|
||||
|
||||
switch domain {
|
||||
case DomainPerson:
|
||||
binary.BigEndian.PutUint32(u[:], posixUID)
|
||||
case DomainGroup:
|
||||
binary.BigEndian.PutUint32(u[:], posixGID)
|
||||
}
|
||||
|
||||
u[9] = domain
|
||||
|
||||
u.SetVersion(V2)
|
||||
u.SetVariant(VariantRFC4122)
|
||||
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
|
||||
func (g *Gen) NewV3(ns UUID, name string) UUID {
|
||||
u := newFromHash(md5.New(), ns, name)
|
||||
|
@ -216,7 +182,7 @@ func (g *Gen) NewV5(ns UUID, name string) UUID {
|
|||
return u
|
||||
}
|
||||
|
||||
// Returns the epoch and clock sequence.
|
||||
// getClockSequence returns the epoch and clock sequence.
|
||||
func (g *Gen) getClockSequence() (uint64, uint16, error) {
|
||||
var err error
|
||||
g.clockSequenceOnce.Do(func() {
|
||||
|
|
|
@ -19,11 +19,19 @@
|
|||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
// Package uuid provides implementations of the Universally Unique Identifier (UUID), as specified in RFC-4122 and DCE 1.1.
|
||||
// Package uuid provides implementations of the Universally Unique Identifier
|
||||
// (UUID), as specified in RFC-4122,
|
||||
//
|
||||
// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5.
|
||||
//
|
||||
// DCE 1.1[2] provides the specification for version 2.
|
||||
// DCE 1.1[2] provides the specification for version 2, but version 2 support
|
||||
// was removed from this package in v4 due to some concerns with the
|
||||
// specification itself. Reading the spec, it seems that it would result in
|
||||
// generating UUIDs that aren't very unique. Having read the spec, it seemed
|
||||
// that our implementation did not meet it. It also seems to be at-odds
|
||||
// with RFC 4122, meaning we would need quite a bit of special code to support
|
||||
// it. Lastly, there were no Version 2 implementations that we could find to
|
||||
// ensure we were understanding the specification correctly.
|
||||
//
|
||||
// [1] https://tools.ietf.org/html/rfc4122
|
||||
// [2] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01
|
||||
|
@ -33,6 +41,8 @@ import (
|
|||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -46,7 +56,7 @@ type UUID [Size]byte
|
|||
const (
|
||||
_ byte = iota
|
||||
V1 // Version 1 (date-time and MAC address)
|
||||
V2 // Version 2 (date-time and MAC address, DCE security version)
|
||||
_ // Version 2 (date-time and MAC address, DCE security version) [removed]
|
||||
V3 // Version 3 (namespace name-based)
|
||||
V4 // Version 4 (random)
|
||||
V5 // Version 5 (namespace name-based)
|
||||
|
@ -68,8 +78,8 @@ const (
|
|||
)
|
||||
|
||||
// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00,
|
||||
// 15 October 1582 within a V1 UUID. This type has no meaning for V2-V5
|
||||
// UUIDs since they don't have an embedded timestamp.
|
||||
// 15 October 1582 within a V1 UUID. This type has no meaning for other
|
||||
// UUID versions since they don't have an embedded timestamp.
|
||||
type Timestamp uint64
|
||||
|
||||
const _100nsPerSecond = 10000000
|
||||
|
@ -156,6 +166,65 @@ func (u UUID) String() string {
|
|||
return string(buf)
|
||||
}
|
||||
|
||||
// Format implements fmt.Formatter for UUID values.
|
||||
//
|
||||
// The behavior is as follows:
|
||||
// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'.
|
||||
// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation.
|
||||
// The 'S' verb returns the RFC-4122 format, but with capital hex digits.
|
||||
// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer.
|
||||
// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return
|
||||
// "%!verb(uuid.UUID=value)" as recommended by the fmt package.
|
||||
func (u UUID) Format(f fmt.State, c rune) {
|
||||
switch c {
|
||||
case 'x', 'X':
|
||||
s := hex.EncodeToString(u.Bytes())
|
||||
if c == 'X' {
|
||||
s = strings.Map(toCapitalHexDigits, s)
|
||||
}
|
||||
_, _ = io.WriteString(f, s)
|
||||
case 'v':
|
||||
var s string
|
||||
if f.Flag('#') {
|
||||
s = fmt.Sprintf("%#v", [Size]byte(u))
|
||||
} else {
|
||||
s = u.String()
|
||||
}
|
||||
_, _ = io.WriteString(f, s)
|
||||
case 's', 'S':
|
||||
s := u.String()
|
||||
if c == 'S' {
|
||||
s = strings.Map(toCapitalHexDigits, s)
|
||||
}
|
||||
_, _ = io.WriteString(f, s)
|
||||
case 'q':
|
||||
_, _ = io.WriteString(f, `"`+u.String()+`"`)
|
||||
default:
|
||||
// invalid/unsupported format verb
|
||||
fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String())
|
||||
}
|
||||
}
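// Illustration (not part of the upstream file) of the verbs documented above,
// assuming u = FromStringOrNil("6ba7b810-9dad-11d1-80b4-00c04fd430c8"):
//
//	fmt.Sprintf("%s", u) // "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
//	fmt.Sprintf("%X", u) // "6BA7B8109DAD11D180B400C04FD430C8"
//	fmt.Sprintf("%q", u) // "\"6ba7b810-9dad-11d1-80b4-00c04fd430c8\""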
|
||||
|
||||
func toCapitalHexDigits(ch rune) rune {
|
||||
// convert a-f hex digits to A-F
|
||||
switch ch {
|
||||
case 'a':
|
||||
return 'A'
|
||||
case 'b':
|
||||
return 'B'
|
||||
case 'c':
|
||||
return 'C'
|
||||
case 'd':
|
||||
return 'D'
|
||||
case 'e':
|
||||
return 'E'
|
||||
case 'f':
|
||||
return 'F'
|
||||
default:
|
||||
return ch
|
||||
}
|
||||
}
|
||||
|
||||
// SetVersion sets the version bits.
|
||||
func (u *UUID) SetVersion(v byte) {
|
||||
u[6] = (u[6] & 0x0f) | (v << 4)
|
||||
|
|
|
@ -0,0 +1,22 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
|
@ -0,0 +1,20 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Jared Morse
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,252 @@
|
|||
# httpmock [![Build Status](https://github.com/jarcoal/httpmock/workflows/Build/badge.svg?branch=v1)](https://github.com/jarcoal/httpmock/actions?query=workflow%3ABuild) [![Coverage Status](https://coveralls.io/repos/github/jarcoal/httpmock/badge.svg?branch=v1)](https://coveralls.io/github/jarcoal/httpmock?branch=v1) [![GoDoc](https://godoc.org/github.com/jarcoal/httpmock?status.svg)](https://godoc.org/github.com/jarcoal/httpmock) [![Version](https://img.shields.io/github/tag/jarcoal/httpmock.svg)](https://github.com/jarcoal/httpmock/releases) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go/#testing)
|
||||
|
||||
Easy mocking of http responses from external resources.
|
||||
|
||||
## Install
|
||||
|
||||
Currently supports Go 1.7 - 1.15.
|
||||
|
||||
The `v1` branch has to be used instead of `master`.
|
||||
|
||||
|
||||
### Using go modules (aka. `go mod`)
|
||||
|
||||
In your go files, simply use:
|
||||
```go
|
||||
import "github.com/jarcoal/httpmock"
|
||||
```
|
||||
|
||||
Then the next `go mod tidy` or `go test` invocation will automatically
|
||||
populate your `go.mod` with the latest httpmock release, now
|
||||
[![Version](https://img.shields.io/github/tag/jarcoal/httpmock.svg)](https://github.com/jarcoal/httpmock/releases).
|
||||
|
||||
Note you can use `go mod vendor` to vendor your dependencies.
|
||||
|
||||
|
||||
### Using `$GOPATH`
|
||||
|
||||
The `v1` branch is configured as the default branch on GitHub, so:
|
||||
```
|
||||
go get github.com/jarcoal/httpmock
|
||||
```
|
||||
|
||||
automatically downloads the `v1` branch in `$GOPATH/src`. Then in your
|
||||
go files use:
|
||||
```go
|
||||
import "github.com/jarcoal/httpmock"
|
||||
```
|
||||
|
||||
|
||||
### Vendoring, using [`govendor`](https://github.com/kardianos/govendor) for example
|
||||
|
||||
When vendoring is used, the `v1` branch has to be specified. There are two choices here:
|
||||
|
||||
- preferred way:
|
||||
```
|
||||
govendor fetch github.com/jarcoal/httpmock@v1
|
||||
```
|
||||
then in go files:
|
||||
```go
|
||||
import "github.com/jarcoal/httpmock"
|
||||
```
|
||||
- old way (before `v1` was set as default branch), use gopkg to read from
|
||||
`v1` branch:
|
||||
```
|
||||
govendor fetch gopkg.in/jarcoal/httpmock.v1
|
||||
```
|
||||
then in go files:
|
||||
```go
|
||||
import "gopkg.in/jarcoal/httpmock.v1"
|
||||
```
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
### Simple Example:
|
||||
```go
|
||||
func TestFetchArticles(t *testing.T) {
|
||||
httpmock.Activate()
|
||||
defer httpmock.DeactivateAndReset()
|
||||
|
||||
// Exact URL match
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles",
|
||||
httpmock.NewStringResponder(200, `[{"id": 1, "name": "My Great Article"}]`))
|
||||
|
||||
// Regexp match (could use httpmock.RegisterRegexpResponder instead)
|
||||
httpmock.RegisterResponder("GET", `=~^https://api\.mybiz\.com/articles/id/\d+\z`,
|
||||
httpmock.NewStringResponder(200, `{"id": 1, "name": "My Great Article"}`))
|
||||
|
||||
// do stuff that makes a request to articles
|
||||
...
|
||||
|
||||
// get count info
|
||||
httpmock.GetTotalCallCount()
|
||||
|
||||
// get the amount of calls for the registered responder
|
||||
info := httpmock.GetCallCountInfo()
|
||||
info["GET https://api.mybiz.com/articles"] // number of GET calls made to https://api.mybiz.com/articles
|
||||
info["GET https://api.mybiz.com/articles/id/12"] // number of GET calls made to https://api.mybiz.com/articles/id/12
|
||||
info[`GET =~^https://api\.mybiz\.com/articles/id/\d+\z`] // number of GET calls made to https://api.mybiz.com/articles/id/<any-number>
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Example:
|
||||
```go
|
||||
func TestFetchArticles(t *testing.T) {
|
||||
httpmock.Activate()
|
||||
defer httpmock.DeactivateAndReset()
|
||||
|
||||
// our database of articles
|
||||
articles := make([]map[string]interface{}, 0)
|
||||
|
||||
// mock to list out the articles
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles",
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
resp, err := httpmock.NewJsonResponse(200, articles)
|
||||
if err != nil {
|
||||
return httpmock.NewStringResponse(500, ""), nil
|
||||
}
|
||||
return resp, nil
|
||||
},
|
||||
)
|
||||
|
||||
// return an article related to the request with the help of regexp submatch (\d+)
|
||||
httpmock.RegisterResponder("GET", `=~^https://api\.mybiz\.com/articles/id/(\d+)\z`,
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
// Get ID from request
|
||||
id := httpmock.MustGetSubmatchAsUint(req, 1) // 1=first regexp submatch
|
||||
return httpmock.NewJsonResponse(200, map[string]interface{}{
|
||||
"id": id,
|
||||
"name": "My Great Article",
|
||||
})
|
||||
},
|
||||
)
|
||||
|
||||
// mock to add a new article
|
||||
httpmock.RegisterResponder("POST", "https://api.mybiz.com/articles",
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
article := make(map[string]interface{})
|
||||
if err := json.NewDecoder(req.Body).Decode(&article); err != nil {
|
||||
return httpmock.NewStringResponse(400, ""), nil
|
||||
}
|
||||
|
||||
articles = append(articles, article)
|
||||
|
||||
resp, err := httpmock.NewJsonResponse(200, article)
|
||||
if err != nil {
|
||||
return httpmock.NewStringResponse(500, ""), nil
|
||||
}
|
||||
return resp, nil
|
||||
},
|
||||
)
|
||||
|
||||
// do stuff that adds and checks articles
|
||||
}
|
||||
```
|
||||
|
||||
### Algorithm
|
||||
|
||||
When a `GET http://example.tld/some/path?b=12&a=foo&a=bar` request is
|
||||
caught, all standard responders are checked against the following URLs
|
||||
or paths; the first match stops the search:
|
||||
|
||||
1. `http://example.tld/some/path?b=12&a=foo&a=bar` (original URL)
|
||||
1. `http://example.tld/some/path?a=bar&a=foo&b=12` (sorted query params)
|
||||
1. `http://example.tld/some/path` (without query params)
|
||||
1. `/some/path?b=12&a=foo&a=bar` (original URL without scheme and host)
|
||||
1. `/some/path?a=bar&a=foo&b=12` (same, but sorted query params)
|
||||
1. `/some/path` (path only)
|
||||
|
||||
If no standard responder matched, the regexp responders are checked
|
||||
in the same order; the first match stops the search.
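
For example, a responder registered on the bare path is reached through the query-less fallbacks, so it also serves requests that carry query parameters. A short sketch reusing the hypothetical URL above:

```go
httpmock.RegisterResponder("GET", "http://example.tld/some/path",
	httpmock.NewStringResponder(200, `{"ok": true}`))

// Both of these requests are served by the responder above, the first one
// through the "without query params" fallback:
//   GET http://example.tld/some/path?b=12&a=foo&a=bar
//   GET http://example.tld/some/path
```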
|
||||
|
||||
|
||||
### [Ginkgo](https://onsi.github.io/ginkgo/) Example:
|
||||
```go
|
||||
// article_suite_test.go
|
||||
|
||||
import (
|
||||
// ...
|
||||
"github.com/jarcoal/httpmock"
|
||||
)
|
||||
// ...
|
||||
var _ = BeforeSuite(func() {
|
||||
// block all HTTP requests
|
||||
httpmock.Activate()
|
||||
})
|
||||
|
||||
var _ = BeforeEach(func() {
|
||||
// remove any mocks
|
||||
httpmock.Reset()
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
httpmock.DeactivateAndReset()
|
||||
})
|
||||
|
||||
|
||||
// article_test.go
|
||||
|
||||
import (
|
||||
// ...
|
||||
"github.com/jarcoal/httpmock"
|
||||
)
|
||||
|
||||
var _ = Describe("Articles", func() {
|
||||
It("returns a list of articles", func() {
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles.json",
|
||||
httpmock.NewStringResponder(200, `[{"id": 1, "name": "My Great Article"}]`))
|
||||
|
||||
// do stuff that makes a request to articles.json
|
||||
})
|
||||
})
|
||||
```
|
||||
|
||||
### [Ginkgo](https://onsi.github.io/ginkgo/) + [Resty](https://github.com/go-resty/resty) Example:
|
||||
```go
|
||||
// article_suite_test.go
|
||||
|
||||
import (
|
||||
// ...
|
||||
"github.com/jarcoal/httpmock"
|
||||
"github.com/go-resty/resty"
|
||||
)
|
||||
// ...
|
||||
var _ = BeforeSuite(func() {
|
||||
// block all HTTP requests
|
||||
httpmock.ActivateNonDefault(resty.DefaultClient.GetClient())
|
||||
})
|
||||
|
||||
var _ = BeforeEach(func() {
|
||||
// remove any mocks
|
||||
httpmock.Reset()
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
httpmock.DeactivateAndReset()
|
||||
})
|
||||
|
||||
|
||||
// article_test.go
|
||||
|
||||
import (
|
||||
// ...
|
||||
"github.com/jarcoal/httpmock"
|
||||
"github.com/go-resty/resty"
|
||||
)
|
||||
|
||||
var _ = Describe("Articles", func() {
|
||||
It("returns a list of articles", func() {
|
||||
fixture := `{"status":{"message": "Your message", "code": 200}}`
|
||||
responder := httpmock.NewStringResponder(200, fixture)
|
||||
fakeUrl := "https://api.mybiz.com/articles.json"
|
||||
httpmock.RegisterResponder("GET", fakeUrl, responder)
|
||||
|
||||
// fetch the article into struct
|
||||
articleObject := &models.Article{}
|
||||
_, err := resty.R().SetResult(articleObject).Get(fakeUrl)
|
||||
|
||||
// do stuff with the article object ...
|
||||
})
|
||||
})
|
||||
```
|
|
@ -0,0 +1,81 @@
|
|||
/*
|
||||
Package httpmock provides tools for mocking HTTP responses.
|
||||
|
||||
Simple Example:
|
||||
func TestFetchArticles(t *testing.T) {
|
||||
httpmock.Activate()
|
||||
defer httpmock.DeactivateAndReset()
|
||||
|
||||
// Exact URL match
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles",
|
||||
httpmock.NewStringResponder(200, `[{"id": 1, "name": "My Great Article"}]`))
|
||||
|
||||
// Regexp match (could use httpmock.RegisterRegexpResponder instead)
|
||||
httpmock.RegisterResponder("GET", `=~^https://api\.mybiz\.com/articles/id/\d+\z`,
|
||||
httpmock.NewStringResponder(200, `{"id": 1, "name": "My Great Article"}`))
|
||||
|
||||
// do stuff that makes a request to articles
|
||||
|
||||
// get count info
|
||||
httpmock.GetTotalCallCount()
|
||||
|
||||
// get the amount of calls for the registered responder
|
||||
info := httpmock.GetCallCountInfo()
|
||||
info["GET https://api.mybiz.com/articles"] // number of GET calls made to https://api.mybiz.com/articles
|
||||
info["GET https://api.mybiz.com/articles/id/12"] // number of GET calls made to https://api.mybiz.com/articles/id/12
|
||||
info[`GET =~^https://api\.mybiz\.com/articles/id/\d+\z`] // number of GET calls made to https://api.mybiz.com/articles/id/<any-number>
|
||||
}
|
||||
|
||||
Advanced Example:
|
||||
func TestFetchArticles(t *testing.T) {
|
||||
httpmock.Activate()
|
||||
defer httpmock.DeactivateAndReset()
|
||||
|
||||
// our database of articles
|
||||
articles := make([]map[string]interface{}, 0)
|
||||
|
||||
// mock to list out the articles
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles",
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
resp, err := httpmock.NewJsonResponse(200, articles)
|
||||
if err != nil {
|
||||
return httpmock.NewStringResponse(500, ""), nil
|
||||
}
|
||||
return resp, nil
|
||||
},
|
||||
)
|
||||
|
||||
// return an article related to the request with the help of regexp submatch (\d+)
|
||||
httpmock.RegisterResponder("GET", `=~^https://api\.mybiz\.com/articles/id/(\d+)\z`,
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
// Get ID from request
|
||||
id := httpmock.MustGetSubmatchAsUint(req, 1) // 1=first regexp submatch
|
||||
return httpmock.NewJsonResponse(200, map[string]interface{}{
|
||||
"id": id,
|
||||
"name": "My Great Article",
|
||||
})
|
||||
},
|
||||
)
|
||||
|
||||
// mock to add a new article
|
||||
httpmock.RegisterResponder("POST", "https://api.mybiz.com/articles",
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
article := make(map[string]interface{})
|
||||
if err := json.NewDecoder(req.Body).Decode(&article); err != nil {
|
||||
return httpmock.NewStringResponse(400, ""), nil
|
||||
}
|
||||
|
||||
articles = append(articles, article)
|
||||
|
||||
resp, err := httpmock.NewJsonResponse(200, article)
|
||||
if err != nil {
|
||||
return httpmock.NewStringResponse(500, ""), nil
|
||||
}
|
||||
return resp, nil
|
||||
},
|
||||
)
|
||||
|
||||
// do stuff that adds and checks articles
|
||||
}
|
||||
*/
|
||||
package httpmock
|
|
@ -0,0 +1,13 @@
|
|||
package httpmock
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
var envVarName = "GONOMOCKS"
|
||||
|
||||
// Disabled allows testing whether httpmock is enabled or not. It
|
||||
// depends on the GONOMOCKS environment variable.
|
||||
func Disabled() bool {
|
||||
return os.Getenv(envVarName) != ""
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
package httpmock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
// File is a file name. The contents of this file are loaded on demand
|
||||
// by the following methods.
|
||||
//
|
||||
// Note that:
|
||||
// file := httpmock.File("file.txt")
|
||||
// fmt.Printf("file: %s\n", file)
|
||||
//
|
||||
// prints the content of file "file.txt", since the String() method is used.
|
||||
//
|
||||
// To print the file name, and not its content, simply do:
|
||||
// file := httpmock.File("file.txt")
|
||||
// fmt.Printf("file: %s\n", string(file))
|
||||
type File string
|
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
//
|
||||
// Useful to be used in conjunction with NewJsonResponse() or
|
||||
// NewJsonResponder() as in:
|
||||
// httpmock.NewJsonResponder(200, httpmock.File("body.json"))
|
||||
func (f File) MarshalJSON() ([]byte, error) {
|
||||
return f.bytes()
|
||||
}
|
||||
|
||||
func (f File) bytes() ([]byte, error) {
|
||||
return ioutil.ReadFile(string(f))
|
||||
}
|
||||
|
||||
// Bytes returns the content of file as a []byte. If an error occurs
|
||||
// during the opening or reading of the file, it panics.
|
||||
//
|
||||
// Useful to be used in conjunction with NewBytesResponse() or
|
||||
// NewBytesResponder() as in:
|
||||
// httpmock.NewBytesResponder(200, httpmock.File("body.raw").Bytes())
|
||||
func (f File) Bytes() []byte {
|
||||
b, err := f.bytes()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Cannot read %s: %s", string(f), err))
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// String returns the content of file as a string. If an error occurs
|
||||
// during the opening or reading of the file, it panics.
|
||||
//
|
||||
// Useful to be used in conjunction with NewStringResponse() or
|
||||
// NewStringResponder() as in:
|
||||
// httpmock.NewStringResponder(200, httpmock.File("body.txt").String())
|
||||
func (f File) String() string {
|
||||
return string(f.Bytes())
|
||||
}
|
Some files were not shown because too many files have changed in this diff.